/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.util.hbck;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This testing base class creates a minicluster and a test table, and shuts
 * the cluster down afterwards. It also provides methods to wipe out meta and
 * to inject errors into meta and the file system.
 *
 * Tests should generally break stuff, then attempt to rebuild the meta table
 * offline, then restart hbase, and finally perform checks.
 *
 * NOTE: This is a slow set of tests which takes ~30s each and needs to run on
 * a relatively beefy machine. It seems necessary to run each test in a new
 * jvm, since minicluster startup and teardown seem to leak file handles and
 * eventually cause out-of-file-handle exceptions.
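 *
 * A typical subclass test might look like the sketch below; the rebuild and
 * restart steps are illustrative placeholders, not methods of this class:
 *
 * <pre>{@code
 * public void testRebuildAfterRegionLoss() throws Exception {
 *   // break stuff: remove one region from meta and hdfs
 *   deleteRegion(conf, htbl, Bytes.toBytes("B"), Bytes.toBytes("C"));
 *   // ... run the offline meta rebuild tool, then restart hbase ...
 *   // after a successful rebuild the region count should be restored
 *   assertEquals(5, scanMeta());
 * }
 * }</pre>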
 */
@Category({MiscTests.class, LargeTests.class})
public class OfflineMetaRebuildTestCore {
  private final static Logger LOG = LoggerFactory
      .getLogger(OfflineMetaRebuildTestCore.class);
  protected HBaseTestingUtility TEST_UTIL;
  protected Configuration conf;
  private final static byte[] FAM = Bytes.toBytes("fam");

  // for the instance, reset every test run
  protected Table htbl;
  protected final static byte[][] splits = new byte[][] { Bytes.toBytes("A"),
      Bytes.toBytes("B"), Bytes.toBytes("C") };

  private final static String TABLE_BASE = "tableMetaRebuild";
  private static int tableIdx = 0;
  protected TableName table = TableName.valueOf(TABLE_BASE);
  protected Connection connection;

  @Before
  public void setUpBefore() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
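    // Raise the datanode transceiver limit; a 3-node minicluster plus test
    // traffic can exhaust the default.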
    TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
    TEST_UTIL.startMiniCluster(3);
    conf = TEST_UTIL.getConfiguration();
    this.connection = ConnectionFactory.createConnection(conf);
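    // sanity check: a fresh cluster should have no user tables yet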
    assertEquals(0, TEST_UTIL.getAdmin().listTableDescriptors().size());

    // setup the table
    table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
    tableIdx++;
    htbl = setupTable(table);
    populateTable(htbl);
    // 4 regions for our table, plus the region for the hbase:namespace table
    assertEquals(5, scanMeta());
    LOG.info("Table " + table + " has " + tableRowCount(conf, table)
        + " entries.");
    assertEquals(16, tableRowCount(conf, table));
    TEST_UTIL.getAdmin().disableTable(table);
    assertEquals(1, TEST_UTIL.getAdmin().listTableDescriptors().size());
  }

  @After
  public void tearDownAfter() throws Exception {
    if (this.htbl != null) {
      this.htbl.close();
      this.htbl = null;
    }
    this.connection.close();
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Sets up a clean table before we start mucking with it.
   */
  private Table setupTable(TableName tablename) throws Exception {
    TableDescriptorBuilder tableDescriptorBuilder =
      TableDescriptorBuilder.newBuilder(tablename);
    ColumnFamilyDescriptor columnFamilyDescriptor =
      ColumnFamilyDescriptorBuilder.newBuilder(FAM).build();
    // If a table has no CFs it doesn't get checked
    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
    TEST_UTIL.getAdmin().createTable(tableDescriptorBuilder.build(), splits);
    return this.connection.getTable(tablename);
  }

  private void dumpMeta(TableDescriptor htd) throws IOException {
    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
    for (byte[] row : metaRows) {
      LOG.info(Bytes.toString(row));
    }
  }

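  /**
   * Loads a 4x4 grid of two-byte rows, "AA" through "DD" (16 in total), so
   * the rows span all of the split points {A, B, C}.
   */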
  private void populateTable(Table tbl) throws IOException {
    byte[] values = { 'A', 'B', 'C', 'D' };
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values.length; j++) {
        Put put = new Put(new byte[] { values[i], values[j] });
        put.addColumn(FAM, new byte[] {}, new byte[] { values[i], values[j] });
        puts.add(put);
      }
    }
    tbl.put(puts);
  }

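  /**
   * Injects an error by removing the region whose boundaries exactly match
   * [startKey, endKey): unassigns the region, deletes its directory from
   * HDFS, and deletes its row from hbase:meta.
   */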
  protected void deleteRegion(Configuration conf, final Table tbl,
      byte[] startKey, byte[] endKey) throws IOException {

    LOG.info("Before delete:");
    TableDescriptor htd = tbl.getDescriptor();
    dumpMeta(htd);

    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
      regions = rl.getAllRegionLocations();
    }

    for (HRegionLocation e : regions) {
      RegionInfo hri = e.getRegion();
      ServerName hsa = e.getServerName();
      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
          && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {

        LOG.info("RegionName: " + hri.getRegionNameAsString());
        byte[] deleteRow = hri.getRegionName();
        TEST_UTIL.getAdmin().unassign(deleteRow, true);

        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path p = new Path(CommonFSUtils.getTableDir(rootDir, htd.getTableName()),
            hri.getEncodedName());
        fs.delete(p, true);

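        // finally, remove the region's row from hbase:meta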
        try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
          Delete delete = new Delete(deleteRow);
          meta.delete(delete);
        }
      }
      LOG.info(hri.toString() + hsa.toString());
    }

    LOG.info("After delete:");
    dumpMeta(htd);
  }

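  /**
   * Manually fabricates a region spanning [startKey, endKey): creates the
   * region directory and .regioninfo file in HDFS, then registers the new
   * region in hbase:meta. The region is not assigned to any server.
   */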
  protected RegionInfo createRegion(Configuration conf, final Table htbl,
      byte[] startKey, byte[] endKey) throws IOException {
    RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
        .setStartKey(startKey)
        .setEndKey(endKey)
        .build();

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(CommonFSUtils.getTableDir(rootDir, htbl.getName()),
        hri.getEncodedName());
    fs.mkdirs(p);
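    // write the .regioninfo file so the region looks complete on disk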
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    FSDataOutputStream out = fs.create(riPath);
    out.write(RegionInfo.toDelimitedByteArray(hri));
    out.close();

    // add to meta.
    MetaTableAccessor.addRegionToMeta(TEST_UTIL.getConnection(), hri);
    return hri;
  }

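  /**
   * Deletes every non-system region row from hbase:meta, unassigning each
   * region first, to simulate a catastrophic loss of catalog data.
   */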
  protected void wipeOutMeta() throws IOException {
    // Mess it up by blowing up meta.
    Admin admin = TEST_UTIL.getAdmin();
    Scan s = new Scan();
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    ResultScanner scanner = meta.getScanner(s);
    List<Delete> dels = new ArrayList<>();
    for (Result r : scanner) {
      RegionInfo info = CatalogFamilyFormat.getRegionInfo(r);
      if (info != null && !info.getTable().getNamespaceAsString()
          .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
        Delete d = new Delete(r.getRow());
        dels.add(d);
        admin.unassign(r.getRow(), true);
      }
    }
    meta.delete(dels);
    scanner.close();
    meta.close();
  }

  /**
   * Returns the number of rows in a given table. HBase must be up and the
   * table must be present; otherwise the call waits until the operation
   * times out.
   *
   * @return number of rows in the specified table
   */
  protected int tableRowCount(Configuration conf, TableName table)
      throws IOException {
    int count = 0;
    try (Table t = TEST_UTIL.getConnection().getTable(table);
        ResultScanner rst = t.getScanner(new Scan())) {
      for (@SuppressWarnings("unused") Result rt : rst) {
        count++;
      }
    }
    return count;
  }

  /**
   * Dumps hbase:meta table info.
   *
   * @return number of region entries in meta
   */
  protected int scanMeta() throws IOException {
    LOG.info("Scanning META");
    MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
    return MetaTableAccessor.fullScanRegions(TEST_UTIL.getConnection()).size();
  }

  protected List<TableDescriptor> getTables(final Configuration configuration) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(configuration)) {
      try (Admin admin = connection.getAdmin()) {
        return admin.listTableDescriptors();
      }
    }
  }
}