/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.replication.regionserver;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.UUID;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey;
@Category({ ReplicationTests.class, LargeTests.class })
public class TestReplicationSink {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestReplicationSink.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationSink.class);
  private static final int BATCH_SIZE = 10;

  protected final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  protected static ReplicationSink SINK;

  protected static final TableName TABLE_NAME1 = TableName.valueOf("table1");
  protected static final TableName TABLE_NAME2 = TableName.valueOf("table2");

  protected static final byte[] FAM_NAME1 = Bytes.toBytes("info1");
  protected static final byte[] FAM_NAME2 = Bytes.toBytes("info2");

  protected static Table table1;
  protected static Stoppable STOPPABLE = new Stoppable() {
    final AtomicBoolean stop = new AtomicBoolean(false);

    @Override
    public boolean isStopped() {
      return this.stop.get();
    }

    @Override
    public void stop(String why) {
      LOG.info("STOPPING BECAUSE: " + why);
      this.stop.set(true);
    }

  };

  protected static Table table2;
  protected static String baseNamespaceDir;
  protected static String hfileArchiveDir;
  protected static String replicationClusterId;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().set("hbase.replication.source.fs.conf.provider",
      TestSourceFSConfigurationProvider.class.getCanonicalName());
    TEST_UTIL.startMiniCluster(3);
    RegionServerCoprocessorHost rsCpHost =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
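    // Build a standalone sink (rather than using a running region server's), wired to RS 0's
    // coprocessor host so sink-side coprocessor hooks can still fire during these tests.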
    SINK = new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), rsCpHost);
    table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1);
    table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2);
    Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration());
    baseNamespaceDir = new Path(rootDir, new Path(HConstants.BASE_NAMESPACE_DIR)).toString();
    hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY)).toString();
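    // Arbitrary id standing in for the source cluster; the sink hands it to the
    // source.fs.conf.provider configured above when resolving bulk-loaded hfiles.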
    replicationClusterId = "12345";
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    STOPPABLE.stop("Shutting down");
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void setUp() throws Exception {
    table1 = TEST_UTIL.deleteTableData(TABLE_NAME1);
    table2 = TEST_UTIL.deleteTableData(TABLE_NAME2);
  }

  /**
   * Insert a whole batch of entries
   */
  @Test
  public void testBatchSink() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
  }

  /**
   * Insert a mix of puts and deletes
   */
  @Test
  public void testMixedPutDelete() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE / 2);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE / 2; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
      baseNamespaceDir, hfileArchiveDir);

    entries = new ArrayList<>(BATCH_SIZE);
    cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(TABLE_NAME1, i,
        i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
    }

    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE / 2, scanRes.next(BATCH_SIZE).length);
  }

  @Test
  public void testLargeEditsPutDelete() throws Exception {
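    // Edit counts well beyond BATCH_SIZE, chosen to exercise the sink applying a large
    // batch of puts and then a large mixed batch of puts and deletes.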
    List<WALEntry> entries = new ArrayList<>();
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 5510; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
      baseNamespaceDir, hfileArchiveDir);

    ResultScanner resultScanner = table1.getScanner(new Scan());
    int totalRows = 0;
    while (resultScanner.next() != null) {
      totalRows++;
    }
    assertEquals(5510, totalRows);

    entries = new ArrayList<>();
    cells = new ArrayList<>();
    for (int i = 0; i < 11000; i++) {
      entries.add(createEntry(TABLE_NAME1, i,
        i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells), replicationClusterId,
      baseNamespaceDir, hfileArchiveDir);
    resultScanner = table1.getScanner(new Scan());
    totalRows = 0;
    while (resultScanner.next() != null) {
      totalRows++;
    }
    assertEquals(5500, totalRows);
  }

  /**
   * Insert to 2 different tables
   */
  @Test
  public void testMixedPutTables() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }

    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table2.getScanner(scan);
    for (Result res : scanRes) {
      assertEquals(0, Bytes.toInt(res.getRow()) % 2);
    }
    scanRes = table1.getScanner(scan);
    for (Result res : scanRes) {
      assertEquals(1, Bytes.toInt(res.getRow()) % 2);
    }
  }

  /**
   * Insert then do different types of deletes
   */
  @Test
  public void testMixedDeletes() throws Exception {
    List<WALEntry> entries = new ArrayList<>(3);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    entries = new ArrayList<>(3);
    cells = new ArrayList<>();
    entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells));
    entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
    entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells));

    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);

    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(0, scanRes.next(3).length);
  }

  /**
   * Puts are buffered but deletes are not, so a delete could reach the table ahead of the buffered
   * Put it is meant to mask; this tests that the sink still applies them in order.
   */
  @Test
  public void testApplyDeleteBeforePut() throws Exception {
    List<WALEntry> entries = new ArrayList<>(5);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    // Delete row 1's family while its Put may still be sitting in the buffer
    entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
    for (int i = 3; i < 5; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Get get = new Get(Bytes.toBytes(1));
    Result res = table1.get(get);
    assertEquals(0, res.size());
  }

  @Test
  public void testRethrowRetriesExhaustedException() throws Exception {
    TableName notExistTable = TableName.valueOf("notExistTable");
    List<WALEntry> entries = new ArrayList<>();
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      entries.add(createEntry(notExistTable, i, KeyValue.Type.Put, cells));
    }
    try {
      SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
        replicationClusterId, baseNamespaceDir, hfileArchiveDir);
      Assert.fail("Should re-throw TableNotFoundException.");
    } catch (TableNotFoundException e) {
      // expected
    }
    entries.clear();
    cells.clear();
    for (int i = 0; i < 10; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
      try (Admin admin = conn.getAdmin()) {
        admin.disableTable(TABLE_NAME1);
        try {
          SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
            replicationClusterId, baseNamespaceDir, hfileArchiveDir);
          Assert.fail("Should re-throw RetriesExhaustedException.");
        } catch (RetriesExhaustedException e) {
          // expected
        } finally {
          admin.enableTable(TABLE_NAME1);
        }
      }
    }
  }

  /**
   * Test replicateEntries with a bulk load entry for 25 HFiles
   */
  @Test
  public void testReplicateEntriesForHFiles() throws Exception {
    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
    Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
    int numRows = 10;
    List<Path> p = new ArrayList<>(1);
    final String hfilePrefix = "hfile-";

    // 1. Generate 50 unique numbers to serve as start/end keys for 25 hfile ranges
    Random rand = ThreadLocalRandom.current();
    Set<Integer> numbers = new HashSet<>();
    while (numbers.size() < 50) {
      numbers.add(rand.nextInt(1000));
    }
    List<Integer> numberList = new ArrayList<>(numbers);
    Collections.sort(numberList);
    Map<String, Long> storeFilesSize = new HashMap<>(1);

    // 2. Create 25 hfiles
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = dir.getFileSystem(conf);
    Iterator<Integer> numbersItr = numberList.iterator();
    for (int i = 0; i < 25; i++) {
      Path hfilePath = new Path(familyDir, hfilePrefix + i);
      HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1,
        Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
      p.add(hfilePath);
      storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
    }

    // 3. Create a BulkLoadDescriptor and a WALEdit
    Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
    storeFiles.put(FAM_NAME1, p);
    org.apache.hadoop.hbase.wal.WALEdit edit = null;
    WALProtos.BulkLoadDescriptor loadDescriptor = null;

    try (Connection c = ConnectionFactory.createConnection(conf);
      RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
      RegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegion();
      loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1,
        UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()), storeFiles,
        storeFilesSize, 1);
      edit = org.apache.hadoop.hbase.wal.WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
    }
    List<WALEntry> entries = new ArrayList<>(1);

    // 4. Create a WALEntryBuilder
    WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);

    // 5. Copy each hfile to where it would live under the namespace dir on a real cluster
    for (int i = 0; i < 25; i++) {
      String pathToHfileFromNS = new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString())
        .append(Path.SEPARATOR).append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
        .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
        .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
        .append(hfilePrefix + i).toString();
      String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
      Path dstPath = new Path(dst);
      FileUtil.copy(fs, p.get(i), fs, dstPath, false, conf);
    }

    entries.add(builder.build());
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
      // 6. Assert no existing data in table
      assertEquals(0, scanner.next(numRows).length);
    }
    // 7. Replicate the bulk loaded entry
    SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()),
      replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
      // 8. Assert data is replicated
      assertEquals(numRows, scanner.next(numRows).length);
    }
    // Clean up the created hfiles or it will mess up subsequent tests
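    // (A minimal cleanup sketch: deleting the staging dir created above is assumed to be
    // enough to keep these hfiles from interfering with later tests.)
    fs.delete(dir, true);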
  }

  /**
   * Test failure metrics produced for failed replication edits
   */
  @Test
  public void testFailedReplicationSinkMetrics() throws IOException {
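    // Snapshot the failed-batch counter up front so the assertions below are relative to
    // whatever earlier tests may already have recorded against the shared SINK.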
    long initialFailedBatches = SINK.getSinkMetrics().getFailedBatches();
    long errorCount = 0L;
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    cells.clear(); // cause ArrayIndexOutOfBoundsException
    try {
      SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
        replicationClusterId, baseNamespaceDir, hfileArchiveDir);
      Assert.fail("Should re-throw ArrayIndexOutOfBoundsException.");
    } catch (ArrayIndexOutOfBoundsException e) {
      errorCount++;
      assertEquals(initialFailedBatches + errorCount, SINK.getSinkMetrics().getFailedBatches());
    }

    entries.clear();
    cells.clear();
    TableName notExistTable = TableName.valueOf("notExistTable"); // cause TableNotFoundException
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(notExistTable, i, KeyValue.Type.Put, cells));
    }
    try {
      SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
        replicationClusterId, baseNamespaceDir, hfileArchiveDir);
      Assert.fail("Should re-throw TableNotFoundException.");
    } catch (TableNotFoundException e) {
      errorCount++;
      assertEquals(initialFailedBatches + errorCount, SINK.getSinkMetrics().getFailedBatches());
    }

    entries.clear();
    cells.clear();
    for (int i = 0; i < BATCH_SIZE; i++) {
      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    // cause IOException in batch()
    try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
      try (Admin admin = conn.getAdmin()) {
        admin.disableTable(TABLE_NAME1);
        try {
          SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()),
            replicationClusterId, baseNamespaceDir, hfileArchiveDir);
          Assert.fail("Should re-throw IOException.");
        } catch (IOException e) {
          errorCount++;
          assertEquals(initialFailedBatches + errorCount, SINK.getSinkMetrics().getFailedBatches());
        } finally {
          admin.enableTable(TABLE_NAME1);
        }
      }
    }
  }

  private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List<Cell> cells) {
    byte[] fam = table.equals(TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
    byte[] rowBytes = Bytes.toBytes(row);
    // Sleep briefly so two consecutive edits for the same row never share a timestamp
    try {
      Thread.sleep(1);
    } catch (InterruptedException e) {
      LOG.info("Interrupted while sleeping", e);
    }
    final long now = EnvironmentEdgeManager.currentTime();
    KeyValue kv = null;
    if (type.getCode() == KeyValue.Type.Put.getCode()) {
      kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.Put, Bytes.toBytes(row));
    } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
      kv = new KeyValue(rowBytes, fam, fam, now, KeyValue.Type.DeleteColumn);
    } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
      kv = new KeyValue(rowBytes, fam, null, now, KeyValue.Type.DeleteFamily);
    }
    WALEntry.Builder builder = createWALEntryBuilder(table);
    cells.add(kv);

    return builder.build();
  }

  public static WALEntry.Builder createWALEntryBuilder(TableName table) {
    WALEntry.Builder builder = WALEntry.newBuilder();
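    // Each test entry carries exactly one cell, shipped out-of-band via the CellScanner
    // handed to replicateEntries, hence the fixed associated-cell count of 1.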
    builder.setAssociatedCellCount(1);
    WALKey.Builder keyBuilder = WALKey.newBuilder();
    UUID.Builder uuidBuilder = UUID.newBuilder();
    uuidBuilder.setLeastSigBits(HConstants.DEFAULT_CLUSTER_ID.getLeastSignificantBits());
    uuidBuilder.setMostSigBits(HConstants.DEFAULT_CLUSTER_ID.getMostSignificantBits());
    keyBuilder.setClusterId(uuidBuilder.build());
    keyBuilder.setTableName(UnsafeByteOperations.unsafeWrap(table.getName()));
    keyBuilder.setWriteTime(EnvironmentEdgeManager.currentTime());
    keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(HConstants.EMPTY_BYTE_ARRAY));
    keyBuilder.setLogSequenceNumber(-1);
    builder.setKey(keyBuilder.build());
    return builder;
  }
}