001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase.util;
019
020import java.io.IOException;
021import java.math.BigInteger;
022import java.util.Arrays;
023import java.util.Collection;
024import java.util.LinkedList;
025import java.util.List;
026import java.util.Map;
027import java.util.Set;
028import java.util.TreeMap;
029import org.apache.commons.lang3.ArrayUtils;
030import org.apache.commons.lang3.StringUtils;
031import org.apache.hadoop.conf.Configuration;
032import org.apache.hadoop.fs.FSDataInputStream;
033import org.apache.hadoop.fs.FSDataOutputStream;
034import org.apache.hadoop.fs.FileSystem;
035import org.apache.hadoop.fs.Path;
036import org.apache.hadoop.hbase.HBaseConfiguration;
037import org.apache.hadoop.hbase.HConstants;
038import org.apache.hadoop.hbase.HRegionLocation;
039import org.apache.hadoop.hbase.ServerName;
040import org.apache.hadoop.hbase.TableName;
041import org.apache.hadoop.hbase.client.Admin;
042import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
043import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
044import org.apache.hadoop.hbase.client.Connection;
045import org.apache.hadoop.hbase.client.ConnectionFactory;
046import org.apache.hadoop.hbase.client.NoServerForRegionException;
047import org.apache.hadoop.hbase.client.RegionInfo;
048import org.apache.hadoop.hbase.client.RegionLocator;
049import org.apache.hadoop.hbase.client.Table;
050import org.apache.hadoop.hbase.client.TableDescriptor;
051import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
052import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
053import org.apache.yetus.audience.InterfaceAudience;
054import org.slf4j.Logger;
055import org.slf4j.LoggerFactory;
056
057import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
058import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
059import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
060import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
061import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
062import org.apache.hbase.thirdparty.org.apache.commons.cli.GnuParser;
063import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
064import org.apache.hbase.thirdparty.org.apache.commons.cli.OptionBuilder;
065import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
066import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
067
068/**
069 * The {@link RegionSplitter} class provides several utilities to help in the administration
070 * lifecycle for developers who choose to manually split regions instead of having HBase handle that
071 * automatically. The most useful utilities are:
072 * <p>
073 * <ul>
074 * <li>Create a table with a specified number of pre-split regions
075 * <li>Execute a rolling split of all regions on an existing table
076 * </ul>
077 * <p>
078 * Both operations can be safely done on a live server.
079 * <p>
080 * <b>Question:</b> How do I turn off automatic splitting? <br>
081 * <b>Answer:</b> Automatic splitting is determined by the configuration value
082 * <i>HConstants.HREGION_MAX_FILESIZE</i>. It is not recommended that you set this to Long.MAX_VALUE
083 * in case you forget about manual splits. A suggested setting is 100GB, which would result in &gt;
084 * 1hr major compactions if reached.
085 * <p>
086 * <b>Question:</b> Why did the original authors decide to manually split? <br>
087 * <b>Answer:</b> Specific workload characteristics of our use case allowed us to benefit from a
088 * manual split system.
089 * <p>
090 * <ul>
091 * <li>Data (~1k) that would grow instead of being replaced
092 * <li>Data growth was roughly uniform across all regions
093 * <li>OLTP workload. Data loss is a big deal.
094 * </ul>
095 * <p>
096 * <b>Question:</b> Why is manual splitting good for this workload? <br>
097 * <b>Answer:</b> Although automated splitting is not a bad option, there are benefits to manual
098 * splitting.
099 * <p>
100 * <ul>
101 * <li>With growing amounts of data, splits will continually be needed. Since you always know
102 * exactly what regions you have, long-term debugging and profiling is much easier with manual
103 * splits. It is hard to trace the logs to understand region level problems if it keeps splitting
104 * and getting renamed.
105 * <li>Data offlining bugs + unknown number of split regions == oh crap! If an WAL or StoreFile was
106 * mistakenly unprocessed by HBase due to a weird bug and you notice it a day or so later, you can
107 * be assured that the regions specified in these files are the same as the current regions and you
108 * have less headaches trying to restore/replay your data.
109 * <li>You can finely tune your compaction algorithm. With roughly uniform data growth, it's easy to
110 * cause split / compaction storms as the regions all roughly hit the same data size at the same
111 * time. With manual splits, you can let staggered, time-based major compactions spread out your
112 * network IO load.
113 * </ul>
114 * <p>
115 * <b>Question:</b> What's the optimal number of pre-split regions to create? <br>
116 * <b>Answer:</b> Mileage will vary depending upon your application.
117 * <p>
118 * The short answer for our application is that we started with 10 pre-split regions / server and
119 * watched our data growth over time. It's better to err on the side of too little regions and
120 * rolling split later.
121 * <p>
122 * The more complicated answer is that this depends upon the largest storefile in your region. With
123 * a growing data size, this will get larger over time. You want the largest region to be just big
124 * enough that the {@link org.apache.hadoop.hbase.regionserver.HStore} compact selection algorithm
125 * only compacts it due to a timed major. If you don't, your cluster can be prone to compaction
126 * storms as the algorithm decides to run major compactions on a large series of regions all at
127 * once. Note that compaction storms are due to the uniform data growth, not the manual split
128 * decision.
129 * <p>
130 * If you pre-split your regions too thin, you can increase the major compaction interval by
131 * configuring HConstants.MAJOR_COMPACTION_PERIOD. If your data size grows too large, use this
132 * script to perform a network IO safe rolling split of all regions.
133 */
134@InterfaceAudience.Private
135public class RegionSplitter {
136  private static final Logger LOG = LoggerFactory.getLogger(RegionSplitter.class);
137
138  /**
139   * A generic interface for the RegionSplitter code to use for all it's functionality. Note that
140   * the original authors of this code use {@link HexStringSplit} to partition their table and set
141   * it as default, but provided this for your custom algorithm. To use, create a new derived class
142   * from this interface and call {@link RegionSplitter#createPresplitTable} or
143   * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the argument
144   * splitClassName giving the name of your class.
145   */
146  public interface SplitAlgorithm {
147    /**
148     * Split a pre-existing region into 2 regions. first row (inclusive) last row (exclusive)
149     * @return the split row to use
150     */
151    byte[] split(byte[] start, byte[] end);
152
153    /**
154     * Split an entire table. number of regions to split the table into user input is validated at
155     * this time. may throw a runtime exception in response to a parse failure
156     * @return array of split keys for the initial regions of the table. The length of the returned
157     *         array should be numRegions-1.
158     */
159    byte[][] split(int numRegions);
160
161    /**
162     * Some MapReduce jobs may want to run multiple mappers per region, this is intended for such
163     * usecase.
164     * @param start     first row (inclusive)
165     * @param end       last row (exclusive)
166     * @param numSplits number of splits to generate
167     * @param inclusive whether start and end are returned as split points
168     */
169    byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive);
170
171    /**
172     * In HBase, the first row is represented by an empty byte array. This might cause problems with
173     * your split algorithm or row printing. All your APIs will be passed firstRow() instead of
174     * empty array.
175     * @return your representation of your first row
176     */
177    byte[] firstRow();
178
179    /**
180     * In HBase, the last row is represented by an empty byte array. This might cause problems with
181     * your split algorithm or row printing. All your APIs will be passed firstRow() instead of
182     * empty array.
183     * @return your representation of your last row
184     */
185    byte[] lastRow();
186
187    /**
188     * In HBase, the last row is represented by an empty byte array. Set this value to help the
189     * split code understand how to evenly divide the first region. raw user input (may throw
190     * RuntimeException on parse failure)
191     */
192    void setFirstRow(String userInput);
193
194    /**
195     * In HBase, the last row is represented by an empty byte array. Set this value to help the
196     * split code understand how to evenly divide the last region. Note that this last row is
197     * inclusive for all rows sharing the same prefix. raw user input (may throw RuntimeException on
198     * parse failure)
199     */
200    void setLastRow(String userInput);
201
202    /**
203     * user or file input for row
204     * @return byte array representation of this row for HBase
205     */
206    byte[] strToRow(String input);
207
208    /**
209     * byte array representing a row in HBase
210     * @return String to use for debug &amp; file printing
211     */
212    String rowToStr(byte[] row);
213
214    /** Returns the separator character to use when storing / printing the row */
215    String separator();
216
217    /**
218     * Set the first row
219     * @param userInput byte array of the row key.
220     */
221    void setFirstRow(byte[] userInput);
222
223    /**
224     * Set the last row
225     * @param userInput byte array of the row key.
226     */
227    void setLastRow(byte[] userInput);
228  }
229
230  /**
231   * The main function for the RegionSplitter application. Common uses:
232   * <p>
233   * <ul>
234   * <li>create a table named 'myTable' with 60 pre-split regions containing 2 column families
235   * 'test' &amp; 'rs', assuming the keys are hex-encoded ASCII:
236   * <ul>
237   * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable
238   * HexStringSplit
239   * </ul>
240   * <li>create a table named 'myTable' with 50 pre-split regions, assuming the keys are
241   * decimal-encoded ASCII:
242   * <ul>
243   * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit
244   * </ul>
245   * <li>perform a rolling split of 'myTable' (i.e. 60 =&gt; 120 regions), # 2 outstanding splits at
246   * a time, assuming keys are uniformly distributed bytes:
247   * <ul>
248   * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit
249   * </ul>
250   * </ul>
251   * There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit,
252   * and UniformSplit. These are different strategies for choosing region boundaries. See their
253   * source code for details. Usage: RegionSplitter &lt;TABLE&gt; &lt;SPLITALGORITHM&gt; &lt;-c
254   * &lt;# regions&gt; -f &lt;family:family:...&gt; | -r [-o &lt;# outstanding splits&gt;]&gt; [-D
255   * &lt;conf.param=value&gt;] HBase IO problem user requested exit problem parsing user input
256   */
257  @SuppressWarnings("static-access")
258  public static void main(String[] args) throws IOException, InterruptedException, ParseException {
259    Configuration conf = HBaseConfiguration.create();
260
261    // parse user input
262    Options opt = new Options();
263    opt.addOption(OptionBuilder.withArgName("property=value").hasArg()
264      .withDescription("Override HBase Configuration Settings").create("D"));
265    opt.addOption(OptionBuilder.withArgName("region count").hasArg()
266      .withDescription("Create a new table with a pre-split number of regions").create("c"));
267    opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg()
268      .withDescription("Column Families to create with new table.  Required with -c").create("f"));
269    opt.addOption("h", false, "Print this usage help");
270    opt.addOption("r", false, "Perform a rolling split of an existing region");
271    opt.addOption(OptionBuilder.withArgName("count").hasArg()
272      .withDescription("Max outstanding splits that have unfinished major compactions")
273      .create("o"));
274    opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm");
275    opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm");
276    opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. "
277      + "STRONGLY DISCOURAGED for production systems.  ");
278    CommandLine cmd = new GnuParser().parse(opt, args);
279
280    if (cmd.hasOption("D")) {
281      for (String confOpt : cmd.getOptionValues("D")) {
282        String[] kv = confOpt.split("=", 2);
283        if (kv.length == 2) {
284          conf.set(kv[0], kv[1]);
285          LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
286        } else {
287          throw new ParseException("-D option format invalid: " + confOpt);
288        }
289      }
290    }
291
292    if (cmd.hasOption("risky")) {
293      conf.setBoolean("split.verify", false);
294    }
295
296    boolean createTable = cmd.hasOption("c") && cmd.hasOption("f");
297    boolean rollingSplit = cmd.hasOption("r");
298    boolean oneOperOnly = createTable ^ rollingSplit;
299
300    if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) {
301      new HelpFormatter().printHelp("bin/hbase regionsplitter <TABLE> <SPLITALGORITHM>\n"
302        + "SPLITALGORITHM is the java class name of a class implementing "
303        + "SplitAlgorithm, or one of the special strings HexStringSplit or "
304        + "DecimalStringSplit or UniformSplit, which are built-in split algorithms. "
305        + "HexStringSplit treats keys as hexadecimal ASCII, and "
306        + "DecimalStringSplit treats keys as decimal ASCII, and "
307        + "UniformSplit treats keys as arbitrary bytes.", opt);
308      return;
309    }
310    TableName tableName = TableName.valueOf(cmd.getArgs()[0]);
311    String splitClass = cmd.getArgs()[1];
312    SplitAlgorithm splitAlgo = newSplitAlgoInstance(conf, splitClass);
313
314    if (cmd.hasOption("firstrow")) {
315      splitAlgo.setFirstRow(cmd.getOptionValue("firstrow"));
316    }
317    if (cmd.hasOption("lastrow")) {
318      splitAlgo.setLastRow(cmd.getOptionValue("lastrow"));
319    }
320
321    if (createTable) {
322      conf.set("split.count", cmd.getOptionValue("c"));
323      createPresplitTable(tableName, splitAlgo, cmd.getOptionValue("f").split(":"), conf);
324    }
325
326    if (rollingSplit) {
327      if (cmd.hasOption("o")) {
328        conf.set("split.outstanding", cmd.getOptionValue("o"));
329      }
330      rollingSplit(tableName, splitAlgo, conf);
331    }
332  }
333
334  static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo,
335    String[] columnFamilies, Configuration conf) throws IOException, InterruptedException {
336    final int splitCount = conf.getInt("split.count", 0);
337    Preconditions.checkArgument(splitCount > 1, "Split count must be > 1");
338
339    Preconditions.checkArgument(columnFamilies.length > 0,
340      "Must specify at least one column family. ");
341    LOG.debug("Creating table " + tableName + " with " + columnFamilies.length
342      + " column families.  Presplitting to " + splitCount + " regions");
343
344    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
345    for (String cf : columnFamilies) {
346      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf));
347    }
348    try (Connection connection = ConnectionFactory.createConnection(conf)) {
349      Admin admin = connection.getAdmin();
350      try {
351        Preconditions.checkArgument(!admin.tableExists(tableName),
352          "Table already exists: " + tableName);
353        admin.createTable(builder.build(), splitAlgo.split(splitCount));
354      } finally {
355        admin.close();
356      }
357      LOG.debug("Table created!  Waiting for regions to show online in META...");
358      if (!conf.getBoolean("split.verify", true)) {
359        // NOTE: createTable is synchronous on the table, but not on the regions
360        int onlineRegions = 0;
361        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
362          while (onlineRegions < splitCount) {
363            onlineRegions = locator.getAllRegionLocations().size();
364            LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
365            if (onlineRegions < splitCount) {
366              Thread.sleep(10 * 1000); // sleep
367            }
368          }
369        }
370      }
371      LOG.debug("Finished creating table with " + splitCount + " regions");
372    }
373  }
374
375  /**
376   * Alternative getCurrentNrHRS which is no longer available.
377   * @return Rough count of regionservers out on cluster.
378   * @throws IOException if a remote or network exception occurs
379   */
380  private static int getRegionServerCount(final Connection connection) throws IOException {
381    try (Admin admin = connection.getAdmin()) {
382      Collection<ServerName> servers = admin.getRegionServers();
383      return servers == null || servers.isEmpty() ? 0 : servers.size();
384    }
385  }
386
387  private static byte[] readFile(final FileSystem fs, final Path path) throws IOException {
388    FSDataInputStream tmpIn = fs.open(path);
389    try {
390      byte[] rawData = new byte[tmpIn.available()];
391      tmpIn.readFully(rawData);
392      return rawData;
393    } finally {
394      tmpIn.close();
395    }
396  }
397
398  static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf)
399    throws IOException, InterruptedException {
400    final int minOS = conf.getInt("split.outstanding", 2);
401    try (Connection connection = ConnectionFactory.createConnection(conf)) {
402      // Max outstanding splits. default == 50% of servers
403      final int MAX_OUTSTANDING = Math.max(getRegionServerCount(connection) / 2, minOS);
404
405      Path hbDir = CommonFSUtils.getRootDir(conf);
406      Path tableDir = CommonFSUtils.getTableDir(hbDir, tableName);
407      Path splitFile = new Path(tableDir, "_balancedSplit");
408      FileSystem fs = FileSystem.get(conf);
409
410      // Get a list of daughter regions to create
411      LinkedList<Pair<byte[], byte[]>> tmpRegionSet = null;
412      try (Table table = connection.getTable(tableName)) {
413        tmpRegionSet = getSplits(connection, tableName, splitAlgo);
414      }
415      LinkedList<Pair<byte[], byte[]>> outstanding = Lists.newLinkedList();
416      int splitCount = 0;
417      final int origCount = tmpRegionSet.size();
418
419      // all splits must compact & we have 1 compact thread, so 2 split
420      // requests to the same RS can stall the outstanding split queue.
421      // To fix, group the regions into an RS pool and round-robin through it
422      LOG.debug("Bucketing regions by regionserver...");
423      TreeMap<ServerName, LinkedList<Pair<byte[], byte[]>>> daughterRegions = Maps.newTreeMap();
424      // Get a regionLocator. Need it in below.
425      try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
426        for (Pair<byte[], byte[]> dr : tmpRegionSet) {
427          ServerName rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getServerName();
428          if (!daughterRegions.containsKey(rsLocation)) {
429            LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
430            daughterRegions.put(rsLocation, entry);
431          }
432          daughterRegions.get(rsLocation).add(dr);
433        }
434        LOG.debug("Done with bucketing.  Split time!");
435        long startTime = EnvironmentEdgeManager.currentTime();
436
437        // Open the split file and modify it as splits finish
438        byte[] rawData = readFile(fs, splitFile);
439
440        FSDataOutputStream splitOut = fs.create(splitFile);
441        try {
442          splitOut.write(rawData);
443
444          try {
445            // *** split code ***
446            while (!daughterRegions.isEmpty()) {
447              LOG.debug(daughterRegions.size() + " RS have regions to splt.");
448
449              // Get ServerName to region count mapping
450              final TreeMap<ServerName, Integer> rsSizes = Maps.newTreeMap();
451              List<HRegionLocation> hrls = regionLocator.getAllRegionLocations();
452              for (HRegionLocation hrl : hrls) {
453                ServerName sn = hrl.getServerName();
454                if (rsSizes.containsKey(sn)) {
455                  rsSizes.put(sn, rsSizes.get(sn) + 1);
456                } else {
457                  rsSizes.put(sn, 1);
458                }
459              }
460
461              // Round-robin through the ServerName list. Choose the lightest-loaded servers
462              // first to keep the master from load-balancing regions as we split.
463              for (Map.Entry<ServerName,
464                LinkedList<Pair<byte[], byte[]>>> daughterRegion : daughterRegions.entrySet()) {
465                Pair<byte[], byte[]> dr = null;
466                ServerName rsLoc = daughterRegion.getKey();
467                LinkedList<Pair<byte[], byte[]>> regionList = daughterRegion.getValue();
468
469                // Find a region in the ServerName list that hasn't been moved
470                LOG.debug("Finding a region on " + rsLoc);
471                while (!regionList.isEmpty()) {
472                  dr = regionList.pop();
473
474                  // get current region info
475                  byte[] split = dr.getSecond();
476                  HRegionLocation regionLoc = regionLocator.getRegionLocation(split);
477
478                  // if this region moved locations
479                  ServerName newRs = regionLoc.getServerName();
480                  if (newRs.compareTo(rsLoc) != 0) {
481                    LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs
482                      + ". Relocating...");
483                    // relocate it, don't use it right now
484                    if (!daughterRegions.containsKey(newRs)) {
485                      LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
486                      daughterRegions.put(newRs, entry);
487                    }
488                    daughterRegions.get(newRs).add(dr);
489                    dr = null;
490                    continue;
491                  }
492
493                  // make sure this region wasn't already split
494                  byte[] sk = regionLoc.getRegion().getStartKey();
495                  if (sk.length != 0) {
496                    if (Bytes.equals(split, sk)) {
497                      LOG.debug("Region already split on " + splitAlgo.rowToStr(split)
498                        + ".  Skipping this region...");
499                      ++splitCount;
500                      dr = null;
501                      continue;
502                    }
503                    byte[] start = dr.getFirst();
504                    Preconditions.checkArgument(Bytes.equals(start, sk),
505                      splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk));
506                  }
507
508                  // passed all checks! found a good region
509                  break;
510                }
511                if (regionList.isEmpty()) {
512                  daughterRegions.remove(rsLoc);
513                }
514                if (dr == null) continue;
515
516                // we have a good region, time to split!
517                byte[] split = dr.getSecond();
518                LOG.debug("Splitting at " + splitAlgo.rowToStr(split));
519                try (Admin admin = connection.getAdmin()) {
520                  admin.split(tableName, split);
521                }
522
523                LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
524                LinkedList<Pair<byte[], byte[]>> local_finished = Lists.newLinkedList();
525                if (conf.getBoolean("split.verify", true)) {
526                  // we need to verify and rate-limit our splits
527                  outstanding.addLast(dr);
528                  // with too many outstanding splits, wait for some to finish
529                  while (outstanding.size() >= MAX_OUTSTANDING) {
530                    LOG.debug("Wait for outstanding splits " + outstanding.size());
531                    local_finished = splitScan(outstanding, connection, tableName, splitAlgo);
532                    if (local_finished.isEmpty()) {
533                      Thread.sleep(30 * 1000);
534                    } else {
535                      finished.addAll(local_finished);
536                      outstanding.removeAll(local_finished);
537                      LOG.debug(local_finished.size() + " outstanding splits finished");
538                    }
539                  }
540                } else {
541                  finished.add(dr);
542                }
543
544                // mark each finished region as successfully split.
545                for (Pair<byte[], byte[]> region : finished) {
546                  splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
547                    + splitAlgo.rowToStr(region.getSecond()) + "\n");
548                  splitCount++;
549                  if (splitCount % 10 == 0) {
550                    long tDiff = (EnvironmentEdgeManager.currentTime() - startTime) / splitCount;
551                    LOG.debug(
552                      "STATUS UPDATE: " + splitCount + " / " + origCount + ". Avg Time / Split = "
553                        + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
554                  }
555                }
556              }
557            }
558            if (conf.getBoolean("split.verify", true)) {
559              while (!outstanding.isEmpty()) {
560                LOG.debug("Finally Wait for outstanding splits " + outstanding.size());
561                LinkedList<Pair<byte[], byte[]>> finished =
562                  splitScan(outstanding, connection, tableName, splitAlgo);
563                if (finished.isEmpty()) {
564                  Thread.sleep(30 * 1000);
565                } else {
566                  outstanding.removeAll(finished);
567                  for (Pair<byte[], byte[]> region : finished) {
568                    splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
569                      + splitAlgo.rowToStr(region.getSecond()) + "\n");
570                    splitCount++;
571                  }
572                  LOG.debug("Finally " + finished.size() + " outstanding splits finished");
573                }
574              }
575            }
576            LOG.debug("All regions have been successfully split!");
577          } finally {
578            long tDiff = EnvironmentEdgeManager.currentTime() - startTime;
579            LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
580            LOG.debug("Splits = " + splitCount);
581            if (0 < splitCount) {
582              LOG.debug("Avg Time / Split = "
583                + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount));
584            }
585          }
586        } finally {
587          splitOut.close();
588          fs.delete(splitFile, false);
589        }
590      }
591    }
592  }
593
594  /**
595   * @throws IOException if the specified SplitAlgorithm class couldn't be instantiated
596   */
597  public static SplitAlgorithm newSplitAlgoInstance(Configuration conf, String splitClassName)
598    throws IOException {
599    Class<?> splitClass;
600
601    // For split algorithms builtin to RegionSplitter, the user can specify
602    // their simple class name instead of a fully qualified class name.
603    if (splitClassName.equals(HexStringSplit.class.getSimpleName())) {
604      splitClass = HexStringSplit.class;
605    } else if (splitClassName.equals(DecimalStringSplit.class.getSimpleName())) {
606      splitClass = DecimalStringSplit.class;
607    } else if (splitClassName.equals(UniformSplit.class.getSimpleName())) {
608      splitClass = UniformSplit.class;
609    } else {
610      try {
611        splitClass = conf.getClassByName(splitClassName);
612      } catch (ClassNotFoundException e) {
613        throw new IOException("Couldn't load split class " + splitClassName, e);
614      }
615      if (splitClass == null) {
616        throw new IOException("Failed loading split class " + splitClassName);
617      }
618      if (!SplitAlgorithm.class.isAssignableFrom(splitClass)) {
619        throw new IOException("Specified split class doesn't implement SplitAlgorithm");
620      }
621    }
622    try {
623      return splitClass.asSubclass(SplitAlgorithm.class).getDeclaredConstructor().newInstance();
624    } catch (Exception e) {
625      throw new IOException("Problem loading split algorithm: ", e);
626    }
627  }
628
629  static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList,
630    final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo)
631    throws IOException, InterruptedException {
632    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
633    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
634    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
635
636    // Get table info
637    Pair<Path, Path> tableDirAndSplitFile =
638      getTableDirAndSplitFile(connection.getConfiguration(), tableName);
639    Path tableDir = tableDirAndSplitFile.getFirst();
640    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
641    // Clear the cache to forcibly refresh region information
642    connection.clearRegionLocationCache();
643    TableDescriptor htd = null;
644    try (Table table = connection.getTable(tableName)) {
645      htd = table.getDescriptor();
646    }
647    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
648      // for every region that hasn't been verified as a finished split
649      for (Pair<byte[], byte[]> region : regionList) {
650        byte[] start = region.getFirst();
651        byte[] split = region.getSecond();
652
653        // see if the new split daughter region has come online
654        try {
655          RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion();
656          if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
657            logicalSplitting.add(region);
658            continue;
659          }
660        } catch (NoServerForRegionException nsfre) {
661          // NSFRE will occur if the old hbase:meta entry has no server assigned
662          LOG.info(nsfre.toString(), nsfre);
663          logicalSplitting.add(region);
664          continue;
665        }
666
667        try {
668          // when a daughter region is opened, a compaction is triggered
669          // wait until compaction completes for both daughter regions
670          LinkedList<RegionInfo> check = Lists.newLinkedList();
671          check.add(regionLocator.getRegionLocation(start).getRegion());
672          check.add(regionLocator.getRegionLocation(split).getRegion());
673          for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) {
674            byte[] sk = hri.getStartKey();
675            if (sk.length == 0) sk = splitAlgo.firstRow();
676
677            HRegionFileSystem regionFs = HRegionFileSystem
678              .openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
679
680            // Check every Column Family for that region -- check does not have references.
681            boolean refFound = false;
682            for (ColumnFamilyDescriptor c : htd.getColumnFamilies()) {
683              if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
684                break;
685              }
686            }
687
688            // compaction is completed when all reference files are gone
689            if (!refFound) {
690              check.remove(hri);
691            }
692          }
693          if (check.isEmpty()) {
694            finished.add(region);
695          } else {
696            physicalSplitting.add(region);
697          }
698        } catch (NoServerForRegionException nsfre) {
699          LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
700          physicalSplitting.add(region);
701          connection.clearRegionLocationCache();
702        }
703      }
704
705      LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size()
706        + " split wait / " + physicalSplitting.size() + " reference wait");
707
708      return finished;
709    }
710  }
711
712  /**
713   * @return A Pair where first item is table dir and second is the split file.
714   * @throws IOException if a remote or network exception occurs
715   */
716  private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
717    final TableName tableName) throws IOException {
718    Path hbDir = CommonFSUtils.getRootDir(conf);
719    Path tableDir = CommonFSUtils.getTableDir(hbDir, tableName);
720    Path splitFile = new Path(tableDir, "_balancedSplit");
721    return new Pair<>(tableDir, splitFile);
722  }
723
724  static LinkedList<Pair<byte[], byte[]>> getSplits(final Connection connection,
725    TableName tableName, SplitAlgorithm splitAlgo) throws IOException {
726    Pair<Path, Path> tableDirAndSplitFile =
727      getTableDirAndSplitFile(connection.getConfiguration(), tableName);
728    Path tableDir = tableDirAndSplitFile.getFirst();
729    Path splitFile = tableDirAndSplitFile.getSecond();
730
731    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
732
733    // Using strings because (new byte[]{0}).equals(new byte[]{0}) == false
734    Set<Pair<String, String>> daughterRegions = Sets.newHashSet();
735
736    // Does a split file exist?
737    if (!fs.exists(splitFile)) {
738      // NO = fresh start. calculate splits to make
739      LOG.debug("No " + splitFile.getName() + " file. Calculating splits ");
740
741      // Query meta for all regions in the table
742      Set<Pair<byte[], byte[]>> rows = Sets.newHashSet();
743      Pair<byte[][], byte[][]> tmp = null;
744      try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
745        tmp = regionLocator.getStartEndKeys();
746      }
747      Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length,
748        "Start and End rows should be equivalent");
749      for (int i = 0; i < tmp.getFirst().length; ++i) {
750        byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i];
751        if (start.length == 0) start = splitAlgo.firstRow();
752        if (end.length == 0) end = splitAlgo.lastRow();
753        rows.add(Pair.newPair(start, end));
754      }
755      LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split.");
756
757      // prepare the split file
758      Path tmpFile = new Path(tableDir, "_balancedSplit_prepare");
759      FSDataOutputStream tmpOut = fs.create(tmpFile);
760
761      // calculate all the splits == [daughterRegions] = [(start, splitPoint)]
762      for (Pair<byte[], byte[]> r : rows) {
763        byte[] splitPoint = splitAlgo.split(r.getFirst(), r.getSecond());
764        String startStr = splitAlgo.rowToStr(r.getFirst());
765        String splitStr = splitAlgo.rowToStr(splitPoint);
766        daughterRegions.add(Pair.newPair(startStr, splitStr));
767        LOG.debug("Will Split [" + startStr + " , " + splitAlgo.rowToStr(r.getSecond()) + ") at "
768          + splitStr);
769        tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr + "\n");
770      }
771      tmpOut.close();
772      fs.rename(tmpFile, splitFile);
773    } else {
774      LOG.debug("_balancedSplit file found. Replay log to restore state...");
775      RecoverLeaseFSUtils.recoverFileLease(fs, splitFile, connection.getConfiguration(), null);
776
777      // parse split file and process remaining splits
778      FSDataInputStream tmpIn = fs.open(splitFile);
779      StringBuilder sb = new StringBuilder(tmpIn.available());
780      while (tmpIn.available() > 0) {
781        sb.append(tmpIn.readChar());
782      }
783      tmpIn.close();
784      for (String line : sb.toString().split("\n")) {
785        String[] cmd = line.split(splitAlgo.separator());
786        Preconditions.checkArgument(3 == cmd.length);
787        byte[] start = splitAlgo.strToRow(cmd[1]);
788        String startStr = splitAlgo.rowToStr(start);
789        byte[] splitPoint = splitAlgo.strToRow(cmd[2]);
790        String splitStr = splitAlgo.rowToStr(splitPoint);
791        Pair<String, String> r = Pair.newPair(startStr, splitStr);
792        if (cmd[0].equals("+")) {
793          LOG.debug("Adding: " + r);
794          daughterRegions.add(r);
795        } else {
796          LOG.debug("Removing: " + r);
797          Preconditions.checkArgument(cmd[0].equals("-"), "Unknown option: " + cmd[0]);
798          Preconditions.checkState(daughterRegions.contains(r), "Missing row: " + r);
799          daughterRegions.remove(r);
800        }
801      }
802      LOG.debug("Done reading. " + daughterRegions.size() + " regions left.");
803    }
804    LinkedList<Pair<byte[], byte[]>> ret = Lists.newLinkedList();
805    for (Pair<String, String> r : daughterRegions) {
806      ret.add(Pair.newPair(splitAlgo.strToRow(r.getFirst()), splitAlgo.strToRow(r.getSecond())));
807    }
808    return ret;
809  }
810
811  /**
812   * HexStringSplit is a well-known {@link SplitAlgorithm} for choosing region boundaries. The
813   * format of a HexStringSplit region boundary is the ASCII representation of an MD5 checksum, or
814   * any other uniformly distributed hexadecimal value. Row are hex-encoded long values in the range
815   * <b>"00000000" =&gt; "FFFFFFFF"</b> and are left-padded with zeros to keep the same order
816   * lexicographically as if they were binary. Since this split algorithm uses hex strings as keys,
817   * it is easy to read &amp; write in the shell but takes up more space and may be non-intuitive.
818   */
819  public static class HexStringSplit extends NumberStringSplit {
820    final static String DEFAULT_MIN_HEX = "00000000";
821    final static String DEFAULT_MAX_HEX = "FFFFFFFF";
822    final static int RADIX_HEX = 16;
823
824    public HexStringSplit() {
825      super(DEFAULT_MIN_HEX, DEFAULT_MAX_HEX, RADIX_HEX);
826    }
827
828  }
829
830  /**
831   * The format of a DecimalStringSplit region boundary is the ASCII representation of reversed
832   * sequential number, or any other uniformly distributed decimal value. Row are decimal-encoded
833   * long values in the range <b>"00000000" =&gt; "99999999"</b> and are left-padded with zeros to
834   * keep the same order lexicographically as if they were binary.
835   */
836  public static class DecimalStringSplit extends NumberStringSplit {
837    final static String DEFAULT_MIN_DEC = "00000000";
838    final static String DEFAULT_MAX_DEC = "99999999";
839    final static int RADIX_DEC = 10;
840
841    public DecimalStringSplit() {
842      super(DEFAULT_MIN_DEC, DEFAULT_MAX_DEC, RADIX_DEC);
843    }
844
845  }
846
847  public abstract static class NumberStringSplit implements SplitAlgorithm {
848
849    String firstRow;
850    BigInteger firstRowInt;
851    String lastRow;
852    BigInteger lastRowInt;
853    int rowComparisonLength;
854    int radix;
855
856    NumberStringSplit(String minRow, String maxRow, int radix) {
857      this.firstRow = minRow;
858      this.lastRow = maxRow;
859      this.radix = radix;
860      this.firstRowInt = BigInteger.ZERO;
861      this.lastRowInt = new BigInteger(lastRow, this.radix);
862      this.rowComparisonLength = lastRow.length();
863    }
864
865    @Override
866    public byte[] split(byte[] start, byte[] end) {
867      BigInteger s = convertToBigInteger(start);
868      BigInteger e = convertToBigInteger(end);
869      Preconditions.checkArgument(!e.equals(BigInteger.ZERO));
870      return convertToByte(split2(s, e));
871    }
872
873    @Override
874    public byte[][] split(int n) {
875      Preconditions.checkArgument(lastRowInt.compareTo(firstRowInt) > 0,
876        "last row (%s) is configured less than first row (%s)", lastRow, firstRow);
877      // +1 to range because the last row is inclusive
878      BigInteger range = lastRowInt.subtract(firstRowInt).add(BigInteger.ONE);
879      Preconditions.checkState(range.compareTo(BigInteger.valueOf(n)) >= 0,
880        "split granularity (%s) is greater than the range (%s)", n, range);
881
882      BigInteger[] splits = new BigInteger[n - 1];
883      BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(n));
884      for (int i = 1; i < n; i++) {
885        // NOTE: this means the last region gets all the slop.
886        // This is not a big deal if we're assuming n << MAXHEX
887        splits[i - 1] = firstRowInt.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i)));
888      }
889      return convertToBytes(splits);
890    }
891
892    @Override
893    public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive) {
894      BigInteger s = convertToBigInteger(start);
895      BigInteger e = convertToBigInteger(end);
896
897      Preconditions.checkArgument(e.compareTo(s) > 0,
898        "last row (%s) is configured less than first row (%s)", rowToStr(end), end);
899      // +1 to range because the last row is inclusive
900      BigInteger range = e.subtract(s).add(BigInteger.ONE);
901      Preconditions.checkState(range.compareTo(BigInteger.valueOf(numSplits)) >= 0,
902        "split granularity (%s) is greater than the range (%s)", numSplits, range);
903
904      BigInteger[] splits = new BigInteger[numSplits - 1];
905      BigInteger sizeOfEachSplit = range.divide(BigInteger.valueOf(numSplits));
906      for (int i = 1; i < numSplits; i++) {
907        // NOTE: this means the last region gets all the slop.
908        // This is not a big deal if we're assuming n << MAXHEX
909        splits[i - 1] = s.add(sizeOfEachSplit.multiply(BigInteger.valueOf(i)));
910      }
911
912      if (inclusive) {
913        BigInteger[] inclusiveSplitPoints = new BigInteger[numSplits + 1];
914        inclusiveSplitPoints[0] = convertToBigInteger(start);
915        inclusiveSplitPoints[numSplits] = convertToBigInteger(end);
916        System.arraycopy(splits, 0, inclusiveSplitPoints, 1, splits.length);
917        return convertToBytes(inclusiveSplitPoints);
918      } else {
919        return convertToBytes(splits);
920      }
921    }
922
923    @Override
924    public byte[] firstRow() {
925      return convertToByte(firstRowInt);
926    }
927
928    @Override
929    public byte[] lastRow() {
930      return convertToByte(lastRowInt);
931    }
932
933    @Override
934    public void setFirstRow(String userInput) {
935      firstRow = userInput;
936      firstRowInt = new BigInteger(firstRow, radix);
937    }
938
939    @Override
940    public void setLastRow(String userInput) {
941      lastRow = userInput;
942      lastRowInt = new BigInteger(lastRow, radix);
943      // Precondition: lastRow > firstRow, so last's length is the greater
944      rowComparisonLength = lastRow.length();
945    }
946
947    @Override
948    public byte[] strToRow(String in) {
949      return convertToByte(new BigInteger(in, radix));
950    }
951
952    @Override
953    public String rowToStr(byte[] row) {
954      return Bytes.toStringBinary(row);
955    }
956
957    @Override
958    public String separator() {
959      return " ";
960    }
961
962    @Override
963    public void setFirstRow(byte[] userInput) {
964      firstRow = Bytes.toString(userInput);
965    }
966
967    @Override
968    public void setLastRow(byte[] userInput) {
969      lastRow = Bytes.toString(userInput);
970    }
971
972    /**
973     * Divide 2 numbers in half (for split algorithm)
974     * @param a number #1
975     * @param b number #2
976     * @return the midpoint of the 2 numbers
977     */
978    public BigInteger split2(BigInteger a, BigInteger b) {
979      return a.add(b).divide(BigInteger.valueOf(2)).abs();
980    }
981
982    /**
983     * Returns an array of bytes corresponding to an array of BigIntegers
984     * @param bigIntegers numbers to convert
985     * @return bytes corresponding to the bigIntegers
986     */
987    public byte[][] convertToBytes(BigInteger[] bigIntegers) {
988      byte[][] returnBytes = new byte[bigIntegers.length][];
989      for (int i = 0; i < bigIntegers.length; i++) {
990        returnBytes[i] = convertToByte(bigIntegers[i]);
991      }
992      return returnBytes;
993    }
994
995    /**
996     * Returns the bytes corresponding to the BigInteger
997     * @param bigInteger number to convert
998     * @param pad        padding length
999     * @return byte corresponding to input BigInteger
1000     */
1001    public byte[] convertToByte(BigInteger bigInteger, int pad) {
1002      String bigIntegerString = bigInteger.toString(radix);
1003      bigIntegerString = StringUtils.leftPad(bigIntegerString, pad, '0');
1004      return Bytes.toBytes(bigIntegerString);
1005    }
1006
1007    /**
1008     * Returns the bytes corresponding to the BigInteger
1009     * @param bigInteger number to convert
1010     * @return corresponding bytes
1011     */
1012    public byte[] convertToByte(BigInteger bigInteger) {
1013      return convertToByte(bigInteger, rowComparisonLength);
1014    }
1015
1016    /**
1017     * Returns the BigInteger represented by the byte array
1018     * @param row byte array representing row
1019     * @return the corresponding BigInteger
1020     */
1021    public BigInteger convertToBigInteger(byte[] row) {
1022      return (row.length > 0) ? new BigInteger(Bytes.toString(row), radix) : BigInteger.ZERO;
1023    }
1024
1025    @Override
1026    public String toString() {
1027      return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + ","
1028        + rowToStr(lastRow()) + "]";
1029    }
1030  }
1031
1032  /**
1033   * A SplitAlgorithm that divides the space of possible keys evenly. Useful when the keys are
1034   * approximately uniform random bytes (e.g. hashes). Rows are raw byte values in the range <b>00
1035   * =&gt; FF</b> and are right-padded with zeros to keep the same memcmp() order. This is the
1036   * natural algorithm to use for a byte[] environment and saves space, but is not necessarily the
1037   * easiest for readability.
1038   */
1039  public static class UniformSplit implements SplitAlgorithm {
1040    static final byte xFF = (byte) 0xFF;
1041    byte[] firstRowBytes = ArrayUtils.EMPTY_BYTE_ARRAY;
1042    byte[] lastRowBytes = new byte[] { xFF, xFF, xFF, xFF, xFF, xFF, xFF, xFF };
1043
1044    @Override
1045    public byte[] split(byte[] start, byte[] end) {
1046      return Bytes.split(start, end, 1)[1];
1047    }
1048
1049    @Override
1050    public byte[][] split(int numRegions) {
1051      Preconditions.checkArgument(Bytes.compareTo(lastRowBytes, firstRowBytes) > 0,
1052        "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(lastRowBytes),
1053        Bytes.toStringBinary(firstRowBytes));
1054
1055      byte[][] splits = Bytes.split(firstRowBytes, lastRowBytes, true, numRegions - 1);
1056      Preconditions.checkState(splits != null,
1057        "Could not split region with given user input: " + this);
1058
1059      // remove endpoints, which are included in the splits list
1060
1061      return splits == null ? null : Arrays.copyOfRange(splits, 1, splits.length - 1);
1062    }
1063
1064    @Override
1065    public byte[][] split(byte[] start, byte[] end, int numSplits, boolean inclusive) {
1066      if (Arrays.equals(start, HConstants.EMPTY_BYTE_ARRAY)) {
1067        start = firstRowBytes;
1068      }
1069      if (Arrays.equals(end, HConstants.EMPTY_BYTE_ARRAY)) {
1070        end = lastRowBytes;
1071      }
1072      Preconditions.checkArgument(Bytes.compareTo(end, start) > 0,
1073        "last row (%s) is configured less than first row (%s)", Bytes.toStringBinary(end),
1074        Bytes.toStringBinary(start));
1075
1076      byte[][] splits = Bytes.split(start, end, true, numSplits - 1);
1077      Preconditions.checkState(splits != null,
1078        "Could not calculate input splits with given user input: " + this);
1079      if (inclusive) {
1080        return splits;
1081      } else {
1082        // remove endpoints, which are included in the splits list
1083        return Arrays.copyOfRange(splits, 1, splits.length - 1);
1084      }
1085    }
1086
1087    @Override
1088    public byte[] firstRow() {
1089      return firstRowBytes;
1090    }
1091
1092    @Override
1093    public byte[] lastRow() {
1094      return lastRowBytes;
1095    }
1096
1097    @Override
1098    public void setFirstRow(String userInput) {
1099      firstRowBytes = Bytes.toBytesBinary(userInput);
1100    }
1101
1102    @Override
1103    public void setLastRow(String userInput) {
1104      lastRowBytes = Bytes.toBytesBinary(userInput);
1105    }
1106
1107    @Override
1108    public void setFirstRow(byte[] userInput) {
1109      firstRowBytes = userInput;
1110    }
1111
1112    @Override
1113    public void setLastRow(byte[] userInput) {
1114      lastRowBytes = userInput;
1115    }
1116
1117    @Override
1118    public byte[] strToRow(String input) {
1119      return Bytes.toBytesBinary(input);
1120    }
1121
1122    @Override
1123    public String rowToStr(byte[] row) {
1124      return Bytes.toStringBinary(row);
1125    }
1126
1127    @Override
1128    public String separator() {
1129      return ",";
1130    }
1131
1132    @Override
1133    public String toString() {
1134      return this.getClass().getSimpleName() + " [" + rowToStr(firstRow()) + ","
1135        + rowToStr(lastRow()) + "]";
1136    }
1137  }
1138}