1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import com.google.common.annotations.VisibleForTesting;
21  import com.google.protobuf.ServiceException;
22  
23  import org.apache.commons.logging.Log;
24  import org.apache.commons.logging.LogFactory;
25  import org.apache.hadoop.conf.Configuration;
26  import org.apache.hadoop.hbase.classification.InterfaceAudience;
27  import org.apache.hadoop.hbase.client.ClusterConnection;
28  import org.apache.hadoop.hbase.client.Connection;
29  import org.apache.hadoop.hbase.client.ConnectionFactory;
30  import org.apache.hadoop.hbase.client.Delete;
31  import org.apache.hadoop.hbase.client.Get;
32  import org.apache.hadoop.hbase.client.HTable;
33  import org.apache.hadoop.hbase.client.Mutation;
34  import org.apache.hadoop.hbase.client.Put;
35  import org.apache.hadoop.hbase.client.RegionLocator;
36  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
37  import org.apache.hadoop.hbase.client.Result;
38  import org.apache.hadoop.hbase.client.ResultScanner;
39  import org.apache.hadoop.hbase.client.Scan;
40  import org.apache.hadoop.hbase.client.Table;
41  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
42  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
43  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
44  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
45  import org.apache.hadoop.hbase.util.Bytes;
46  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
47  import org.apache.hadoop.hbase.util.Pair;
48  import org.apache.hadoop.hbase.util.PairOfSameType;
49  import org.apache.hadoop.hbase.util.Threads;
50  
51  import java.io.IOException;
52  import java.io.InterruptedIOException;
53  import java.util.ArrayList;
54  import java.util.List;
55  import java.util.Map;
56  import java.util.NavigableMap;
57  import java.util.Set;
58  import java.util.SortedMap;
59  import java.util.TreeMap;
60  import java.util.regex.Matcher;
61  import java.util.regex.Pattern;
62  
63  /**
64   * Read/write operations on region and assignment information stored in
65   * <code>hbase:meta</code>.
66   *
67   * Some of the methods of this class take a ZooKeeperWatcher as a parameter. The only
68   * reason for this is that, when used on the client side (like from HBaseAdmin), we want a
69   * short-lived connection (opened before each operation, closed right after), while when
70   * used on HM or HRS (like in AssignmentManager) we want a permanent connection.
71   */
72  @InterfaceAudience.Private
73  public class MetaTableAccessor {
74  
75    /*
76     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
77     * same table range (table, startKey, endKey). For every range, there will be at least one
78     * HRI defined, which is called the default replica.
79     *
80     * Meta layout (as of 0.98 + HBASE-10070) is like:
81     * For each table range, there is a single row, formatted like:
82     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
83     * of the default region replica.
84     * Columns are:
85     * info:regioninfo         => contains serialized HRI for the default region replica
86     * info:server             => contains hostname:port (in string form) for the server hosting
87     *                            the default regionInfo replica
88     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
89     *                            regionInfo replica with replicaId
90     * info:serverstartcode    => contains server start code (in binary long form) for the server
91     *                            hosting the default regionInfo replica
92     * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
93     *                                     server hosting the regionInfo replica with replicaId
94     * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
95     *                             the server opened the region with default replicaId
96     * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
97     *                             the time the server opened the region with replicaId
98     * info:splitA              => contains a serialized HRI for the first daughter region if the
99     *                             region is split
100    * info:splitB              => contains a serialized HRI for the second daughter region if the
101    *                             region is split
102    * info:mergeA              => contains a serialized HRI for the first parent region if the
103    *                             region is the result of a merge
104    * info:mergeB              => contains a serialized HRI for the second parent region if the
105    *                             region is the result of a merge
106    *
107    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
108    * and should not leak out of it (through Result objects, etc)
109    */
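
  /*
   * Illustrative sketch of the layout described above: for a table range hosted with two
   * replicas, the single meta row for that range might carry columns like the following
   * (hostnames, ports and numbers are hypothetical; replica qualifiers carry a 4-hex-digit
   * replicaId suffix, e.g. "_0001" for replicaId 1):
   *
   *   info:regioninfo            => serialized HRI of the default replica
   *   info:server                => 'host1:16020'
   *   info:serverstartcode       => 1400000000000
   *   info:seqnumDuringOpen      => 2
   *   info:server_0001           => 'host2:16020'
   *   info:serverstartcode_0001  => 1400000000123
   *   info:seqnumDuringOpen_0001 => 2
   */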
110 
111   private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
112 
113   static final byte [] META_REGION_PREFIX;
114   static {
115     // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
116     // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
117     int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
118     META_REGION_PREFIX = new byte [len];
119     System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
120       META_REGION_PREFIX, 0, len);
121   }
122 
123   /** The delimiter for meta columns for replicaIds > 0 */
124   protected static final char META_REPLICA_ID_DELIMITER = '_';
125 
126   /** A regex for parsing server columns from meta. See above javadoc for meta layout */
127   private static final Pattern SERVER_COLUMN_PATTERN
128     = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
129 
130   ////////////////////////
131   // Reading operations //
132   ////////////////////////
133 
134   /**
135    * Performs a full scan of the <code>hbase:meta</code> table.
136    * @return List of {@link org.apache.hadoop.hbase.client.Result}
137    * @throws IOException
138    */
139   public static List<Result> fullScanOfMeta(Connection connection)
140   throws IOException {
141     CollectAllVisitor v = new CollectAllVisitor();
142     fullScan(connection, v, null);
143     return v.getResults();
144   }
145 
146   /**
147    * Performs a full scan of <code>hbase:meta</code>.
148    * @param connection connection we're using
149    * @param visitor Visitor invoked against each row.
150    * @throws IOException
151    */
152   public static void fullScan(Connection connection,
153       final Visitor visitor)
154   throws IOException {
155     fullScan(connection, visitor, null);
156   }
157 
158   /**
159    * Performs a full scan of <code>hbase:meta</code>.
160    * @param connection connection we're using
161    * @return List of {@link Result}
162    * @throws IOException
163    */
164   public static List<Result> fullScan(Connection connection)
165     throws IOException {
166     CollectAllVisitor v = new CollectAllVisitor();
167     fullScan(connection, v, null);
168     return v.getResults();
169   }
170 
171   /**
172    * Callers should call close on the returned {@link Table} instance.
173    * @param connection connection we're using to access Meta
174    * @return A {@link Table} for <code>hbase:meta</code>
175    * @throws IOException
176    */
177   static Table getMetaHTable(final Connection connection)
178   throws IOException {
179     // We used to pass whole CatalogTracker in here, now we just pass in Connection
180     if (connection == null || connection.isClosed()) {
181       throw new NullPointerException("No connection");
182     }
183     // If the passed in 'connection' is 'managed' -- i.e. every second test uses
184     // a Table or an HBaseAdmin with managed connections -- then doing
185     // connection.getTable will throw an exception saying you are NOT to use
186     // managed connections getting tables.  Leaving this as it is for now. Will
187     // revisit when inclined to change all tests.  User code probably makes use of
188     // managed connections too so don't change it till post hbase 1.0.
189     //
190     // There should still be a way to use this method with an unmanaged connection.
191     if (connection instanceof ClusterConnection) {
192       if (((ClusterConnection) connection).isManaged()) {
193         return new HTable(TableName.META_TABLE_NAME, (ClusterConnection) connection);
194       }
195     }
196     return connection.getTable(TableName.META_TABLE_NAME);
197   }
198 
199   /**
200    * @param t Table to use (will be closed when done).
201    * @param g Get to run
202    * @throws IOException
203    */
204   private static Result get(final Table t, final Get g) throws IOException {
205     try {
206       return t.get(g);
207     } finally {
208       t.close();
209     }
210   }
211 
212   /**
213    * Gets the region info and assignment for the specified region.
214    * @param connection connection we're using
215    * @param regionName Region to lookup.
216    * @return Location and HRegionInfo for <code>regionName</code>
217    * @throws IOException
218    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
219    */
220   @Deprecated
221   public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
222     throws IOException {
223     HRegionLocation location = getRegionLocation(connection, regionName);
224     return location == null
225       ? null
226       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
227   }
228 
229   /**
230    * Returns the HRegionLocation from meta for the given region
231    * @param connection connection we're using
232    * @param regionName region we're looking for
233    * @return HRegionLocation for the given region
234    * @throws IOException
235    */
236   public static HRegionLocation getRegionLocation(Connection connection,
237                                                   byte[] regionName) throws IOException {
238     byte[] row = regionName;
239     HRegionInfo parsedInfo = null;
240     try {
241       parsedInfo = parseRegionInfoFromRegionName(regionName);
242       row = getMetaKeyForRegion(parsedInfo);
243     } catch (Exception parseEx) {
244       // Ignore. This is used with tableName passed as regionName.
245     }
246     Get get = new Get(row);
247     get.addFamily(HConstants.CATALOG_FAMILY);
248     Result r = get(getMetaHTable(connection), get);
249     RegionLocations locations = getRegionLocations(r);
250     return locations == null
251       ? null
252       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
253   }
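
  /*
   * Usage sketch for the lookups above, assuming an open, unmanaged Connection named 'conn'
   * and a region name (or meta row key) in 'regionName':
   *
   *   HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, regionName);
   *   ServerName sn = loc == null ? null : loc.getServerName();
   */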
254 
255   /**
256    * Returns the HRegionLocation from meta for the given region
257    * @param connection connection we're using
258    * @param regionInfo region information
259    * @return HRegionLocation for the given region
260    * @throws IOException
261    */
262   public static HRegionLocation getRegionLocation(Connection connection,
263                                                   HRegionInfo regionInfo) throws IOException {
264     byte[] row = getMetaKeyForRegion(regionInfo);
265     Get get = new Get(row);
266     get.addFamily(HConstants.CATALOG_FAMILY);
267     Result r = get(getMetaHTable(connection), get);
268     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
269   }
270 
271   /** Returns the row key to use for this regionInfo */
272   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
273     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
274   }
275 
276   /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
277    * are stored in the name, so the returned object should only be used for the fields
278    * that are part of the regionName.
279    */
280   protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
281     throws IOException {
282     byte[][] fields = HRegionInfo.parseRegionName(regionName);
283     long regionId =  Long.parseLong(Bytes.toString(fields[2]));
284     int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
285     return new HRegionInfo(
286       TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
287   }
288 
289   /**
290    * Gets the result in hbase:meta for the specified region.
291    * @param connection connection we're using
292    * @param regionName region we're looking for
293    * @return result of the specified region
294    * @throws IOException
295    */
296   public static Result getRegionResult(Connection connection,
297       byte[] regionName) throws IOException {
298     Get get = new Get(regionName);
299     get.addFamily(HConstants.CATALOG_FAMILY);
300     return get(getMetaHTable(connection), get);
301   }
302 
303   /**
304    * Gets the regions referenced by the merge qualifiers of the specified merged region.
305    * @return null if the region doesn't contain merge qualifiers, else the two merging regions
306    * @throws IOException
307    */
308   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
309       Connection connection, byte[] regionName) throws IOException {
310     Result result = getRegionResult(connection, regionName);
311     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
312     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
313     if (mergeA == null && mergeB == null) {
314       return null;
315     }
316     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
317   }
318 
319   /**
320    * Checks if the specified table exists.  Looks at the hbase:meta table reached
321    * through the passed connection.
322    * @param connection connection we're using
323    * @param tableName table to check
324    * @return true if the table exists in meta, false if not
325    * @throws IOException
326    */
327   public static boolean tableExists(Connection connection,
328       final TableName tableName)
329   throws IOException {
330     if (tableName.equals(TableName.META_TABLE_NAME)) {
331       // Catalog tables always exist.
332       return true;
333     }
334     // Make a version of CollectingVisitor that only collects the first region of the table.
335     CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
336       private HRegionInfo current = null;
337 
338       @Override
339       public boolean visit(Result r) throws IOException {
340         RegionLocations locations = getRegionLocations(r);
341         if (locations == null || locations.getRegionLocation().getRegionInfo() == null) {
342           LOG.warn("No serialized HRegionInfo in " + r);
343           return true;
344         }
345         this.current = locations.getRegionLocation().getRegionInfo();
346         if (this.current == null) {
347           LOG.warn("No serialized HRegionInfo in " + r);
348           return true;
349         }
350         if (!isInsideTable(this.current, tableName)) return false;
351         // Else call super and add this Result to the collection.
352         super.visit(r);
353         // Stop collecting regions from table after we get one.
354         return false;
355       }
356 
357       @Override
358       void add(Result r) {
359         // Add the current HRI.
360         this.results.add(this.current);
361       }
362     };
363     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
364     // If visitor has results >= 1 then table exists.
365     return visitor.getResults().size() >= 1;
366   }
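
  /*
   * Usage sketch, assuming an open, unmanaged Connection named 'conn' and a hypothetical
   * table "t1":
   *
   *   boolean exists = MetaTableAccessor.tableExists(conn, TableName.valueOf("t1"));
   */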
367 
368   /**
369    * Gets all of the regions of the specified table. Do not use this method
370    * to get meta table regions, use methods in MetaTableLocator instead.
371    * @param connection connection we're using
372    * @param tableName table we're looking for
373    * @return Ordered list of {@link HRegionInfo}.
374    * @throws IOException
375    */
376   public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
377   throws IOException {
378     return getTableRegions(connection, tableName, false);
379   }
380 
381   /**
382    * Gets all of the regions of the specified table. Do not use this method
383    * to get meta table regions, use methods in MetaTableLocator instead.
384    * @param connection connection we're using
385    * @param tableName table we're looking for
386    * @param excludeOfflinedSplitParents If true, do not include offlined split
387    * parents in the return.
388    * @return Ordered list of {@link HRegionInfo}.
389    * @throws IOException
390    */
391   public static List<HRegionInfo> getTableRegions(Connection connection,
392       TableName tableName, final boolean excludeOfflinedSplitParents)
393       throws IOException {
394     List<Pair<HRegionInfo, ServerName>> result;
395 
396     result = getTableRegionsAndLocations(connection, tableName,
397       excludeOfflinedSplitParents);
398 
399     return getListOfHRegionInfos(result);
400   }
401 
402   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
403     if (pairs == null || pairs.isEmpty()) return null;
404     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
405     for (Pair<HRegionInfo, ServerName> pair: pairs) {
406       result.add(pair.getFirst());
407     }
408     return result;
409   }
410 
411   /**
412    * @param current region of current table we're working with
413    * @param tableName table we're checking against
414    * @return True if the table name of <code>current</code> is equal to
415    * <code>tableName</code>
416    */
417   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
418     return tableName.equals(current.getTable());
419   }
420 
421   /**
422    * @param tableName table we're working with
423    * @return Place to start Scan in <code>hbase:meta</code> when passed a
424    * <code>tableName</code>; returns &lt;tableName&gt;&lt;,&gt;&lt;,&gt;
425    */
426   static byte [] getTableStartRowForMeta(TableName tableName) {
427     byte [] startRow = new byte[tableName.getName().length + 2];
428     System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
429     startRow[startRow.length - 2] = HConstants.DELIMITER;
430     startRow[startRow.length - 1] = HConstants.DELIMITER;
431     return startRow;
432   }
433 
434   /**
435    * This method creates a Scan object that will only scan catalog rows that
436    * belong to the specified table. It doesn't specify any columns.
437    * This is a better alternative to just using a start row and scanning until
438    * it hits a new table, since that requires parsing the HRI to get the table
439    * name.
440    * @param tableName the table to scan for
441    * @return configured Scan object
442    */
443   public static Scan getScanForTableName(TableName tableName) {
444     String strName = tableName.getNameAsString();
445     // Start key is just the table name with delimiters
446     byte[] startKey = Bytes.toBytes(strName + ",,");
447     // Stop key appends the smallest possible char to the table name
448     byte[] stopKey = Bytes.toBytes(strName + " ,,");
449 
450     Scan scan = new Scan(startKey);
451     scan.setStopRow(stopKey);
452     return scan;
453   }
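
  /*
   * Usage sketch: the returned Scan is meant to be run against hbase:meta. Assuming an
   * open, unmanaged Connection named 'conn' and a hypothetical table "t1":
   *
   *   try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
   *        ResultScanner rs = meta.getScanner(getScanForTableName(TableName.valueOf("t1")))) {
   *     for (Result r : rs) {
   *       HRegionInfo hri = getHRegionInfo(r);
   *       // ... use hri ...
   *     }
   *   }
   */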
454 
455   /**
456    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
457    * @param connection connection we're using
458    * @param tableName table we're looking for
459    * @return List of regioninfos and the server name.
460    * @throws IOException
461    */
462   public static List<Pair<HRegionInfo, ServerName>>
463     getTableRegionsAndLocations(Connection connection, TableName tableName)
464       throws IOException {
465     return getTableRegionsAndLocations(connection, tableName, true);
466   }
467 
468   /**
469    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
470    * @param connection connection we're using
471    * @param tableName table to work with
472    * @return List of regioninfos and server addresses.
473    * @throws IOException
474    */
475   public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
476         Connection connection, final TableName tableName,
477       final boolean excludeOfflinedSplitParents) throws IOException {
478     if (tableName.equals(TableName.META_TABLE_NAME)) {
479       throw new IOException("This method can't be used to locate meta regions;"
480         + " use MetaTableLocator instead");
481     }
482     // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
483     CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
484       new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
485         private RegionLocations current = null;
486 
487         @Override
488         public boolean visit(Result r) throws IOException {
489           current = getRegionLocations(r);
490           if (current == null || current.getRegionLocation().getRegionInfo() == null) {
491             LOG.warn("No serialized HRegionInfo in " + r);
492             return true;
493           }
494           HRegionInfo hri = current.getRegionLocation().getRegionInfo();
495           if (!isInsideTable(hri, tableName)) return false;
496           if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
497           // Else call super and add this Result to the collection.
498           return super.visit(r);
499         }
500 
501         @Override
502         void add(Result r) {
503           if (current == null) {
504             return;
505           }
506           for (HRegionLocation loc : current.getRegionLocations()) {
507             if (loc != null) {
508               this.results.add(new Pair<HRegionInfo, ServerName>(
509                 loc.getRegionInfo(), loc.getServerName()));
510             }
511           }
512         }
513       };
514     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
515     return visitor.getResults();
516   }
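
  /*
   * Usage sketch, assuming an open, unmanaged Connection named 'conn' and a hypothetical
   * table "t1":
   *
   *   for (Pair<HRegionInfo, ServerName> p
   *       : MetaTableAccessor.getTableRegionsAndLocations(conn, TableName.valueOf("t1"))) {
   *     HRegionInfo hri = p.getFirst();
   *     ServerName  sn  = p.getSecond();  // may be null if the region is not assigned
   *   }
   */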
517 
518   /**
519    * @param connection connection we're using
520    * @param serverName server whose regions we're interested in
521    * @return List of user regions installed on this server (does not include
522    * catalog regions).
523    * @throws IOException
524    */
525   public static NavigableMap<HRegionInfo, Result>
526   getServerUserRegions(Connection connection, final ServerName serverName)
527     throws IOException {
528     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
529     // Fill the above hris map with entries from hbase:meta that have the passed
530     // servername.
531     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
532       @Override
533       void add(Result r) {
534         if (r == null || r.isEmpty()) return;
535         RegionLocations locations = getRegionLocations(r);
536         if (locations == null) return;
537         for (HRegionLocation loc : locations.getRegionLocations()) {
538           if (loc != null) {
539             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
540               hris.put(loc.getRegionInfo(), r);
541             }
542           }
543         }
544       }
545     };
546     fullScan(connection, v);
547     return hris;
548   }
549 
550   public static void fullScanMetaAndPrint(Connection connection)
551     throws IOException {
552     Visitor v = new Visitor() {
553       @Override
554       public boolean visit(Result r) throws IOException {
555         if (r ==  null || r.isEmpty()) return true;
556         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
557         RegionLocations locations = getRegionLocations(r);
558         if (locations == null) return true;
559         for (HRegionLocation loc : locations.getRegionLocations()) {
560           if (loc != null) {
561             LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
562           }
563         }
564         return true;
565       }
566     };
567     fullScan(connection, v);
568   }
569 
570   /**
571    * Performs a full scan of a catalog table.
572    * @param connection connection we're using
573    * @param visitor Visitor invoked against each row.
574    * @param startrow Where to start the scan. Pass null if you want to begin the scan
575    * at the first row of <code>hbase:meta</code>.
577    * @throws IOException
578    */
579   public static void fullScan(Connection connection,
580     final Visitor visitor, final byte [] startrow)
581   throws IOException {
582     Scan scan = new Scan();
583     if (startrow != null) scan.setStartRow(startrow);
584     if (startrow == null) {
585       int caching = connection.getConfiguration()
586           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
587       scan.setCaching(caching);
588     }
589     scan.addFamily(HConstants.CATALOG_FAMILY);
590     Table metaTable = getMetaHTable(connection);
591     ResultScanner scanner = null;
592     try {
593       scanner = metaTable.getScanner(scan);
594       Result data;
595       while((data = scanner.next()) != null) {
596         if (data.isEmpty()) continue;
597         // Break if visit returns false.
598         if (!visitor.visit(data)) break;
599       }
600     } finally {
601       if (scanner != null) scanner.close();
602       metaTable.close();
603     }
604   }
605 
606   /**
607    * Returns the column family used for meta columns.
608    * @return HConstants.CATALOG_FAMILY.
609    */
610   protected static byte[] getFamily() {
611     return HConstants.CATALOG_FAMILY;
612   }
613 
614   /**
615    * Returns the column qualifier for serialized region info
616    * @return HConstants.REGIONINFO_QUALIFIER
617    */
618   protected static byte[] getRegionInfoColumn() {
619     return HConstants.REGIONINFO_QUALIFIER;
620   }
621 
622   /**
623    * Returns the column qualifier for server column for replicaId
624    * @param replicaId the replicaId of the region
625    * @return a byte[] for server column qualifier
626    */
627   @VisibleForTesting
628   public static byte[] getServerColumn(int replicaId) {
629     return replicaId == 0
630       ? HConstants.SERVER_QUALIFIER
631       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
632       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
633   }
634 
635   /**
636    * Returns the column qualifier for server start code column for replicaId
637    * @param replicaId the replicaId of the region
638    * @return a byte[] for server start code column qualifier
639    */
640   @VisibleForTesting
641   public static byte[] getStartCodeColumn(int replicaId) {
642     return replicaId == 0
643       ? HConstants.STARTCODE_QUALIFIER
644       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
645       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
646   }
647 
648   /**
649    * Returns the column qualifier for seqNum column for replicaId
650    * @param replicaId the replicaId of the region
651    * @return a byte[] for seqNum column qualifier
652    */
653   @VisibleForTesting
654   public static byte[] getSeqNumColumn(int replicaId) {
655     return replicaId == 0
656       ? HConstants.SEQNUM_QUALIFIER
657       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
658       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
659   }
660 
661   /**
662    * Parses the replicaId from the server column qualifier. See top of the class javadoc
663    * for the actual meta layout
664    * @param serverColumn the column qualifier
665    * @return an int for the replicaId
666    */
667   @VisibleForTesting
668   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
669     String serverStr = Bytes.toString(serverColumn);
670 
671     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
672     if (matcher.matches() && matcher.groupCount() > 0) {
673       String group = matcher.group(1);
674       if (group != null && group.length() > 0) {
675         return Integer.parseInt(group.substring(1), 16);
676       } else {
677         return 0;
678       }
679     }
680     return -1;
681   }
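
  /*
   * Illustrative round trip for the column helpers above: the default replica uses the
   * plain qualifiers, other replicas get '_' plus a 4-hex-digit replicaId, so the
   * following should hold:
   *
   *   Bytes.toString(getServerColumn(0)).equals("server")
   *   Bytes.toString(getServerColumn(1)).equals("server_0001")
   *   parseReplicaIdFromServerColumn(getServerColumn(1)) == 1
   */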
682 
683   /**
684    * Returns a {@link ServerName} from catalog table {@link Result}.
685    * @param r Result to pull from
686    * @return A ServerName instance or null if necessary fields not found or empty.
687    */
688   private static ServerName getServerName(final Result r, final int replicaId) {
689     byte[] serverColumn = getServerColumn(replicaId);
690     Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
691     if (cell == null || cell.getValueLength() == 0) return null;
692     String hostAndPort = Bytes.toString(
693       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
694     byte[] startcodeColumn = getStartCodeColumn(replicaId);
695     cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
696     if (cell == null || cell.getValueLength() == 0) return null;
697     return ServerName.valueOf(hostAndPort,
698       Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
699   }
700 
701   /**
702    * The latest seqnum that the server writing to meta observed when opening the region.
703    * I.e. the seqNum at the time the result of {@link #getServerName(Result, int)} was written.
704    * @param r Result to pull the seqNum from
705    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
706    */
707   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
708     Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId));
709     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
710     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
711   }
712 
713   /**
714    * Returns a {@link RegionLocations} extracted from the result.
715    * @return a {@link RegionLocations} containing all locations for the region range or null if
716    *  we can't deserialize the result.
717    */
718   public static RegionLocations getRegionLocations(final Result r) {
719     if (r == null) return null;
720     HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
721     if (regionInfo == null) return null;
722 
723     List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
724     NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();
725 
726     locations.add(getRegionLocation(r, regionInfo, 0));
727 
728     NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily());
729     if (infoMap == null) return new RegionLocations(locations);
730 
731     // iterate until all serverName columns are seen
732     int replicaId = 0;
733     byte[] serverColumn = getServerColumn(replicaId);
734     SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
735     if (serverMap.isEmpty()) return new RegionLocations(locations);
736 
737     for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
738       replicaId = parseReplicaIdFromServerColumn(entry.getKey());
739       if (replicaId < 0) {
740         break;
741       }
742 
743       locations.add(getRegionLocation(r, regionInfo, replicaId));
744     }
745 
746     return new RegionLocations(locations);
747   }
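
  /*
   * Usage sketch: pairing a full meta scan with this method to list every region location,
   * assuming an open, unmanaged Connection named 'conn':
   *
   *   for (Result r : MetaTableAccessor.fullScan(conn)) {
   *     RegionLocations locs = MetaTableAccessor.getRegionLocations(r);
   *     if (locs == null) continue;
   *     for (HRegionLocation loc : locs.getRegionLocations()) {
   *       if (loc != null) {
   *         // loc.getRegionInfo(), loc.getServerName(), ...
   *       }
   *     }
   *   }
   */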
748 
749   /**
750    * Returns the HRegionLocation parsed from the given meta row Result
751    * for the given regionInfo and replicaId. The regionInfo can be the default region info
752    * for the replica.
753    * @param r the meta row result
754    * @param regionInfo RegionInfo for default replica
755    * @param replicaId the replicaId for the HRegionLocation
756    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
757    */
758   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
759                                                    final int replicaId) {
760     ServerName serverName = getServerName(r, replicaId);
761     long seqNum = getSeqNumDuringOpen(r, replicaId);
762     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
763     return new HRegionLocation(replicaInfo, serverName, seqNum);
764   }
765 
766   /**
767    * Returns HRegionInfo object from the column
768    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
769    * table Result.
770    * @param data a Result object from the catalog table scan
771    * @return HRegionInfo or null
772    */
773   public static HRegionInfo getHRegionInfo(Result data) {
774     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
775   }
776 
777   /**
778    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
779    * <code>qualifier</code> of the catalog table result.
780    * @param r a Result object from the catalog table scan
781    * @param qualifier Column qualifier
782    * @return An HRegionInfo instance or null.
783    */
784   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
785     Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
786     if (cell == null) return null;
787     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
788       cell.getValueOffset(), cell.getValueLength());
789   }
790 
791   /**
792    * Returns the daughter regions by reading the corresponding columns of the catalog table
793    * Result.
794    * @param data a Result object from the catalog table scan
795    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
796    * parent
797    */
798   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) {
799     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
800     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
801 
802     return new PairOfSameType<HRegionInfo>(splitA, splitB);
803   }
804 
805   /**
806    * Returns the merge regions by reading the corresponding columns of the catalog table
807    * Result.
808    * @param data a Result object from the catalog table scan
809    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not the
810    * result of a merge
811    */
812   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) {
813     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
814     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
815 
816     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
817   }
818 
819   /**
820    * Implementations 'visit' a catalog table row.
821    */
822   public interface Visitor {
823     /**
824      * Visit the catalog table row.
825      * @param r A row from catalog table
826      * @return True if we are to proceed scanning the table, else false if
827      * we are to stop now.
828      */
829     boolean visit(final Result r) throws IOException;
830   }
831 
832   /**
833    * A {@link Visitor} that collects content out of passed {@link Result}.
834    */
835   static abstract class CollectingVisitor<T> implements Visitor {
836     final List<T> results = new ArrayList<T>();
837     @Override
838     public boolean visit(Result r) throws IOException {
839       if (r ==  null || r.isEmpty()) return true;
840       add(r);
841       return true;
842     }
843 
844     abstract void add(Result r);
845 
846     /**
847      * @return Collected results; wait till visits complete to collect all
848      * possible results
849      */
850     List<T> getResults() {
851       return this.results;
852     }
853   }
854 
855   /**
856    * Collects all {@link Result}s returned.
857    */
858   static class CollectAllVisitor extends CollectingVisitor<Result> {
859     @Override
860     void add(Result r) {
861       this.results.add(r);
862     }
863   }
864 
865   /**
866    * Count regions in <code>hbase:meta</code> for passed table.
867    * @param c Configuration object
868    * @param tableName table name to count regions for
869    * @return Count of regions in table <code>tableName</code>
870    * @throws IOException
871    */
872   @Deprecated
873   public static int getRegionCount(final Configuration c, final String tableName)
874       throws IOException {
875     return getRegionCount(c, TableName.valueOf(tableName));
876   }
877 
878   /**
879    * Count regions in <code>hbase:meta</code> for passed table.
880    * @param c Configuration object
881    * @param tableName table name to count regions for
882    * @return Count of regions in table <code>tableName</code>
883    * @throws IOException
884    */
885   public static int getRegionCount(final Configuration c, final TableName tableName)
886   throws IOException {
887     try (Connection connection = ConnectionFactory.createConnection(c)) {
888       return getRegionCount(connection, tableName);
889     }
890   }
891 
892   /**
893    * Count regions in <code>hbase:meta</code> for passed table.
894    * @param connection Connection object
895    * @param tableName table name to count regions for
896    * @return Count of regions in table <code>tableName</code>
897    * @throws IOException
898    */
899   public static int getRegionCount(final Connection connection, final TableName tableName)
900   throws IOException {
901     try (RegionLocator locator = connection.getRegionLocator(tableName)) {
902       List<HRegionLocation> locations = locator.getAllRegionLocations();
903       return locations == null? 0: locations.size();
904     }
905   }
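
  /*
   * Usage sketch, assuming a Configuration named 'conf' and a hypothetical table "t1":
   *
   *   int regionCount = MetaTableAccessor.getRegionCount(conf, TableName.valueOf("t1"));
   */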
906 
907   ////////////////////////
908   // Editing operations //
909   ////////////////////////
910 
911   /**
912    * Generates and returns a Put containing the region info for the catalog table
913    */
914   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
915     throws IOException {
916     Put put = new Put(regionInfo.getRegionName());
917     addRegionInfo(put, regionInfo);
918     return put;
919   }
920 
921   /**
922    * Generates and returns a Delete containing the region info for the catalog
923    * table
924    */
925   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
926     if (regionInfo == null) {
927       throw new IllegalArgumentException("Can't make a delete for null region");
928     }
929     Delete delete = new Delete(regionInfo.getRegionName());
930     return delete;
931   }
932 
933   /**
934    * Adds split daughters to the Put
935    */
936   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
937     if (splitA != null) {
938       put.addImmutable(
939         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
940     }
941     if (splitB != null) {
942       put.addImmutable(
943         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
944     }
945     return put;
946   }
947 
948   /**
949    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
950    * @param connection connection we're using
951    * @param p Put to add to hbase:meta
952    * @throws IOException
953    */
954   static void putToMetaTable(final Connection connection, final Put p)
955     throws IOException {
956     put(getMetaHTable(connection), p);
957   }
958 
959   /**
960    * @param t Table to use (will be closed when done).
961    * @param p put to make
962    * @throws IOException
963    */
964   private static void put(final Table t, final Put p) throws IOException {
965     try {
966       t.put(p);
967     } finally {
968       t.close();
969     }
970   }
971 
972   /**
973    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
974    * @param connection connection we're using
975    * @param ps Puts to add to hbase:meta
976    * @throws IOException
977    */
978   public static void putsToMetaTable(final Connection connection, final List<Put> ps)
979     throws IOException {
980     Table t = getMetaHTable(connection);
981     try {
982       t.put(ps);
983     } finally {
984       t.close();
985     }
986   }
987 
988   /**
989    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
990    * @param connection connection we're using
991    * @param d Delete to add to hbase:meta
992    * @throws IOException
993    */
994   static void deleteFromMetaTable(final Connection connection, final Delete d)
995     throws IOException {
996     List<Delete> dels = new ArrayList<Delete>(1);
997     dels.add(d);
998     deleteFromMetaTable(connection, dels);
999   }
1000 
1001   /**
1002    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
1003    * @param connection connection we're using
1004    * @param deletes Deletes to add to hbase:meta  This list should support #remove.
1005    * @throws IOException
1006    */
1007   public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
1008     throws IOException {
1009     Table t = getMetaHTable(connection);
1010     try {
1011       t.delete(deletes);
1012     } finally {
1013       t.close();
1014     }
1015   }
1016 
1017   /**
1018    * Deletes some replica columns corresponding to replicas for the passed rows
1019    * @param metaRows rows in hbase:meta
1020    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1021    * @param numReplicasToRemove how many replicas to remove
1022    * @param connection connection we're using to access meta table
1023    * @throws IOException
1024    */
1025   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1026     int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
1027       throws IOException {
1028     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1029     for (byte[] row : metaRows) {
1030       Delete deleteReplicaLocations = new Delete(row);
1031       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1032         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1033           getServerColumn(i));
1034         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1035           getSeqNumColumn(i));
1036         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1037           getStartCodeColumn(i));
1038       }
1039       deleteFromMetaTable(connection, deleteReplicaLocations);
1040     }
1041   }
1042 
1043   /**
1044    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1045    * @param connection connection we're using
1046    * @param mutations Puts and Deletes to execute on hbase:meta
1047    * @throws IOException
1048    */
1049   public static void mutateMetaTable(final Connection connection,
1050                                      final List<Mutation> mutations)
1051     throws IOException {
1052     Table t = getMetaHTable(connection);
1053     try {
1054       t.batch(mutations);
1055     } catch (InterruptedException e) {
1056       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1057       ie.initCause(e);
1058       throw ie;
1059     } finally {
1060       t.close();
1061     }
1062   }
1063 
1064   /**
1065    * Adds a hbase:meta row for the specified new region.
1066    * @param connection connection we're using
1067    * @param regionInfo region information
1068    * @throws IOException if problem connecting or updating meta
1069    */
1070   public static void addRegionToMeta(Connection connection,
1071                                      HRegionInfo regionInfo)
1072     throws IOException {
1073     putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
1074     LOG.info("Added " + regionInfo.getRegionNameAsString());
1075   }
1076 
1077   /**
1078    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1079    * Table is not flushed or closed.
1080    * @param meta the Table for META
1081    * @param regionInfo region information
1082    * @throws IOException if problem connecting or updating meta
1083    */
1084   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1085     addRegionToMeta(meta, regionInfo, null, null);
1086   }
1087 
1088   /**
1089    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1090    * does not add its daughters as separate rows, but adds information about the daughters
1091    * in the same row as the parent. Use
1092    * {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
1093    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1094    * if you want to do that.
1095    * @param meta the Table for META
1096    * @param regionInfo region information
1097    * @param splitA first split daughter of the parent regionInfo
1098    * @param splitB second split daughter of the parent regionInfo
1099    * @throws IOException if problem connecting or updating meta
1100    */
1101   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1102                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1103     Put put = makePutFromRegionInfo(regionInfo);
1104     addDaughtersToPut(put, splitA, splitB);
1105     meta.put(put);
1106     if (LOG.isDebugEnabled()) {
1107       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1108     }
1109   }
1110 
1111   /**
1112    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1113    * does not add its daughters as separate rows, but adds information about the daughters
1114    * in the same row as the parent. Use
1115    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1116    * if you want to do that.
1117    * @param connection connection we're using
1118    * @param regionInfo region information
1119    * @param splitA first split daughter of the parent regionInfo
1120    * @param splitB second split daughter of the parent regionInfo
1121    * @throws IOException if problem connecting or updating meta
1122    */
1123   public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
1124                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1125     Table meta = getMetaHTable(connection);
1126     try {
1127       addRegionToMeta(meta, regionInfo, splitA, splitB);
1128     } finally {
1129       meta.close();
1130     }
1131   }
1132 
1133   /**
1134    * Adds a hbase:meta row for each of the specified new regions.
1135    * @param connection connection we're using
1136    * @param regionInfos region information list
1137    * @throws IOException if problem connecting or updating meta
1138    */
1139   public static void addRegionsToMeta(Connection connection,
1140                                       List<HRegionInfo> regionInfos)
1141     throws IOException {
1142     List<Put> puts = new ArrayList<Put>();
1143     for (HRegionInfo regionInfo : regionInfos) {
1144       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1145         puts.add(makePutFromRegionInfo(regionInfo));
1146       }
1147     }
1148     putsToMetaTable(connection, puts);
1149     LOG.info("Added " + puts.size());
1150   }
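
  /*
   * Usage sketch (hypothetical table and split point): registering two freshly created
   * regions of table "t1" in meta, assuming an open, unmanaged Connection named 'conn':
   *
   *   HRegionInfo r1 = new HRegionInfo(TableName.valueOf("t1"),
   *       HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("m"));
   *   HRegionInfo r2 = new HRegionInfo(TableName.valueOf("t1"),
   *       Bytes.toBytes("m"), HConstants.EMPTY_BYTE_ARRAY);
   *   MetaTableAccessor.addRegionsToMeta(conn, java.util.Arrays.asList(r1, r2));
   */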
1151 
1152   /**
1153    * Adds a daughter region entry to meta.
1154    * @param regionInfo the region to put
1155    * @param sn the location of the region
1156    * @param openSeqNum the latest sequence number obtained when the region was open
1157    */
1158   public static void addDaughter(final Connection connection,
1159       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1160       throws NotAllMetaRegionsOnlineException, IOException {
1161     Put put = new Put(regionInfo.getRegionName());
1162     addRegionInfo(put, regionInfo);
1163     if (sn != null) {
1164       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1165     }
1166     putToMetaTable(connection, put);
1167     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1168       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1169   }
1170 
1171   /**
1172    * Merge the two regions into one in an atomic operation. Deletes the two
1173    * merging regions in hbase:meta and adds the merged region with the information of
1174    * two merging regions.
1175    * @param connection connection we're using
1176    * @param mergedRegion the merged region
1177    * @param regionA first region being merged
1178    * @param regionB second region being merged
1179    * @param sn the location of the region
1180    * @throws IOException
1181    */
1182   public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
1183       HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws IOException {
1184     Table meta = getMetaHTable(connection);
1185     try {
1186       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1187 
1188       // Put for parent
1189       Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
1190       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1191         regionA.toByteArray());
1192       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1193         regionB.toByteArray());
1194 
1195       // Deletes for merging regions
1196       Delete deleteA = makeDeleteFromRegionInfo(regionA);
1197       Delete deleteB = makeDeleteFromRegionInfo(regionB);
1198 
1199       // The merged region is new, so openSeqNum = 1 is fine.
1200       addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
1201 
1202       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1203         + HConstants.DELIMITER);
1204       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1205     } finally {
1206       meta.close();
1207     }
1208   }
1209 
1210   /**
1211    * Splits the region into two in an atomic operation. Offlines the parent
1212    * region with the information that it is split into two, and also adds
1213    * the daughter regions. Does not add the location information to the daughter
1214    * regions since they are not open yet.
1215    * @param connection connection we're using
1216    * @param parent the parent region which is split
1217    * @param splitA Split daughter region A
1218    * @param splitB Split daughter region B
1219    * @param sn the location of the region
1220    */
1221   public static void splitRegion(final Connection connection,
1222                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1223                                  ServerName sn) throws IOException {
1224     Table meta = getMetaHTable(connection);
1225     try {
1226       HRegionInfo copyOfParent = new HRegionInfo(parent);
1227       copyOfParent.setOffline(true);
1228       copyOfParent.setSplit(true);
1229 
1230       //Put for parent
1231       Put putParent = makePutFromRegionInfo(copyOfParent);
1232       addDaughtersToPut(putParent, splitA, splitB);
1233 
1234       //Puts for daughters
1235       Put putA = makePutFromRegionInfo(splitA);
1236       Put putB = makePutFromRegionInfo(splitB);
1237 
1238       addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1239       addLocation(putB, sn, 1, splitB.getReplicaId());
1240 
1241       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1242       multiMutate(meta, tableRow, putParent, putA, putB);
1243     } finally {
1244       meta.close();
1245     }
1246   }
1247 
1248   /**
1249    * Performs an atomic multi-Mutate operation against the given table.
1250    */
1251   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1252       throws IOException {
1253     CoprocessorRpcChannel channel = table.coprocessorService(row);
1254     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1255       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1256     for (Mutation mutation : mutations) {
1257       if (mutation instanceof Put) {
1258         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1259           ClientProtos.MutationProto.MutationType.PUT, mutation));
1260       } else if (mutation instanceof Delete) {
1261         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1262           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1263       } else {
1264         throw new DoNotRetryIOException("multi in MetaTableAccessor doesn't support "
1265           + mutation.getClass().getName());
1266       }
1267     }
1268 
1269     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1270       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1271     try {
1272       service.mutateRows(null, mmrBuilder.build());
1273     } catch (ServiceException ex) {
1274       ProtobufUtil.toIOException(ex);
1275     }
1276   }
1277 
1278   /**
1279    * Updates the location of the specified region in hbase:meta to be the specified
1280    * server hostname and startcode.
1281    * <p>
1282    * Uses the passed connection to reach the server hosting
1283    * hbase:meta and makes edits to that region.
1284    *
1285    * @param connection connection we're using
1286    * @param regionInfo region to update location of
1287    * @param sn Server name
1288    * @throws IOException
1289    */
1290   public static void updateRegionLocation(Connection connection,
1291                                           HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
1292     throws IOException {
1293     updateLocation(connection, regionInfo, sn, updateSeqNum);
1294   }
1295 
1296   /**
1297    * Updates the location of the specified region to be the specified server.
1298    * <p>
1299    * Connects to the specified server which should be hosting the specified
1300    * catalog region name to perform the edit.
1301    *
1302    * @param connection connection we're using
1303    * @param regionInfo region to update location of
1304    * @param sn Server name
1305    * @param openSeqNum the latest sequence number obtained when the region was open
1306    * @throws IOException In particular could throw {@link java.net.ConnectException}
1307    * if the server is down on the other end.
1308    */
1309   private static void updateLocation(final Connection connection,
1310                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
1311     throws IOException {
1312     // region replicas are kept in the primary region's row
1313     Put put = new Put(getMetaKeyForRegion(regionInfo));
1314     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1315     putToMetaTable(connection, put);
1316     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
1317       " with server=" + sn);
1318   }
1319 
1320   /**
1321    * Deletes the specified region from META.
1322    * @param connection connection we're using
1323    * @param regionInfo region to be deleted from META
1324    * @throws IOException
1325    */
1326   public static void deleteRegion(Connection connection,
1327                                   HRegionInfo regionInfo)
1328     throws IOException {
1329     Delete delete = new Delete(regionInfo.getRegionName());
1330     deleteFromMetaTable(connection, delete);
1331     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1332   }
1333 
1334   /**
1335    * Deletes the specified regions from META.
1336    * @param connection connection we're using
1337    * @param regionsInfo list of regions to be deleted from META
1338    * @throws IOException
1339    */
1340   public static void deleteRegions(Connection connection,
1341                                    List<HRegionInfo> regionsInfo) throws IOException {
1342     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1343     for (HRegionInfo hri: regionsInfo) {
1344       deletes.add(new Delete(hri.getRegionName()));
1345     }
1346     deleteFromMetaTable(connection, deletes);
1347     LOG.info("Deleted " + regionsInfo);
1348   }
1349 
1350   /**
1351    * Adds and removes the specified regions in hbase:meta
1352    * @param connection connection we're using
1353    * @param regionsToRemove list of regions to be deleted from META
1354    * @param regionsToAdd list of regions to be added to META
1355    * @throws IOException
1356    */
1357   public static void mutateRegions(Connection connection,
1358                                    final List<HRegionInfo> regionsToRemove,
1359                                    final List<HRegionInfo> regionsToAdd)
1360     throws IOException {
1361     List<Mutation> mutation = new ArrayList<Mutation>();
1362     if (regionsToRemove != null) {
1363       for (HRegionInfo hri: regionsToRemove) {
1364         mutation.add(new Delete(hri.getRegionName()));
1365       }
1366     }
1367     if (regionsToAdd != null) {
1368       for (HRegionInfo hri: regionsToAdd) {
1369         mutation.add(makePutFromRegionInfo(hri));
1370       }
1371     }
1372     mutateMetaTable(connection, mutation);
1373     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1374       LOG.debug("Deleted " + regionsToRemove);
1375     }
1376     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1377       LOG.debug("Added " + regionsToAdd);
1378     }
1379   }
1380 
1381   /**
1382    * Overwrites the specified regions in hbase:meta
1383    * @param connection connection we're using
1384    * @param regionInfos list of regions to be added to META
1385    * @throws IOException
1386    */
1387   public static void overwriteRegions(Connection connection,
1388                                       List<HRegionInfo> regionInfos) throws IOException {
1389     deleteRegions(connection, regionInfos);
1390     // Why sleep? This is the easiest way to ensure that the previous deletes do not
1391     // eclipse the following puts, that might happen in the same ts from the server.
1392     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
1393     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
1394     Threads.sleep(20);
1395     addRegionsToMeta(connection, regionInfos);
1396     LOG.info("Overwritten " + regionInfos);
1397   }
1398 
1399   /**
1400    * Deletes merge qualifiers for the specified merged region.
1401    * @param connection connection we're using
1402    * @param mergedRegion the merged region whose merge qualifiers should be removed
1403    * @throws IOException
1404    */
1405   public static void deleteMergeQualifiers(Connection connection,
1406                                            final HRegionInfo mergedRegion) throws IOException {
1407     Delete delete = new Delete(mergedRegion.getRegionName());
1408     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
1409     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
1410     deleteFromMetaTable(connection, delete);
1411     LOG.info("Deleted references in merged region "
1412       + mergedRegion.getRegionNameAsString() + ", qualifier="
1413       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1414       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1415   }
1416 
1417   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
1418     throws IOException {
1419     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1420       hri.toByteArray());
1421     return p;
1422   }
1423 
1424   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
1425     // using regionserver's local time as the timestamp of Put.
1426     // See: HBASE-11536
1427     long now = EnvironmentEdgeManager.currentTime();
1428     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now,
1429       Bytes.toBytes(sn.getHostAndPort()));
1430     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now,
1431       Bytes.toBytes(sn.getStartcode()));
1432     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now,
1433       Bytes.toBytes(openSeqNum));
1434     return p;
1435   }
1436 }