1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import java.io.IOException;
21  import java.io.InterruptedIOException;
22  import java.util.ArrayList;
23  import java.util.List;
24  import java.util.Map;
25  import java.util.NavigableMap;
26  import java.util.Set;
27  import java.util.SortedMap;
28  import java.util.TreeMap;
29  import java.util.regex.Matcher;
30  import java.util.regex.Pattern;
31  
32  import org.apache.commons.logging.Log;
33  import org.apache.commons.logging.LogFactory;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.hbase.classification.InterfaceAudience;
36  import org.apache.hadoop.hbase.client.Connection;
37  import org.apache.hadoop.hbase.client.Delete;
38  import org.apache.hadoop.hbase.client.Get;
39  import org.apache.hadoop.hbase.client.HTable;
40  import org.apache.hadoop.hbase.client.Mutation;
41  import org.apache.hadoop.hbase.client.Put;
42  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
43  import org.apache.hadoop.hbase.client.Result;
44  import org.apache.hadoop.hbase.client.ResultScanner;
45  import org.apache.hadoop.hbase.client.Scan;
46  import org.apache.hadoop.hbase.client.Table;
47  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
48  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
49  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
50  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
51  import org.apache.hadoop.hbase.util.Bytes;
52  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
53  import org.apache.hadoop.hbase.util.Pair;
54  import org.apache.hadoop.hbase.util.PairOfSameType;
55  import org.apache.hadoop.hbase.util.Threads;
56  
57  import com.google.common.annotations.VisibleForTesting;
58  import com.google.protobuf.ServiceException;
59  
60  /**
61   * Read/write operations on region and assignment information stored in
62   * <code>hbase:meta</code>.
63   *
64   * Some of the methods of this class take a ZooKeeperWatcher as a parameter. The only reason
65   * for this is that when used on the client side (like from HBaseAdmin), we want a short-lived
66   * connection (opened before each operation, closed right after), while when used on the
67   * HMaster or HRegionServer (like in AssignmentManager) we want a permanent connection.
68   */
69  @InterfaceAudience.Private
70  public class MetaTableAccessor {
71  
72    /*
73     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
74     * same table range (table, startKey, endKey). For every range, there will be at least one
75     * HRI defined which is called default replica.
76     *
77     * Meta layout (as of 0.98 + HBASE-10070) is like:
78     * For each table range, there is a single row, formatted like:
79     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
80     * of the default region replica.
81     * Columns are:
82     * info:regioninfo         => contains serialized HRI for the default region replica
83     * info:server             => contains hostname:port (in string form) for the server hosting
84     *                            the default regionInfo replica
85     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
86     *                            regionInfo replica with replicaId
87     * info:serverstartcode    => contains server start code (in binary long form) for the server
88     *                            hosting the default regionInfo replica
89     * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
90     *                                     server hosting the regionInfo replica with replicaId
91     * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
92     *                             the server opened the region with default replicaId
93     * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
94     *                             the time the server opened the region with replicaId
95     * info:splitA              => contains a serialized HRI for the first daughter region if the
96     *                             region is split
97     * info:splitB              => contains a serialized HRI for the second daughter region if the
98     *                             region is split
99     * info:mergeA              => contains a serialized HRI for the first parent region if the
100    *                             region is the result of a merge
101    * info:mergeB              => contains a serialized HRI for the second parent region if the
102    *                             region is the result of a merge
103    *
104    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
105    * and should not leak out of it (through Result objects, etc)
106    */
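
  /*
   * A minimal illustration of the layout described above for a hypothetical table "t1" whose
   * region has one extra replica (replicaId 1). Column names follow the getServerColumn,
   * getStartCodeColumn and getSeqNumColumn helpers further down; all values are placeholders.
   *
   *   row key: region name of the default replica (see the layout comment above)
   *     info:regioninfo            => serialized HRI of the default replica
   *     info:server                => host1:port1
   *     info:serverstartcode       => 1396453987660
   *     info:seqnumDuringOpen      => 2
   *     info:server_0001           => host2:port2
   *     info:serverstartcode_0001  => 1396453999999
   *     info:seqnumDuringOpen_0001 => 2
   */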
107 
108   private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
109 
110   static final byte [] META_REGION_PREFIX;
111   static {
112     // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
113     // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
114     int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
115     META_REGION_PREFIX = new byte [len];
116     System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
117       META_REGION_PREFIX, 0, len);
118   }
119 
120   /** The delimiter for meta columns for replicaIds > 0 */
121   protected static final char META_REPLICA_ID_DELIMITER = '_';
122 
123   /** A regex for parsing server columns from meta. See above javadoc for meta layout */
124   private static final Pattern SERVER_COLUMN_PATTERN
125     = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
126 
127   ////////////////////////
128   // Reading operations //
129   ////////////////////////
130 
131  /**
132    * Performs a full scan of the <code>hbase:meta</code> table.
133    * @return List of {@link org.apache.hadoop.hbase.client.Result}
134    * @throws IOException
135    */
136   public static List<Result> fullScanOfMeta(Connection connection)
137   throws IOException {
138     CollectAllVisitor v = new CollectAllVisitor();
139     fullScan(connection, v, null);
140     return v.getResults();
141   }
142 
143   /**
144    * Performs a full scan of <code>hbase:meta</code>.
145    * @param connection connection we're using
146    * @param visitor Visitor invoked against each row.
147    * @throws IOException
148    */
149   public static void fullScan(Connection connection,
150       final Visitor visitor)
151   throws IOException {
152     fullScan(connection, visitor, null);
153   }
154 
155   /**
156    * Performs a full scan of <code>hbase:meta</code>.
157    * @param connection connection we're using
158    * @return List of {@link Result}
159    * @throws IOException
160    */
161   public static List<Result> fullScan(Connection connection)
162     throws IOException {
163     CollectAllVisitor v = new CollectAllVisitor();
164     fullScan(connection, v, null);
165     return v.getResults();
166   }
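
  /*
   * A minimal usage sketch (illustrative, not part of this class) of the Visitor-based full scan,
   * assuming an already-open Connection named "connection":
   *
   *   MetaTableAccessor.fullScan(connection, new MetaTableAccessor.Visitor() {
   *     @Override
   *     public boolean visit(Result r) throws IOException {
   *       // Each Result is one hbase:meta row; return false to stop the scan early.
   *       System.out.println(Bytes.toStringBinary(r.getRow()));
   *       return true;
   *     }
   *   });
   */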
167 
168   /**
169    * Callers should call close on the returned {@link HTable} instance.
170    * @param connection connection we're using to access table
171    * @param tableName Table to get an {@link org.apache.hadoop.hbase.client.HTable} against.
172    * @return An {@link org.apache.hadoop.hbase.client.HTable} for <code>tableName</code>
173    * @throws IOException
174    * @SuppressWarnings("deprecation")
175    */
176   private static Table getHTable(final Connection connection,
177       final TableName tableName)
178   throws IOException {
179     // We used to pass whole CatalogTracker in here, now we just pass in Connection
180     if (connection == null || connection.isClosed()) {
181       throw new NullPointerException("No connection");
182     }
183     return new HTable(tableName, connection);
184   }
185 
186   /**
187    * Callers should call close on the returned {@link HTable} instance.
188    * @param connection connection we're using to access Meta
189    * @return An {@link HTable} for <code>hbase:meta</code>
190    * @throws IOException
191    */
192   static Table getMetaHTable(final Connection connection)
193   throws IOException {
194     return getHTable(connection, TableName.META_TABLE_NAME);
195   }
196 
197   /**
198    * @param t Table to use (will be closed when done).
199    * @param g Get to run
200    * @throws IOException
201    */
202   private static Result get(final Table t, final Get g) throws IOException {
203     try {
204       return t.get(g);
205     } finally {
206       t.close();
207     }
208   }
209 
210   /**
211    * Gets the region info and assignment for the specified region.
212    * @param connection connection we're using
213    * @param regionName Region to lookup.
214    * @return Location and HRegionInfo for <code>regionName</code>
215    * @throws IOException
216    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
217    */
218   @Deprecated
219   public static Pair<HRegionInfo, ServerName> getRegion(
220     Connection connection, byte [] regionName)
221     throws IOException {
222     HRegionLocation location = getRegionLocation(connection, regionName);
223     return location == null
224       ? null
225       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
226   }
227 
228   /**
229    * Returns the HRegionLocation from meta for the given region
230    * @param connection connection we're using
231    * @param regionName region we're looking for
232    * @return HRegionLocation for the given region
233    * @throws IOException
234    */
235   public static HRegionLocation getRegionLocation(Connection connection,
236                                                   byte[] regionName) throws IOException {
237     byte[] row = regionName;
238     HRegionInfo parsedInfo = null;
239     try {
240       parsedInfo = parseRegionInfoFromRegionName(regionName);
241       row = getMetaKeyForRegion(parsedInfo);
242     } catch (Exception parseEx) {
243       // Ignore. This is used with tableName passed as regionName.
244     }
245     Get get = new Get(row);
246     get.addFamily(HConstants.CATALOG_FAMILY);
247     Result r = get(getMetaHTable(connection), get);
248     RegionLocations locations = getRegionLocations(r);
249     return locations == null
250       ? null
251       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
252   }
253 
254   /**
255    * Returns the HRegionLocation from meta for the given region
256    * @param connection connection we're using
257    * @param regionInfo region information
258    * @return HRegionLocation for the given region
259    * @throws IOException
260    */
261   public static HRegionLocation getRegionLocation(Connection connection,
262                                                   HRegionInfo regionInfo) throws IOException {
263     byte[] row = getMetaKeyForRegion(regionInfo);
264     Get get = new Get(row);
265     get.addFamily(HConstants.CATALOG_FAMILY);
266     Result r = get(getMetaHTable(connection), get);
267     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
268   }
269 
270   /** Returns the row key to use for this regionInfo */
271   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
272     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
273   }
274 
275   /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
276    * are stored in the name, so the returned object should only be used for the fields
277    * in the regionName.
278    */
279   protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
280     throws IOException {
281     byte[][] fields = HRegionInfo.parseRegionName(regionName);
282     long regionId =  Long.parseLong(Bytes.toString(fields[2]));
283     int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
284     return new HRegionInfo(
285       TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
286   }
287 
288   /**
289    * Gets the result in hbase:meta for the specified region.
290    * @param connection connection we're using
291    * @param regionName region we're looking for
292    * @return result of the specified region
293    * @throws IOException
294    */
295   public static Result getRegionResult(Connection connection,
296       byte[] regionName) throws IOException {
297     Get get = new Get(regionName);
298     get.addFamily(HConstants.CATALOG_FAMILY);
299     return get(getMetaHTable(connection), get);
300   }
301 
302   /**
303    * Get regions from the merge qualifier of the specified merged region
304    * @return null if the region doesn't contain merge qualifiers, else the two regions that were merged
305    * @throws IOException
306    */
307   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
308       Connection connection, byte[] regionName) throws IOException {
309     Result result = getRegionResult(connection, regionName);
310     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
311     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
312     if (mergeA == null && mergeB == null) {
313       return null;
314     }
315     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
316  }
317 
318   /**
319    * Checks if the specified table exists by looking at the <code>hbase:meta</code>
320    * table.
321    * @param connection connection we're using
322    * @param tableName table to check
323    * @return true if the table exists in meta, false if not
324    * @throws IOException
325    */
326   public static boolean tableExists(Connection connection,
327       final TableName tableName)
328   throws IOException {
329     if (tableName.equals(TableName.META_TABLE_NAME)) {
330       // Catalog tables always exist.
331       return true;
332     }
333     // Make a version of CollectingVisitor that only collects the first region for the table.
334     CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
335       private HRegionInfo current = null;
336 
337       @Override
338       public boolean visit(Result r) throws IOException {
339         RegionLocations locations = getRegionLocations(r);
340         if (locations == null || locations.getRegionLocation().getRegionInfo() == null) {
341           LOG.warn("No serialized HRegionInfo in " + r);
342           return true;
343         }
344         this.current = locations.getRegionLocation().getRegionInfo();
345         if (this.current == null) {
346           LOG.warn("No serialized HRegionInfo in " + r);
347           return true;
348         }
349         if (!isInsideTable(this.current, tableName)) return false;
350         // Else call super and add this Result to the collection.
351         super.visit(r);
352         // Stop collecting regions from table after we get one.
353         return false;
354       }
355 
356       @Override
357       void add(Result r) {
358         // Add the current HRI.
359         this.results.add(this.current);
360       }
361     };
362     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
363     // If visitor has results >= 1 then table exists.
364     return visitor.getResults().size() >= 1;
365   }
366 
367   /**
368    * Gets all of the regions of the specified table. Do not use this method
369    * to get meta table regions, use methods in MetaTableLocator instead.
370    * @param connection connection we're using
371    * @param tableName table we're looking for
372    * @return Ordered list of {@link HRegionInfo}.
373    * @throws IOException
374    */
375   public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
376   throws IOException {
377     return getTableRegions(connection, tableName, false);
378   }
379 
380   /**
381    * Gets all of the regions of the specified table. Do not use this method
382    * to get meta table regions, use methods in MetaTableLocator instead.
383    * @param connection connection we're using
384    * @param tableName table we're looking for
385    * @param excludeOfflinedSplitParents If true, do not include offlined split
386    * parents in the return.
387    * @return Ordered list of {@link HRegionInfo}.
388    * @throws IOException
389    */
390   public static List<HRegionInfo> getTableRegions(Connection connection,
391       TableName tableName, final boolean excludeOfflinedSplitParents)
392       throws IOException {
393     List<Pair<HRegionInfo, ServerName>> result;
394     try {
395       result = getTableRegionsAndLocations(connection, tableName,
396         excludeOfflinedSplitParents);
397     } catch (InterruptedException e) {
398       throw (InterruptedIOException)new InterruptedIOException().initCause(e);
399     }
400     return getListOfHRegionInfos(result);
401   }
402 
403   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
404     if (pairs == null || pairs.isEmpty()) return null;
405     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
406     for (Pair<HRegionInfo, ServerName> pair: pairs) {
407       result.add(pair.getFirst());
408     }
409     return result;
410   }
411 
412   /**
413    * @param current region of current table we're working with
414    * @param tableName table we're checking against
415    * @return True if <code>current</code> tablename is equal to
416    * <code>tableName</code>
417    */
418   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
419     return tableName.equals(current.getTable());
420   }
421 
422   /**
423    * @param tableName table we're working with
424    * @return Place to start Scan in <code>hbase:meta</code> when passed a
425    * <code>tableName</code>; returns &lt;tableName&gt; &lt;,&gt; &lt;,&gt;
426    */
427   static byte [] getTableStartRowForMeta(TableName tableName) {
428     byte [] startRow = new byte[tableName.getName().length + 2];
429     System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
430     startRow[startRow.length - 2] = HConstants.DELIMITER;
431     startRow[startRow.length - 1] = HConstants.DELIMITER;
432     return startRow;
433   }
434 
435   /**
436    * This method creates a Scan object that will only scan catalog rows that
437    * belong to the specified table. It doesn't specify any columns.
438    * This is a better alternative to just using a start row and scan until
439    * it hits a new table since that requires parsing the HRI to get the table
440    * name.
441    * @param tableName bytes of table's name
442    * @return configured Scan object
443    */
444   public static Scan getScanForTableName(TableName tableName) {
445     String strName = tableName.getNameAsString();
446     // Start key is just the table name with delimiters
447     byte[] startKey = Bytes.toBytes(strName + ",,");
448     // Stop key appends the smallest possible char to the table name
449     byte[] stopKey = Bytes.toBytes(strName + " ,,");
450 
451     Scan scan = new Scan(startKey);
452     scan.setStopRow(stopKey);
453     return scan;
454   }
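
  /*
   * An illustrative sketch of scanning the catalog rows of a single table with the Scan built
   * above, assuming an open Connection named "connection" and a table named "t1":
   *
   *   Scan scan = MetaTableAccessor.getScanForTableName(TableName.valueOf("t1"));
   *   try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
   *        ResultScanner scanner = meta.getScanner(scan)) {
   *     for (Result r : scanner) {
   *       System.out.println(MetaTableAccessor.getHRegionInfo(r));
   *     }
   *   }
   */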
455 
456   /**
457    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
458    * @param connection connection we're using
459    * @param tableName table we're looking for
460    * @return List of regioninfo and server name pairs.
461    * @throws IOException
462    * @throws InterruptedException
463    */
464   public static List<Pair<HRegionInfo, ServerName>>
465     getTableRegionsAndLocations(Connection connection, TableName tableName)
466       throws IOException, InterruptedException {
467     return getTableRegionsAndLocations(connection, tableName, true);
468   }
469 
470   /**
471    * Do not use this method to get meta table regions, use methods in MetaTableLocator instead.
472    * @param connection connection we're using
473    * @param tableName table to work with
474    * @return List of regioninfo and server address pairs.
475    * @throws IOException
476    * @throws InterruptedException
477    */
478   public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
479         Connection connection, final TableName tableName,
480       final boolean excludeOfflinedSplitParents) throws IOException, InterruptedException {
481     if (tableName.equals(TableName.META_TABLE_NAME)) {
482       throw new IOException("This method can't be used to locate meta regions;"
483         + " use MetaTableLocator instead");
484     }
485     // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
486     CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
487       new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
488         private RegionLocations current = null;
489 
490         @Override
491         public boolean visit(Result r) throws IOException {
492           current = getRegionLocations(r);
493           if (current == null || current.getRegionLocation().getRegionInfo() == null) {
494             LOG.warn("No serialized HRegionInfo in " + r);
495             return true;
496           }
497           HRegionInfo hri = current.getRegionLocation().getRegionInfo();
498           if (!isInsideTable(hri, tableName)) return false;
499           if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
500           // Else call super and add this Result to the collection.
501           return super.visit(r);
502         }
503 
504         @Override
505         void add(Result r) {
506           if (current == null) {
507             return;
508           }
509           for (HRegionLocation loc : current.getRegionLocations()) {
510             if (loc != null) {
511               this.results.add(new Pair<HRegionInfo, ServerName>(
512                 loc.getRegionInfo(), loc.getServerName()));
513             }
514           }
515         }
516       };
517     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
518     return visitor.getResults();
519   }
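
  /*
   * An illustrative use of the lookup above (IOException/InterruptedException handling omitted),
   * assuming an open Connection named "connection" and a table named "t1":
   *
   *   List<Pair<HRegionInfo, ServerName>> regions =
   *     MetaTableAccessor.getTableRegionsAndLocations(connection, TableName.valueOf("t1"));
   *   for (Pair<HRegionInfo, ServerName> pair : regions) {
   *     System.out.println(pair.getFirst().getRegionNameAsString() + " => " + pair.getSecond());
   *   }
   */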
520 
521   /**
522    * @param connection connection we're using
523    * @param serverName server whose regions we're interested in
524    * @return List of user regions installed on this server (does not include
525    * catalog regions).
526    * @throws IOException
527    */
528   public static NavigableMap<HRegionInfo, Result>
529   getServerUserRegions(Connection connection, final ServerName serverName)
530     throws IOException {
531     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
532     // Fill the above hris map with entries from hbase:meta that have the passed
533     // servername.
534     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
535       @Override
536       void add(Result r) {
537         if (r == null || r.isEmpty()) return;
538         RegionLocations locations = getRegionLocations(r);
539         if (locations == null) return;
540         for (HRegionLocation loc : locations.getRegionLocations()) {
541           if (loc != null) {
542             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
543               hris.put(loc.getRegionInfo(), r);
544             }
545           }
546         }
547       }
548     };
549     fullScan(connection, v);
550     return hris;
551   }
552 
553   public static void fullScanMetaAndPrint(Connection connection)
554     throws IOException {
555     Visitor v = new Visitor() {
556       @Override
557       public boolean visit(Result r) throws IOException {
558         if (r ==  null || r.isEmpty()) return true;
559         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
560         RegionLocations locations = getRegionLocations(r);
561         if (locations == null) return true;
562         for (HRegionLocation loc : locations.getRegionLocations()) {
563           if (loc != null) {
564             LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
565           }
566         }
567         return true;
568       }
569     };
570     fullScan(connection, v);
571   }
572 
573   /**
574    * Performs a full scan of a catalog table.
575    * @param connection connection we're using
576    * @param visitor Visitor invoked against each row.
577    * @param startrow Where to start the scan. Pass null if you want to begin the scan
578    * at the first row of <code>hbase:meta</code>.
579    *
580    * @throws IOException
581    */
582   public static void fullScan(Connection connection,
583     final Visitor visitor, final byte [] startrow)
584   throws IOException {
585     Scan scan = new Scan();
586     if (startrow != null) scan.setStartRow(startrow);
587     if (startrow == null) {
588       int caching = connection.getConfiguration()
589           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
590       scan.setCaching(caching);
591     }
592     scan.addFamily(HConstants.CATALOG_FAMILY);
593     Table metaTable = getMetaHTable(connection);
594     ResultScanner scanner = null;
595     try {
596       scanner = metaTable.getScanner(scan);
597       Result data;
598       while((data = scanner.next()) != null) {
599         if (data.isEmpty()) continue;
600         // Break if visit returns false.
601         if (!visitor.visit(data)) break;
602       }
603     } finally {
604       if (scanner != null) scanner.close();
605       metaTable.close();
606     }
607   }
608 
609   /**
610    * Returns the column family used for meta columns.
611    * @return HConstants.CATALOG_FAMILY.
612    */
613   protected static byte[] getFamily() {
614     return HConstants.CATALOG_FAMILY;
615   }
616 
617   /**
618    * Returns the column qualifier for serialized region info
619    * @return HConstants.REGIONINFO_QUALIFIER
620    */
621   protected static byte[] getRegionInfoColumn() {
622     return HConstants.REGIONINFO_QUALIFIER;
623   }
624 
625   /**
626    * Returns the column qualifier for server column for replicaId
627    * @param replicaId the replicaId of the region
628    * @return a byte[] for server column qualifier
629    */
630   @VisibleForTesting
631   public static byte[] getServerColumn(int replicaId) {
632     return replicaId == 0
633       ? HConstants.SERVER_QUALIFIER
634       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
635       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
636   }
637 
638   /**
639    * Returns the column qualifier for server start code column for replicaId
640    * @param replicaId the replicaId of the region
641    * @return a byte[] for server start code column qualifier
642    */
643   @VisibleForTesting
644   public static byte[] getStartCodeColumn(int replicaId) {
645     return replicaId == 0
646       ? HConstants.STARTCODE_QUALIFIER
647       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
648       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
649   }
650 
651   /**
652    * Returns the column qualifier for seqNum column for replicaId
653    * @param replicaId the replicaId of the region
654    * @return a byte[] for seqNum column qualifier
655    */
656   @VisibleForTesting
657   public static byte[] getSeqNumColumn(int replicaId) {
658     return replicaId == 0
659       ? HConstants.SEQNUM_QUALIFIER
660       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
661       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
662   }
663 
664   /**
665    * Parses the replicaId from the server column qualifier. See top of the class javadoc
666    * for the actual meta layout
667    * @param serverColumn the column qualifier
668    * @return an int for the replicaId
669    */
670   @VisibleForTesting
671   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
672     String serverStr = Bytes.toString(serverColumn);
673 
674     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
675     if (matcher.matches() && matcher.groupCount() > 0) {
676       String group = matcher.group(1);
677       if (group != null && group.length() > 0) {
678         return Integer.parseInt(group.substring(1), 16);
679       } else {
680         return 0;
681       }
682     }
683     return -1;
684   }
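
  /*
   * Examples of the column naming handled by the helpers above (HRegionInfo.REPLICA_ID_FORMAT
   * renders the replicaId as four hex digits, which is what SERVER_COLUMN_PATTERN expects):
   *
   *   getServerColumn(0)                                            => bytes of "server"
   *   getServerColumn(1)                                            => bytes of "server_0001"
   *   parseReplicaIdFromServerColumn(Bytes.toBytes("server"))       => 0
   *   parseReplicaIdFromServerColumn(Bytes.toBytes("server_0001"))  => 1
   *   parseReplicaIdFromServerColumn(Bytes.toBytes("splitA"))       => -1 (not a server column)
   */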
685 
686   /**
687    * Returns a {@link ServerName} from catalog table {@link Result}.
688    * @param r Result to pull from
689    * @return A ServerName instance or null if necessary fields not found or empty.
690    */
691   private static ServerName getServerName(final Result r, final int replicaId) {
692     byte[] serverColumn = getServerColumn(replicaId);
693     Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
694     if (cell == null || cell.getValueLength() == 0) return null;
695     String hostAndPort = Bytes.toString(
696       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
697     byte[] startcodeColumn = getStartCodeColumn(replicaId);
698     cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
699     if (cell == null || cell.getValueLength() == 0) return null;
700     return ServerName.valueOf(hostAndPort,
701       Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
702   }
703 
704   /**
705    * The latest seqnum that the server writing to meta observed when opening the region.
706    * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
707    * @param r Result to pull the seqNum from
708    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
709    */
710   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
711     Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId));
712     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
713     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
714   }
715 
716   /**
717    * Returns a {@link RegionLocations} extracted from the result.
718    * @return a RegionLocations containing all locations for the region range or null if
719    *  we can't deserialize the result.
720    */
721   public static RegionLocations getRegionLocations(final Result r) {
722     if (r == null) return null;
723     HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
724     if (regionInfo == null) return null;
725 
726     List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
727     NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();
728 
729     locations.add(getRegionLocation(r, regionInfo, 0));
730 
731     NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily());
732     if (infoMap == null) return new RegionLocations(locations);
733 
734     // iterate until all serverName columns are seen
735     int replicaId = 0;
736     byte[] serverColumn = getServerColumn(replicaId);
737     SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
738     if (serverMap.isEmpty()) return new RegionLocations(locations);
739 
740     for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
741       replicaId = parseReplicaIdFromServerColumn(entry.getKey());
742       if (replicaId < 0) {
743         break;
744       }
745 
746       locations.add(getRegionLocation(r, regionInfo, replicaId));
747     }
748 
749     return new RegionLocations(locations);
750   }
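
  /*
   * A small sketch of consuming the parsed locations, given a Result "r" read from hbase:meta:
   *
   *   RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
   *   if (locations != null) {
   *     for (HRegionLocation loc : locations.getRegionLocations()) {
   *       if (loc != null) {
   *         System.out.println(loc.getRegionInfo().getReplicaId() + " => " + loc.getServerName());
   *       }
   *     }
   *   }
   */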
751 
752   /**
753    * Returns the HRegionLocation parsed from the given meta row Result
754    * for the given regionInfo and replicaId. The regionInfo can be the default region info
755    * for the replica.
756    * @param r the meta row result
757    * @param regionInfo RegionInfo for default replica
758    * @param replicaId the replicaId for the HRegionLocation
759    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
760    */
761   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
762                                                    final int replicaId) {
763     ServerName serverName = getServerName(r, replicaId);
764     long seqNum = getSeqNumDuringOpen(r, replicaId);
765     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
766     return new HRegionLocation(replicaInfo, serverName, seqNum);
767   }
768 
769   /**
770    * Returns HRegionInfo object from the column
771    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
772    * table Result.
773    * @param data a Result object from the catalog table scan
774    * @return HRegionInfo or null
775    */
776   public static HRegionInfo getHRegionInfo(Result data) {
777     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
778   }
779 
780   /**
781    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
782    * <code>qualifier</code> of the catalog table result.
783    * @param r a Result object from the catalog table scan
784    * @param qualifier Column qualifier
785    * @return An HRegionInfo instance or null.
786    */
787   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
788     Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
789     if (cell == null) return null;
790     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
791       cell.getValueOffset(), cell.getValueLength());
792   }
793 
794   /**
795    * Returns the daughter regions by reading the corresponding columns of the catalog table
796    * Result.
797    * @param data a Result object from the catalog table scan
798    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
799    * parent
800    */
801   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) throws IOException {
802     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
803     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
804 
805     return new PairOfSameType<HRegionInfo>(splitA, splitB);
806   }
807 
808   /**
809    * Returns the merge regions by reading the corresponding columns of the catalog table
810    * Result.
811    * @param data a Result object from the catalog table scan
812    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not
813    * the result of a merge
814    */
815   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) throws IOException {
816     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
817     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
818 
819     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
820   }
821 
822   /**
823    * Implementations 'visit' a catalog table row.
824    */
825   public interface Visitor {
826     /**
827      * Visit the catalog table row.
828      * @param r A row from catalog table
829      * @return True if we are to proceed scanning the table, else false if
830      * we are to stop now.
831      */
832     boolean visit(final Result r) throws IOException;
833   }
834 
835   /**
836    * A {@link Visitor} that collects content out of passed {@link Result}.
837    */
838   static abstract class CollectingVisitor<T> implements Visitor {
839     final List<T> results = new ArrayList<T>();
840     @Override
841     public boolean visit(Result r) throws IOException {
842       if (r ==  null || r.isEmpty()) return true;
843       add(r);
844       return true;
845     }
846 
847     abstract void add(Result r);
848 
849     /**
850      * @return Collected results; wait till visits complete to collect all
851      * possible results
852      */
853     List<T> getResults() {
854       return this.results;
855     }
856   }
857 
858   /**
859    * Collects all returned results.
860    */
861   static class CollectAllVisitor extends CollectingVisitor<Result> {
862     @Override
863     void add(Result r) {
864       this.results.add(r);
865     }
866   }
867 
868   /**
869    * Count regions in <code>hbase:meta</code> for passed table.
870    * @param c Configuration object
871    * @param tableName table name to count regions for
872    * @return Count of regions in table <code>tableName</code>
873    * @throws IOException
874    */
875   @Deprecated
876   public static int getRegionCount(final Configuration c, final String tableName)
877       throws IOException {
878     return getRegionCount(c, TableName.valueOf(tableName));
879   }
880 
881   /**
882    * Count regions in <code>hbase:meta</code> for passed table.
883    * @param c Configuration object
884    * @param tableName table name to count regions for
885    * @return Count of regions in table <code>tableName</code>
886    * @throws IOException
887    */
888   public static int getRegionCount(final Configuration c, final TableName tableName)
889       throws IOException {
890     HTable t = new HTable(c, tableName);
891     try {
892       return t.getRegionLocations().size();
893     } finally {
894       t.close();
895     }
896   }
897 
898   ////////////////////////
899   // Editing operations //
900   ////////////////////////
901 
902   /**
903    * Generates and returns a Put containing the region info for the catalog table
904    */
905   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
906     throws IOException {
907     Put put = new Put(regionInfo.getRegionName());
908     addRegionInfo(put, regionInfo);
909     return put;
910   }
911 
912   /**
913    * Generates and returns a Delete containing the region info for the catalog
914    * table
915    */
916   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
917     if (regionInfo == null) {
918       throw new IllegalArgumentException("Can't make a delete for null region");
919     }
920     Delete delete = new Delete(regionInfo.getRegionName());
921     return delete;
922   }
923 
924   /**
925    * Adds split daughters to the Put
926    */
927   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
928     if (splitA != null) {
929       put.addImmutable(
930         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
931     }
932     if (splitB != null) {
933       put.addImmutable(
934         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
935     }
936     return put;
937   }
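
  /*
   * An illustrative sketch of how the helpers above compose: building (but not submitting) the
   * meta row of a hypothetical parent region that has just split into "daughterA" and "daughterB":
   *
   *   Put parentRow = MetaTableAccessor.makePutFromRegionInfo(parent);
   *   MetaTableAccessor.addDaughtersToPut(parentRow, daughterA, daughterB);
   *   // splitRegion(...) below performs this (plus the daughter rows) as one atomic multi-mutate.
   */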
938 
939   /**
940    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
941    * @param connection connection we're using
942    * @param p Put to add to hbase:meta
943    * @throws IOException
944    */
945   static void putToMetaTable(final Connection connection, final Put p)
946     throws IOException {
947     put(getMetaHTable(connection), p);
948   }
949 
950   /**
951    * @param t Table to use (will be closed when done).
952    * @param p put to make
953    * @throws IOException
954    */
955   private static void put(final Table t, final Put p) throws IOException {
956     try {
957       t.put(p);
958     } finally {
959       t.close();
960     }
961   }
962 
963   /**
964    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
965    * @param connection connection we're using
966    * @param ps Puts to add to hbase:meta
967    * @throws IOException
968    */
969   public static void putsToMetaTable(final Connection connection, final List<Put> ps)
970     throws IOException {
971     Table t = getMetaHTable(connection);
972     try {
973       t.put(ps);
974     } finally {
975       t.close();
976     }
977   }
978 
979   /**
980    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
981    * @param connection connection we're using
982    * @param d Delete to add to hbase:meta
983    * @throws IOException
984    */
985   static void deleteFromMetaTable(final Connection connection, final Delete d)
986     throws IOException {
987     List<Delete> dels = new ArrayList<Delete>(1);
988     dels.add(d);
989     deleteFromMetaTable(connection, dels);
990   }
991 
992   /**
993    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
994    * @param connection connection we're using
995    * @param deletes Deletes to add to hbase:meta. This list should support #remove.
996    * @throws IOException
997    */
998   public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
999     throws IOException {
1000     Table t = getMetaHTable(connection);
1001     try {
1002       t.delete(deletes);
1003     } finally {
1004       t.close();
1005     }
1006   }
1007 
1008   /**
1009    * Deletes the replica columns (server, startcode, seqnum) for a range of replicas from the passed rows
1010    * @param metaRows rows in hbase:meta
1011    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1012    * @param numReplicasToRemove how many replicas to remove
1013    * @param connection connection we're using to access meta table
1014    * @throws IOException
1015    */
1016   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1017     int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
1018       throws IOException {
1019     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1020     for (byte[] row : metaRows) {
1021       Delete deleteReplicaLocations = new Delete(row);
1022       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1023         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1024           getServerColumn(i));
1025         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1026           getSeqNumColumn(i));
1027         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1028           getStartCodeColumn(i));
1029       }
1030       deleteFromMetaTable(connection, deleteReplicaLocations);
1031     }
1032   }
1033 
1034   /**
1035    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1036    * @param connection connection we're using
1037    * @param mutations Puts and Deletes to execute on hbase:meta
1038    * @throws IOException
1039    */
1040   public static void mutateMetaTable(final Connection connection,
1041                                      final List<Mutation> mutations)
1042     throws IOException {
1043     Table t = getMetaHTable(connection);
1044     try {
1045       t.batch(mutations);
1046     } catch (InterruptedException e) {
1047       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1048       ie.initCause(e);
1049       throw ie;
1050     } finally {
1051       t.close();
1052     }
1053   }
1054 
1055   /**
1056    * Adds a hbase:meta row for the specified new region.
1057    * @param connection connection we're using
1058    * @param regionInfo region information
1059    * @throws IOException if problem connecting or updating meta
1060    */
1061   public static void addRegionToMeta(Connection connection,
1062                                      HRegionInfo regionInfo)
1063     throws IOException {
1064     putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
1065     LOG.info("Added " + regionInfo.getRegionNameAsString());
1066   }
1067 
1068   /**
1069    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1070    * HTable is not flushed or closed.
1071    * @param meta the HTable for META
1072    * @param regionInfo region information
1073    * @throws IOException if problem connecting or updating meta
1074    */
1075   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1076     addRegionToMeta(meta, regionInfo, null, null);
1077   }
1078 
1079   /**
1080    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1081    * does not add its daughters as different rows, but adds information about the daughters
1082    * in the same row as the parent. Use
1083    * {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
1084    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1085    * if you want to do that.
1086    * @param meta the HTable for META
1087    * @param regionInfo region information
1088    * @param splitA first split daughter of the parent regionInfo
1089    * @param splitB second split daughter of the parent regionInfo
1090    * @throws IOException if problem connecting or updating meta
1091    */
1092   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1093                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1094     Put put = makePutFromRegionInfo(regionInfo);
1095     addDaughtersToPut(put, splitA, splitB);
1096     meta.put(put);
1097     if (LOG.isDebugEnabled()) {
1098       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1099     }
1100   }
1101 
1102   /**
1103    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1104    * does not add its daughters as different rows, but adds information about the daughters
1105    * in the same row as the parent. Use
1106    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1107    * if you want to do that.
1108    * @param connection connection we're using
1109    * @param regionInfo region information
1110    * @param splitA first split daughter of the parent regionInfo
1111    * @param splitB second split daughter of the parent regionInfo
1112    * @throws IOException if problem connecting or updating meta
1113    */
1114   public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
1115                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1116     Table meta = getMetaHTable(connection);
1117     try {
1118       addRegionToMeta(meta, regionInfo, splitA, splitB);
1119     } finally {
1120       meta.close();
1121     }
1122   }
1123 
1124   /**
1125    * Adds a hbase:meta row for each of the specified new regions.
1126    * @param connection connection we're using
1127    * @param regionInfos region information list
1128    * @throws IOException if problem connecting or updating meta
1129    */
1130   public static void addRegionsToMeta(Connection connection,
1131                                       List<HRegionInfo> regionInfos)
1132     throws IOException {
1133     List<Put> puts = new ArrayList<Put>();
1134     for (HRegionInfo regionInfo : regionInfos) {
1135       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1136         puts.add(makePutFromRegionInfo(regionInfo));
1137       }
1138     }
1139     putsToMetaTable(connection, puts);
1140     LOG.info("Added " + puts.size());
1141   }
1142 
1143   /**
1144    * Adds a daughter region entry to meta.
1145    * @param regionInfo the region to put
1146    * @param sn the location of the region
1147    * @param openSeqNum the latest sequence number obtained when the region was open
1148    */
1149   public static void addDaughter(final Connection connection,
1150       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1151       throws NotAllMetaRegionsOnlineException, IOException {
1152     Put put = new Put(regionInfo.getRegionName());
1153     addRegionInfo(put, regionInfo);
1154     if (sn != null) {
1155       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1156     }
1157     putToMetaTable(connection, put);
1158     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1159       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1160   }
1161 
1162   /**
1163    * Merge the two regions into one in an atomic operation. Deletes the two
1164    * merging regions in hbase:meta and adds the merged region with the information of
1165    * two merging regions.
1166    * @param connection connection we're using
1167    * @param mergedRegion the merged region
1168    * @param regionA first region being merged
1169    * @param regionB second region being merged
1170    * @param sn the location of the region
1171    * @throws IOException
1172    */
1173   public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
1174       HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws IOException {
1175     Table meta = getMetaHTable(connection);
1176     try {
1177       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1178 
1179       // Put for parent
1180       Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
1181       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1182         regionA.toByteArray());
1183       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1184         regionB.toByteArray());
1185 
1186       // Deletes for merging regions
1187       Delete deleteA = makeDeleteFromRegionInfo(regionA);
1188       Delete deleteB = makeDeleteFromRegionInfo(regionB);
1189 
1190       // The merged is a new region, openSeqNum = 1 is fine.
1191       addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
1192 
1193       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1194         + HConstants.DELIMITER);
1195       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1196     } finally {
1197       meta.close();
1198     }
1199   }
1200 
1201   /**
1202    * Splits the region into two in an atomic operation. Offlines the parent
1203    * region with the information that it is split into two, and also adds
1204    * the daughter regions. Does not add the location information to the daughter
1205    * regions since they are not open yet.
1206    * @param connection connection we're using
1207    * @param parent the parent region which is split
1208    * @param splitA Split daughter region A
1209    * @param splitB Split daughter region B
1210    * @param sn the location of the region
1211    */
1212   public static void splitRegion(final Connection connection,
1213                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1214                                  ServerName sn) throws IOException {
1215     Table meta = getMetaHTable(connection);
1216     try {
1217       HRegionInfo copyOfParent = new HRegionInfo(parent);
1218       copyOfParent.setOffline(true);
1219       copyOfParent.setSplit(true);
1220 
1221       //Put for parent
1222       Put putParent = makePutFromRegionInfo(copyOfParent);
1223       addDaughtersToPut(putParent, splitA, splitB);
1224 
1225       //Puts for daughters
1226       Put putA = makePutFromRegionInfo(splitA);
1227       Put putB = makePutFromRegionInfo(splitB);
1228 
1229       addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1230       addLocation(putB, sn, 1, splitB.getReplicaId());
1231 
1232       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1233       multiMutate(meta, tableRow, putParent, putA, putB);
1234     } finally {
1235       meta.close();
1236     }
1237   }
1238 
1239   /**
1240    * Performs an atomic multi-Mutate operation against the given table.
1241    */
1242   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1243       throws IOException {
1244     CoprocessorRpcChannel channel = table.coprocessorService(row);
1245     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1246       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1247     for (Mutation mutation : mutations) {
1248       if (mutation instanceof Put) {
1249         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1250           ClientProtos.MutationProto.MutationType.PUT, mutation));
1251       } else if (mutation instanceof Delete) {
1252         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1253           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1254       } else {
1255         throw new DoNotRetryIOException("multi in MetaTableAccessor doesn't support "
1256           + mutation.getClass().getName());
1257       }
1258     }
1259 
1260     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1261       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1262     try {
1263       service.mutateRows(null, mmrBuilder.build());
1264     } catch (ServiceException ex) {
1265       throw ProtobufUtil.toIOException(ex);
1266     }
1267   }
1268 
1269   /**
1270    * Updates the location of the specified region in hbase:meta to be the specified
1271    * server hostname and startcode.
1272    * <p>
1273    * Uses the passed connection to reach the server hosting
1274    * hbase:meta and makes edits to that region.
1275    *
1276    * @param connection connection we're using
1277    * @param regionInfo region to update location of
1278    * @param sn Server name
1279    * @throws IOException
1280    */
1281   public static void updateRegionLocation(Connection connection,
1282                                           HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
1283     throws IOException {
1284     updateLocation(connection, regionInfo, sn, updateSeqNum);
1285   }
1286 
1287   /**
1288    * Updates the location of the specified region to be the specified server.
1289    * <p>
1290    * Connects to the specified server which should be hosting the specified
1291    * catalog region name to perform the edit.
1292    *
1293    * @param connection connection we're using
1294    * @param regionInfo region to update location of
1295    * @param sn Server name
1296    * @param openSeqNum the latest sequence number obtained when the region was open
1297    * @throws IOException In particular could throw {@link java.net.ConnectException}
1298    * if the server is down on the other end.
1299    */
1300   private static void updateLocation(final Connection connection,
1301                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
1302     throws IOException {
1303     // region replicas are kept in the primary region's row
1304     Put put = new Put(getMetaKeyForRegion(regionInfo));
1305     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1306     putToMetaTable(connection, put);
1307     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
1308       " with server=" + sn);
1309   }
1310 
1311   /**
1312    * Deletes the specified region from META.
1313    * @param connection connection we're using
1314    * @param regionInfo region to be deleted from META
1315    * @throws IOException
1316    */
1317   public static void deleteRegion(Connection connection,
1318                                   HRegionInfo regionInfo)
1319     throws IOException {
1320     Delete delete = new Delete(regionInfo.getRegionName());
1321     deleteFromMetaTable(connection, delete);
1322     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1323   }
1324 
1325   /**
1326    * Deletes the specified regions from META.
1327    * @param connection connection we're using
1328    * @param regionsInfo list of regions to be deleted from META
1329    * @throws IOException
1330    */
1331   public static void deleteRegions(Connection connection,
1332                                    List<HRegionInfo> regionsInfo) throws IOException {
1333     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1334     for (HRegionInfo hri: regionsInfo) {
1335       deletes.add(new Delete(hri.getRegionName()));
1336     }
1337     deleteFromMetaTable(connection, deletes);
1338     LOG.info("Deleted " + regionsInfo);
1339   }
1340 
1341   /**
1342    * Adds and removes the specified regions in hbase:meta
1343    * @param connection connection we're using
1344    * @param regionsToRemove list of regions to be deleted from META
1345    * @param regionsToAdd list of regions to be added to META
1346    * @throws IOException
1347    */
1348   public static void mutateRegions(Connection connection,
1349                                    final List<HRegionInfo> regionsToRemove,
1350                                    final List<HRegionInfo> regionsToAdd)
1351     throws IOException {
1352     List<Mutation> mutation = new ArrayList<Mutation>();
1353     if (regionsToRemove != null) {
1354       for (HRegionInfo hri: regionsToRemove) {
1355         mutation.add(new Delete(hri.getRegionName()));
1356       }
1357     }
1358     if (regionsToAdd != null) {
1359       for (HRegionInfo hri: regionsToAdd) {
1360         mutation.add(makePutFromRegionInfo(hri));
1361       }
1362     }
1363     mutateMetaTable(connection, mutation);
1364     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1365       LOG.debug("Deleted " + regionsToRemove);
1366     }
1367     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1368       LOG.debug("Added " + regionsToAdd);
1369     }
1370   }
1371 
1372   /**
1373    * Overwrites the specified regions in hbase:meta
1374    * @param connection connection we're using
1375    * @param regionInfos list of regions to be added to META
1376    * @throws IOException
1377    */
1378   public static void overwriteRegions(Connection connection,
1379                                       List<HRegionInfo> regionInfos) throws IOException {
1380     deleteRegions(connection, regionInfos);
1381     // Why sleep? This is the easiest way to ensure that the previous deletes do not
1382     // eclipse the following puts, which might happen at the same timestamp on the server.
1383     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
1384     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
1385     Threads.sleep(20);
1386     addRegionsToMeta(connection, regionInfos);
1387     LOG.info("Overwritten " + regionInfos);
1388   }
1389 
1390   /**
1391    * Deletes merge qualifiers for the specified merged region.
1392    * @param connection connection we're using
1393    * @param mergedRegion
1394    * @throws IOException
1395    */
1396   public static void deleteMergeQualifiers(Connection connection,
1397                                            final HRegionInfo mergedRegion) throws IOException {
1398     Delete delete = new Delete(mergedRegion.getRegionName());
1399     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
1400     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
1401     deleteFromMetaTable(connection, delete);
1402     LOG.info("Deleted references in merged region "
1403       + mergedRegion.getRegionNameAsString() + ", qualifier="
1404       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1405       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1406   }
1407 
1408   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
1409     throws IOException {
1410     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1411       hri.toByteArray());
1412     return p;
1413   }
1414 
1415   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
1416     // using regionserver's local time as the timestamp of Put.
1417     // See: HBASE-11536
1418     long now = EnvironmentEdgeManager.currentTime();
1419     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now,
1420       Bytes.toBytes(sn.getHostAndPort()));
1421     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now,
1422       Bytes.toBytes(sn.getStartcode()));
1423     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now,
1424       Bytes.toBytes(openSeqNum));
1425     return p;
1426   }
1427 }