1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import com.google.common.annotations.VisibleForTesting;
21  import com.google.protobuf.ServiceException;
22  
23  import org.apache.commons.logging.Log;
24  import org.apache.commons.logging.LogFactory;
25  import org.apache.hadoop.classification.InterfaceAudience;
26  import org.apache.hadoop.conf.Configuration;
27  import org.apache.hadoop.hbase.client.Delete;
28  import org.apache.hadoop.hbase.client.Get;
29  import org.apache.hadoop.hbase.client.HConnection;
30  import org.apache.hadoop.hbase.client.HTable;
31  import org.apache.hadoop.hbase.client.Mutation;
32  import org.apache.hadoop.hbase.client.Put;
33  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
34  import org.apache.hadoop.hbase.client.Result;
35  import org.apache.hadoop.hbase.client.ResultScanner;
36  import org.apache.hadoop.hbase.client.Scan;
37  import org.apache.hadoop.hbase.client.Table;
38  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
39  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
40  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
41  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
42  import org.apache.hadoop.hbase.util.Bytes;
43  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
44  import org.apache.hadoop.hbase.util.Pair;
45  import org.apache.hadoop.hbase.util.PairOfSameType;
46  import org.apache.hadoop.hbase.util.Threads;
47  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
48  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
49  
50  import java.io.IOException;
51  import java.io.InterruptedIOException;
52  import java.util.ArrayList;
53  import java.util.List;
54  import java.util.Map;
55  import java.util.NavigableMap;
56  import java.util.Set;
57  import java.util.SortedMap;
58  import java.util.TreeMap;
59  import java.util.regex.Matcher;
60  import java.util.regex.Pattern;
61  
62  /**
63   * Read/write operations on region and assignment information stored in
64   * <code>hbase:meta</code>.
65   *
66   * Some of the methods of this class take a ZooKeeperWatcher as a parameter. The only
67   * reason for this is that when used on the client side (like from HBaseAdmin), we want a
68   * short-lived connection (opened before each operation, closed right after), while when
69   * used on the HMaster or a RegionServer (like in AssignmentManager) we want a permanent connection.
70   */
71  @InterfaceAudience.Private
72  public class MetaTableAccessor {
73  
74    /*
75     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
76     * same table range (table, startKey, endKey). For every range, there will be at least one
77     * HRI defined, which is called the default replica.
78     *
79     * Meta layout (as of 0.98 + HBASE-10070) is like:
80     * For each table range, there is a single row, formatted like:
81     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
82     * of the default region replica.
83     * Columns are:
84     * info:regioninfo         => contains serialized HRI for the default region replica
85     * info:server             => contains hostname:port (in string form) for the server hosting
86     *                            the default regionInfo replica
87     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
88     *                            regionInfo replica with replicaId
89     * info:serverstartcode    => contains server start code (in binary long form) for the server
90     *                            hosting the default regionInfo replica
91     * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
92     *                                     server hosting the regionInfo replica with replicaId
93     * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
94     *                             the server opened the region with default replicaId
95     * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
96     *                             the time the server opened the region with replicaId
97     * info:splitA              => contains a serialized HRI for the first daughter region if the
98     *                             region is split
99     * info:splitB              => contains a serialized HRI for the second daughter region if the
100    *                             region is split
101    * info:mergeA              => contains a serialized HRI for the first parent region if the
102    *                             region is the result of a merge
103    * info:mergeB              => contains a serialized HRI for the second parent region if the
104    *                             region is the result of a merge
105    *
106    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
107    * and should not leak out of it (through Result objects, etc)
108    */
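  /*
   * An illustrative sketch (all values are made up) of a single meta row for a table "t1"
   * that has one additional replica, following the layout described above:
   *
   *   row key: t1,<startKey>,<regionId>...   (the regionName of the default replica)
   *     info:regioninfo            => serialized HRI of the default replica
   *     info:server                => host1:16020
   *     info:serverstartcode       => 1400000000000
   *     info:seqnumDuringOpen      => 2
   *     info:server_0001           => host2:16020
   *     info:serverstartcode_0001  => 1400000000001
   *     info:seqnumDuringOpen_0001 => 2
   *
   * Only the column names follow the layout documented in this class; host names, ports,
   * start codes and seqNums here are hypothetical.
   */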
109 
110   private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);
111 
112   static final byte [] META_REGION_PREFIX;
113   static {
114     // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
115     // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
116     int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
117     META_REGION_PREFIX = new byte [len];
118     System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
119       META_REGION_PREFIX, 0, len);
120   }
121 
122   /** The delimiter for meta columns for replicaIds > 0 */
123   protected static final char META_REPLICA_ID_DELIMITER = '_';
124 
125   /** A regex for parsing server columns from meta. See above javadoc for meta layout */
126   private static final Pattern SERVER_COLUMN_PATTERN
127     = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
128 
129   ////////////////////////
130   // Reading operations //
131   ////////////////////////
132 
133   /**
134    * Performs a full scan of the <code>hbase:meta</code> table.
135    * @return List of {@link org.apache.hadoop.hbase.client.Result}
136    * @throws IOException
137    */
138   public static List<Result> fullScanOfMeta(HConnection hConnection)
139   throws IOException {
140     CollectAllVisitor v = new CollectAllVisitor();
141     fullScan(hConnection, v, null);
142     return v.getResults();
143   }
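  /*
   * A minimal usage sketch (not part of this class; connection setup is assumed and
   * exception handling is omitted): given an HConnection, e.g. one obtained from
   * HConnectionManager.createConnection(conf), a caller can dump the row keys of every
   * hbase:meta row like this:
   *
   *   HConnection conn = HConnectionManager.createConnection(conf);
   *   try {
   *     for (Result r : MetaTableAccessor.fullScanOfMeta(conn)) {
   *       System.out.println(Bytes.toStringBinary(r.getRow()));
   *     }
   *   } finally {
   *     conn.close();
   *   }
   */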
144 
145   /**
146    * Performs a full scan of <code>hbase:meta</code>.
147    * @param hConnection connection we're using
148    * @param visitor Visitor invoked against each row.
149    * @throws IOException
150    */
151   public static void fullScan(HConnection hConnection,
152       final Visitor visitor)
153   throws IOException {
154     fullScan(hConnection, visitor, null);
155   }
156 
157   /**
158    * Performs a full scan of <code>hbase:meta</code>.
159    * @param hConnection connection we're using
160    * @return List of {@link Result}
161    * @throws IOException
162    */
163   public static List<Result> fullScan(HConnection hConnection)
164     throws IOException {
165     CollectAllVisitor v = new CollectAllVisitor();
166     fullScan(hConnection, v, null);
167     return v.getResults();
168   }
169 
170   /**
171    * Callers should call close on the returned {@link HTable} instance.
172    * @param hConnection connection we're using to access table
173    * @param tableName Table to get an {@link org.apache.hadoop.hbase.client.HTable} against.
174    * @return An {@link org.apache.hadoop.hbase.client.HTable} for <code>tableName</code>
175    * @throws IOException
176    * @SuppressWarnings("deprecation")
177    */
178   private static Table getHTable(final HConnection hConnection,
179       final TableName tableName)
180   throws IOException {
181     // We used to pass whole CatalogTracker in here, now we just pass in HConnection
182     if (hConnection == null || hConnection.isClosed()) {
183       throw new NullPointerException("No connection");
184     }
185     return new HTable(tableName, hConnection);
186   }
187 
188   /**
189    * Callers should call close on the returned {@link HTable} instance.
190    * @param hConnection connection we're using to access Meta
191    * @return An {@link HTable} for <code>hbase:meta</code>
192    * @throws IOException
193    */
194   static Table getMetaHTable(final HConnection hConnection)
195   throws IOException {
196     return getHTable(hConnection, TableName.META_TABLE_NAME);
197   }
198 
199   /**
200    * @param t Table to use (will be closed when done).
201    * @param g Get to run
202    * @throws IOException
203    */
204   private static Result get(final Table t, final Get g) throws IOException {
205     try {
206       return t.get(g);
207     } finally {
208       t.close();
209     }
210   }
211 
212   /**
213    * Gets the region info and assignment for the specified region.
214    * @param hConnection connection we're using
215    * @param regionName Region to lookup.
216    * @return Location and HRegionInfo for <code>regionName</code>
217    * @throws IOException
218    * @deprecated use {@link #getRegionLocation(HConnection, byte[])} instead
219    */
220   @Deprecated
221   public static Pair<HRegionInfo, ServerName> getRegion(
222     HConnection hConnection, byte [] regionName)
223     throws IOException {
224     HRegionLocation location = getRegionLocation(hConnection, regionName);
225     return location == null
226       ? null
227       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
228   }
229 
230   /**
231    * Returns the HRegionLocation from meta for the given region
232    * @param hConnection connection we're using
233    * @param regionName region we're looking for
234    * @return HRegionLocation for the given region
235    * @throws IOException
236    */
237   public static HRegionLocation getRegionLocation(HConnection hConnection,
238                                                   byte[] regionName) throws IOException {
239     byte[] row = regionName;
240     HRegionInfo parsedInfo = null;
241     try {
242       parsedInfo = parseRegionInfoFromRegionName(regionName);
243       row = getMetaKeyForRegion(parsedInfo);
244     } catch (Exception parseEx) {
245       // Ignore. This is used with tableName passed as regionName.
246     }
247     Get get = new Get(row);
248     get.addFamily(HConstants.CATALOG_FAMILY);
249     Result r = get(getMetaHTable(hConnection), get);
250     RegionLocations locations = getRegionLocations(r);
251     return locations == null
252       ? null
253       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
254   }
255 
256   /**
257    * Returns the HRegionLocation from meta for the given region
258    * @param hConnection connection we're using
259    * @param regionInfo region information
260    * @return HRegionLocation for the given region
261    * @throws IOException
262    */
263   public static HRegionLocation getRegionLocation(HConnection hConnection,
264                                                   HRegionInfo regionInfo) throws IOException {
265     byte[] row = getMetaKeyForRegion(regionInfo);
266     Get get = new Get(row);
267     get.addFamily(HConstants.CATALOG_FAMILY);
268     Result r = get(getMetaHTable(hConnection), get);
269     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
270   }
271 
272   /** Returns the row key to use for this regionInfo */
273   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
274     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
275   }
276 
277   /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
278    * are stored in the name, so the returned object should only be used for the fields
279    * in the regionName.
280    */
281   protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
282     throws IOException {
283     byte[][] fields = HRegionInfo.parseRegionName(regionName);
284     long regionId =  Long.parseLong(Bytes.toString(fields[2]));
285     int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
286     return new HRegionInfo(
287       TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
288   }
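  /*
   * For illustration: for a region name of the form <tableName>,<startKey>,<regionId>...,
   * the fields parsed above are expected to be fields[0] = table name, fields[1] = start key,
   * fields[2] = region id and, for non-default replicas only, fields[3] = the replicaId in hex.
   * Only these fields are meaningful in the HRI returned by this method.
   */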
289 
290   /**
291    * Gets the result in hbase:meta for the specified region.
292    * @param hConnection connection we're using
293    * @param regionName region we're looking for
294    * @return result of the specified region
295    * @throws IOException
296    */
297   public static Result getRegionResult(HConnection hConnection,
298       byte[] regionName) throws IOException {
299     Get get = new Get(regionName);
300     get.addFamily(HConstants.CATALOG_FAMILY);
301     return get(getMetaHTable(hConnection), get);
302   }
303 
304   /**
305    * Gets the regions from the merge qualifiers of the specified merged region
306    * @return null if the region doesn't contain merge qualifiers, else the two regions that were merged
307    * @throws IOException
308    */
309   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
310       HConnection hConnection, byte[] regionName) throws IOException {
311     Result result = getRegionResult(hConnection, regionName);
312     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
313     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
314     if (mergeA == null && mergeB == null) {
315       return null;
316     }
317     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
318   }
319 
320   /**
321    * Checks if the specified table exists. Looks at the hbase:meta table accessed via
322    * the passed connection.
323    * @param hConnection connection we're using
324    * @param tableName table to check
325    * @return true if the table exists in meta, false if not
326    * @throws IOException
327    */
328   public static boolean tableExists(HConnection hConnection,
329       final TableName tableName)
330   throws IOException {
331     if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
332       // Catalog tables always exist.
333       return true;
334     }
335     // Make a version of CollectingVisitor that only collects the first region of the table
336     CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
337       private HRegionInfo current = null;
338 
339       @Override
340       public boolean visit(Result r) throws IOException {
341         RegionLocations locations = getRegionLocations(r);
342         if (locations == null || locations.getRegionLocation().getRegionInfo() == null) {
343           LOG.warn("No serialized HRegionInfo in " + r);
344           return true;
345         }
346         this.current = locations.getRegionLocation().getRegionInfo();
347         if (this.current == null) {
348           LOG.warn("No serialized HRegionInfo in " + r);
349           return true;
350         }
351         if (!isInsideTable(this.current, tableName)) return false;
352         // Else call super and add this Result to the collection.
353         super.visit(r);
354         // Stop collecting regions from table after we get one.
355         return false;
356       }
357 
358       @Override
359       void add(Result r) {
360         // Add the current HRI.
361         this.results.add(this.current);
362       }
363     };
364     fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
365     // If visitor has results >= 1 then table exists.
366     return visitor.getResults().size() >= 1;
367   }
368 
369   /**
370    * Gets all of the regions of the specified table.
371    * @param zkw zookeeper connection to access meta table
372    * @param hConnection connection we're using
373    * @param tableName table we're looking for
374    * @return Ordered list of {@link HRegionInfo}.
375    * @throws IOException
376    */
377   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
378       HConnection hConnection, TableName tableName)
379   throws IOException {
380     return getTableRegions(zkw, hConnection, tableName, false);
381   }
382 
383   /**
384    * Gets all of the regions of the specified table.
385    * @param zkw zookeeper connection to access meta table
386    * @param hConnection connection we're using
387    * @param tableName table we're looking for
388    * @param excludeOfflinedSplitParents If true, do not include offlined split
389    * parents in the return.
390    * @return Ordered list of {@link HRegionInfo}.
391    * @throws IOException
392    */
393   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
394       HConnection hConnection, TableName tableName, final boolean excludeOfflinedSplitParents)
395         throws IOException {
396     List<Pair<HRegionInfo, ServerName>> result = null;
397     try {
398       result = getTableRegionsAndLocations(zkw, hConnection, tableName,
399         excludeOfflinedSplitParents);
400     } catch (InterruptedException e) {
401       throw (InterruptedIOException)new InterruptedIOException().initCause(e);
402     }
403     return getListOfHRegionInfos(result);
404   }
405 
406   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
407     if (pairs == null || pairs.isEmpty()) return null;
408     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
409     for (Pair<HRegionInfo, ServerName> pair: pairs) {
410       result.add(pair.getFirst());
411     }
412     return result;
413   }
414 
415   /**
416    * @param current region of current table we're working with
417    * @param tableName table we're checking against
418    * @return True if the table name of <code>current</code> is equal to
419    * <code>tableName</code>
420    */
421   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
422     return tableName.equals(current.getTable());
423   }
424 
425   /**
426    * @param tableName table we're working with
427    * @return Place to start Scan in <code>hbase:meta</code> when passed a
428    * <code>tableName</code>; returns &lt;tableName&gt;&lt;,&gt;&lt;,&gt;
429    */
430   static byte [] getTableStartRowForMeta(TableName tableName) {
431     byte [] startRow = new byte[tableName.getName().length + 2];
432     System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
433     startRow[startRow.length - 2] = HConstants.DELIMITER;
434     startRow[startRow.length - 1] = HConstants.DELIMITER;
435     return startRow;
436   }
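  /*
   * For example, for a table named "t1" this returns the bytes of "t1,," (HConstants.DELIMITER
   * is the ',' byte), which sorts immediately before the meta row of the table's first region.
   */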
437 
438   /**
439    * This method creates a Scan object that will only scan catalog rows that
440    * belong to the specified table. It doesn't specify any columns.
441    * This is a better alternative to just using a start row and scanning until
442    * it hits a new table, since that requires parsing the HRI to get the table
443    * name.
444    * @param tableName bytes of table's name
445    * @return configured Scan object
446    */
447   public static Scan getScanForTableName(TableName tableName) {
448     String strName = tableName.getNameAsString();
449     // Start key is just the table name with delimiters
450     byte[] startKey = Bytes.toBytes(strName + ",,");
451     // Stop key appends the smallest possible char to the table name
452     byte[] stopKey = Bytes.toBytes(strName + " ,,");
453 
454     Scan scan = new Scan(startKey);
455     scan.setStopRow(stopKey);
456     return scan;
457   }
458 
459   /**
460    * @param zkw zookeeper connection to access meta table
461    * @param hConnection connection we're using
462    * @param tableName table we're looking for
463    * @return Return list of regioninfos and servers.
464    * @throws IOException
465    * @throws InterruptedException
466    */
467   public static List<Pair<HRegionInfo, ServerName>>
468   getTableRegionsAndLocations(ZooKeeperWatcher zkw,
469                               HConnection hConnection, TableName tableName)
470   throws IOException, InterruptedException {
471     return getTableRegionsAndLocations(zkw, hConnection, tableName, true);
472   }
473 
474   /**
475    * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta location
476    * @param hConnection connection we're using
477    * @param tableName table to work with
478    * @return Return list of regioninfos and server addresses.
479    * @throws IOException
480    * @throws InterruptedException
481    */
482   public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
483       ZooKeeperWatcher zkw, HConnection hConnection, final TableName tableName,
484       final boolean excludeOfflinedSplitParents) throws IOException, InterruptedException {
485 
486     if (tableName.equals(TableName.META_TABLE_NAME)) {
487       // If meta, do a bit of special handling.
488       ServerName serverName = new MetaTableLocator().getMetaRegionLocation(zkw);
489       List<Pair<HRegionInfo, ServerName>> list =
490         new ArrayList<Pair<HRegionInfo, ServerName>>();
491       list.add(new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
492         serverName));
493       return list;
494     }
495     // Make a version of CollectingVisitor that collects HRegionInfo and ServerName
496     CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
497       new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
498         private RegionLocations current = null;
499 
500         @Override
501         public boolean visit(Result r) throws IOException {
502           current = getRegionLocations(r);
503           if (current == null || current.getRegionLocation().getRegionInfo() == null) {
504             LOG.warn("No serialized HRegionInfo in " + r);
505             return true;
506           }
507           HRegionInfo hri = current.getRegionLocation().getRegionInfo();
508           if (!isInsideTable(hri, tableName)) return false;
509           if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
510           // Else call super and add this Result to the collection.
511           return super.visit(r);
512         }
513 
514         @Override
515         void add(Result r) {
516           if (current == null) {
517             return;
518           }
519           for (HRegionLocation loc : current.getRegionLocations()) {
520             if (loc != null) {
521               this.results.add(new Pair<HRegionInfo, ServerName>(
522                 loc.getRegionInfo(), loc.getServerName()));
523             }
524           }
525         }
526       };
527     fullScan(hConnection, visitor, getTableStartRowForMeta(tableName));
528     return visitor.getResults();
529   }
530 
531   /**
532    * @param hConnection connection we're using
533    * @param serverName server whose regions we're interested in
534    * @return List of user regions installed on this server (does not include
535    * catalog regions).
536    * @throws IOException
537    */
538   public static NavigableMap<HRegionInfo, Result>
539   getServerUserRegions(HConnection hConnection, final ServerName serverName)
540     throws IOException {
541     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
542     // Fill the above hris map with entries from hbase:meta that have the passed
543     // servername.
544     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
545       @Override
546       void add(Result r) {
547         if (r == null || r.isEmpty()) return;
548         RegionLocations locations = getRegionLocations(r);
549         if (locations == null) return;
550         for (HRegionLocation loc : locations.getRegionLocations()) {
551           if (loc != null) {
552             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
553               hris.put(loc.getRegionInfo(), r);
554             }
555           }
556         }
557       }
558     };
559     fullScan(hConnection, v);
560     return hris;
561   }
562 
563   public static void fullScanMetaAndPrint(HConnection hConnection)
564     throws IOException {
565     Visitor v = new Visitor() {
566       @Override
567       public boolean visit(Result r) throws IOException {
568         if (r ==  null || r.isEmpty()) return true;
569         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
570         RegionLocations locations = getRegionLocations(r);
571         if (locations == null) return true;
572         for (HRegionLocation loc : locations.getRegionLocations()) {
573           if (loc != null) {
574             LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
575           }
576         }
577         return true;
578       }
579     };
580     fullScan(hConnection, v);
581   }
582 
583   /**
584    * Performs a full scan of a catalog table.
585    * @param hConnection connection we're using
586    * @param visitor Visitor invoked against each row.
587    * @param startrow Where to start the scan. Pass null if you want to begin the scan
588    * at the first row of <code>hbase:meta</code>.
589    *
590    * @throws IOException
591    */
592   public static void fullScan(HConnection hConnection,
593     final Visitor visitor, final byte [] startrow)
594   throws IOException {
595     Scan scan = new Scan();
596     if (startrow != null) scan.setStartRow(startrow);
597     if (startrow == null) {
598       int caching = hConnection.getConfiguration()
599           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
600       scan.setCaching(caching);
601     }
602     scan.addFamily(HConstants.CATALOG_FAMILY);
603     Table metaTable = getMetaHTable(hConnection);
604     ResultScanner scanner = null;
605     try {
606       scanner = metaTable.getScanner(scan);
607       Result data;
608       while((data = scanner.next()) != null) {
609         if (data.isEmpty()) continue;
610         // Break if visit returns false.
611         if (!visitor.visit(data)) break;
612       }
613     } finally {
614       if (scanner != null) scanner.close();
615       metaTable.close();
616     }
617   }
618 
619   /**
620    * Returns the column family used for meta columns.
621    * @return HConstants.CATALOG_FAMILY.
622    */
623   protected static byte[] getFamily() {
624     return HConstants.CATALOG_FAMILY;
625   }
626 
627   /**
628    * Returns the column qualifier for serialized region info
629    * @return HConstants.REGIONINFO_QUALIFIER
630    */
631   protected static byte[] getRegionInfoColumn() {
632     return HConstants.REGIONINFO_QUALIFIER;
633   }
634 
635   /**
636    * Returns the column qualifier for server column for replicaId
637    * @param replicaId the replicaId of the region
638    * @return a byte[] for server column qualifier
639    */
640   @VisibleForTesting
641   public static byte[] getServerColumn(int replicaId) {
642     return replicaId == 0
643       ? HConstants.SERVER_QUALIFIER
644       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
645       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
646   }
647 
648   /**
649    * Returns the column qualifier for server start code column for replicaId
650    * @param replicaId the replicaId of the region
651    * @return a byte[] for server start code column qualifier
652    */
653   @VisibleForTesting
654   public static byte[] getStartCodeColumn(int replicaId) {
655     return replicaId == 0
656       ? HConstants.STARTCODE_QUALIFIER
657       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
658       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
659   }
660 
661   /**
662    * Returns the column qualifier for seqNum column for replicaId
663    * @param replicaId the replicaId of the region
664    * @return a byte[] for seqNum column qualifier
665    */
666   @VisibleForTesting
667   public static byte[] getSeqNumColumn(int replicaId) {
668     return replicaId == 0
669       ? HConstants.SEQNUM_QUALIFIER
670       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
671       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
672   }
673 
674   /**
675    * Parses the replicaId from the server column qualifier. See top of the class javadoc
676    * for the actual meta layout
677    * @param serverColumn the column qualifier
678    * @return an int for the replicaId
679    */
680   @VisibleForTesting
681   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
682     String serverStr = Bytes.toString(serverColumn);
683 
684     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
685     if (matcher.matches() && matcher.groupCount() > 0) {
686       String group = matcher.group(1);
687       if (group != null && group.length() > 0) {
688         return Integer.parseInt(group.substring(1), 16);
689       } else {
690         return 0;
691       }
692     }
693     return -1;
694   }
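  /*
   * Examples, per SERVER_COLUMN_PATTERN and HRegionInfo.REPLICA_ID_FORMAT above: the qualifier
   * "server" parses to replicaId 0, "server_0001" parses to replicaId 1, and anything that does
   * not match the pattern (e.g. "serverstartcode") yields -1.
   */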
695 
696   /**
697    * Returns a {@link ServerName} from catalog table {@link Result}.
698    * @param r Result to pull from
699    * @return A ServerName instance or null if necessary fields not found or empty.
700    */
701   private static ServerName getServerName(final Result r, final int replicaId) {
702     byte[] serverColumn = getServerColumn(replicaId);
703     Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
704     if (cell == null || cell.getValueLength() == 0) return null;
705     String hostAndPort = Bytes.toString(
706       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
707     byte[] startcodeColumn = getStartCodeColumn(replicaId);
708     cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
709     if (cell == null || cell.getValueLength() == 0) return null;
710     return ServerName.valueOf(hostAndPort,
711       Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
712   }
713 
714   /**
715    * The latest seqnum that the server writing to meta observed when opening the region.
716    * I.e. the seqNum when the result of {@link #getServerName(Result, int)} was written.
717    * @param r Result to pull the seqNum from
718    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
719    */
720   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
721     Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId));
722     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
723     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
724   }
725 
726   /**
727    * Returns the {@link RegionLocations} extracted from the result.
728    * @return a RegionLocations object containing all locations for the region range, or null if
729    *  we can't deserialize the result.
730    */
731   public static RegionLocations getRegionLocations(final Result r) {
732     if (r == null) return null;
733     HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
734     if (regionInfo == null) return null;
735 
736     List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
737     NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();
738 
739     locations.add(getRegionLocation(r, regionInfo, 0));
740 
741     NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily());
742     if (infoMap == null) return new RegionLocations(locations);
743 
744     // iterate until all serverName columns are seen
745     int replicaId = 0;
746     byte[] serverColumn = getServerColumn(replicaId);
747     SortedMap<byte[], byte[]> serverMap = infoMap.tailMap(serverColumn, false);
748     if (serverMap.isEmpty()) return new RegionLocations(locations);
749 
750     for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
751       replicaId = parseReplicaIdFromServerColumn(entry.getKey());
752       if (replicaId < 0) {
753         break;
754       }
755 
756       locations.add(getRegionLocation(r, regionInfo, replicaId));
757     }
758 
759     return new RegionLocations(locations);
760   }
761 
762   /**
763    * Returns the HRegionLocation parsed from the given meta row Result
764    * for the given regionInfo and replicaId. The regionInfo can be the default region info
765    * for the replica.
766    * @param r the meta row result
767    * @param regionInfo RegionInfo for default replica
768    * @param replicaId the replicaId for the HRegionLocation
769    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
770    */
771   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
772                                                    final int replicaId) {
773     ServerName serverName = getServerName(r, replicaId);
774     long seqNum = getSeqNumDuringOpen(r, replicaId);
775     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
776     return new HRegionLocation(replicaInfo, serverName, seqNum);
777   }
778 
779   /**
780    * Returns HRegionInfo object from the column
781    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
782    * table Result.
783    * @param data a Result object from the catalog table scan
784    * @return HRegionInfo or null
785    */
786   public static HRegionInfo getHRegionInfo(Result data) {
787     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
788   }
789 
790   /**
791    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
792    * <code>qualifier</code> of the catalog table result.
793    * @param r a Result object from the catalog table scan
794    * @param qualifier Column qualifier
795    * @return An HRegionInfo instance or null.
796    */
797   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
798     Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
799     if (cell == null) return null;
800     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
801       cell.getValueOffset(), cell.getValueLength());
802   }
803 
804   /**
805    * Returns the daughter regions by reading the corresponding columns of the catalog table
806    * Result.
807    * @param data a Result object from the catalog table scan
808    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
809    * parent
810    */
811   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) throws IOException {
812     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
813     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
814 
815     return new PairOfSameType<HRegionInfo>(splitA, splitB);
816   }
817 
818   /**
819    * Returns the merge regions by reading the corresponding columns of the catalog table
820    * Result.
821    * @param data a Result object from the catalog table scan
822    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not the
823    * result of a merge
824    */
825   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) throws IOException {
826     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
827     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
828 
829     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
830   }
831 
832   /**
833    * Implementations 'visit' a catalog table row.
834    */
835   public interface Visitor {
836     /**
837      * Visit the catalog table row.
838      * @param r A row from catalog table
839      * @return True if we are to proceed scanning the table, else false if
840      * we are to stop now.
841      */
842     boolean visit(final Result r) throws IOException;
843   }
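  /*
   * A minimal sketch of a Visitor (hypothetical, for illustration only): count the non-empty
   * rows of hbase:meta seen during a full scan.
   *
   *   final AtomicInteger rows = new AtomicInteger();   // java.util.concurrent.atomic
   *   fullScan(hConnection, new Visitor() {
   *     @Override
   *     public boolean visit(Result r) throws IOException {
   *       if (r != null && !r.isEmpty()) rows.incrementAndGet();
   *       return true; // keep scanning
   *     }
   *   });
   */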
844 
845   /**
846    * A {@link Visitor} that collects content out of passed {@link Result}.
847    */
848   static abstract class CollectingVisitor<T> implements Visitor {
849     final List<T> results = new ArrayList<T>();
850     @Override
851     public boolean visit(Result r) throws IOException {
852       if (r ==  null || r.isEmpty()) return true;
853       add(r);
854       return true;
855     }
856 
857     abstract void add(Result r);
858 
859     /**
860      * @return Collected results; wait till visits complete to collect all
861      * possible results
862      */
863     List<T> getResults() {
864       return this.results;
865     }
866   }
867 
868   /**
869    * Collects all Results returned.
870    */
871   static class CollectAllVisitor extends CollectingVisitor<Result> {
872     @Override
873     void add(Result r) {
874       this.results.add(r);
875     }
876   }
877 
878   /**
879    * Count regions in <code>hbase:meta</code> for passed table.
880    * @param c Configuration object
881    * @param tableName table name to count regions for
882    * @return Count of regions in table <code>tableName</code>
883    * @throws IOException
884    */
885   public static int getRegionCount(final Configuration c, final String tableName)
886       throws IOException {
887     HTable t = new HTable(c, tableName);
888     try {
889       return t.getRegionLocations().size();
890     } finally {
891       t.close();
892     }
893   }
894 
895   ////////////////////////
896   // Editing operations //
897   ////////////////////////
898 
899   /**
900    * Generates and returns a Put containing the region info for the catalog table
901    */
902   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
903     throws IOException {
904     Put put = new Put(regionInfo.getRegionName());
905     addRegionInfo(put, regionInfo);
906     return put;
907   }
908 
909   /**
910    * Generates and returns a Delete containing the region info for the catalog
911    * table
912    */
913   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
914     if (regionInfo == null) {
915       throw new IllegalArgumentException("Can't make a delete for null region");
916     }
917     Delete delete = new Delete(regionInfo.getRegionName());
918     return delete;
919   }
920 
921   /**
922    * Adds split daughters to the Put
923    */
924   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
925     if (splitA != null) {
926       put.addImmutable(
927         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
928     }
929     if (splitB != null) {
930       put.addImmutable(
931         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
932     }
933     return put;
934   }
935 
936   /**
937    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
938    * @param hConnection connection we're using
939    * @param p Put to add to hbase:meta
940    * @throws IOException
941    */
942   static void putToMetaTable(final HConnection hConnection, final Put p)
943     throws IOException {
944     put(getMetaHTable(hConnection), p);
945   }
946 
947   /**
948    * @param t Table to use (will be closed when done).
949    * @param p put to make
950    * @throws IOException
951    */
952   private static void put(final Table t, final Put p) throws IOException {
953     try {
954       t.put(p);
955     } finally {
956       t.close();
957     }
958   }
959 
960   /**
961    * Puts the passed <code>ps</code> to the <code>hbase:meta</code> table.
962    * @param hConnection connection we're using
963    * @param ps Puts to add to hbase:meta
964    * @throws IOException
965    */
966   public static void putsToMetaTable(final HConnection hConnection, final List<Put> ps)
967     throws IOException {
968     Table t = getMetaHTable(hConnection);
969     try {
970       t.put(ps);
971     } finally {
972       t.close();
973     }
974   }
975 
976   /**
977    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
978    * @param hConnection connection we're using
979    * @param d Delete to add to hbase:meta
980    * @throws IOException
981    */
982   static void deleteFromMetaTable(final HConnection hConnection, final Delete d)
983     throws IOException {
984     List<Delete> dels = new ArrayList<Delete>(1);
985     dels.add(d);
986     deleteFromMetaTable(hConnection, dels);
987   }
988 
989   /**
990    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
991    * @param hConnection connection we're using
992    * @param deletes Deletes to add to hbase:meta. This list should support #remove.
993    * @throws IOException
994    */
995   public static void deleteFromMetaTable(final HConnection hConnection, final List<Delete> deletes)
996     throws IOException {
997     Table t = getMetaHTable(hConnection);
998     try {
999       t.delete(deletes);
1000     } finally {
1001       t.close();
1002     }
1003   }
1004 
1005   /**
1006    * Deletes some replica columns corresponding to replicas for the passed rows
1007    * @param metaRows rows in hbase:meta
1008    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1009    * @param numReplicasToRemove how many replicas to remove
1010    * @param hConnection connection we're using to access meta table
1011    * @throws IOException
1012    */
1013   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1014     int replicaIndexToDeleteFrom, int numReplicasToRemove, HConnection hConnection)
1015       throws IOException {
1016     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1017     for (byte[] row : metaRows) {
1018       Delete deleteReplicaLocations = new Delete(row);
1019       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1020         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1021           getServerColumn(i));
1022         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1023           getSeqNumColumn(i));
1024         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1025           getStartCodeColumn(i));
1026       }
1027       deleteFromMetaTable(hConnection, deleteReplicaLocations);
1028     }
1029   }
1030 
1031   /**
1032    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1033    * @param hConnection connection we're using
1034    * @param mutations Puts and Deletes to execute on hbase:meta
1035    * @throws IOException
1036    */
1037   public static void mutateMetaTable(final HConnection hConnection,
1038                                      final List<Mutation> mutations)
1039     throws IOException {
1040     Table t = getMetaHTable(hConnection);
1041     try {
1042       t.batch(mutations);
1043     } catch (InterruptedException e) {
1044       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1045       ie.initCause(e);
1046       throw ie;
1047     } finally {
1048       t.close();
1049     }
1050   }
1051 
1052   /**
1053    * Adds a hbase:meta row for the specified new region.
1054    * @param hConnection connection we're using
1055    * @param regionInfo region information
1056    * @throws IOException if problem connecting or updating meta
1057    */
1058   public static void addRegionToMeta(HConnection hConnection,
1059                                      HRegionInfo regionInfo)
1060     throws IOException {
1061     putToMetaTable(hConnection, makePutFromRegionInfo(regionInfo));
1062     LOG.info("Added " + regionInfo.getRegionNameAsString());
1063   }
1064 
1065   /**
1066    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1067    * HTable is not flushed or closed.
1068    * @param meta the HTable for META
1069    * @param regionInfo region information
1070    * @throws IOException if problem connecting or updating meta
1071    */
1072   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1073     addRegionToMeta(meta, regionInfo, null, null);
1074   }
1075 
1076   /**
1077    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1078    * does not add its daughters as different rows, but adds information about the daughters
1079    * in the same row as the parent. Use
1080    * {@link #splitRegion(org.apache.hadoop.hbase.client.HConnection,
1081    *   HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1082    * if you want to do that.
1083    * @param meta the HTable for META
1084    * @param regionInfo region information
1085    * @param splitA first split daughter of the parent regionInfo
1086    * @param splitB second split daughter of the parent regionInfo
1087    * @throws IOException if problem connecting or updating meta
1088    */
1089   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1090                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1091     Put put = makePutFromRegionInfo(regionInfo);
1092     addDaughtersToPut(put, splitA, splitB);
1093     meta.put(put);
1094     if (LOG.isDebugEnabled()) {
1095       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1096     }
1097   }
1098 
1099   /**
1100    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1101    * does not add its daughters as different rows, but adds information about the daughters
1102    * in the same row as the parent. Use
1103    * {@link #splitRegion(HConnection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
1104    * if you want to do that.
1105    * @param hConnection connection we're using
1106    * @param regionInfo region information
1107    * @param splitA first split daughter of the parent regionInfo
1108    * @param splitB second split daughter of the parent regionInfo
1109    * @throws IOException if problem connecting or updating meta
1110    */
1111   public static void addRegionToMeta(HConnection hConnection, HRegionInfo regionInfo,
1112                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1113     Table meta = getMetaHTable(hConnection);
1114     try {
1115       addRegionToMeta(meta, regionInfo, splitA, splitB);
1116     } finally {
1117       meta.close();
1118     }
1119   }
1120 
1121   /**
1122    * Adds a hbase:meta row for each of the specified new regions.
1123    * @param hConnection connection we're using
1124    * @param regionInfos region information list
1125    * @throws IOException if problem connecting or updating meta
1126    */
1127   public static void addRegionsToMeta(HConnection hConnection,
1128                                       List<HRegionInfo> regionInfos)
1129     throws IOException {
1130     List<Put> puts = new ArrayList<Put>();
1131     for (HRegionInfo regionInfo : regionInfos) {
1132       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1133         puts.add(makePutFromRegionInfo(regionInfo));
1134       }
1135     }
1136     putsToMetaTable(hConnection, puts);
1137     LOG.info("Added " + puts.size());
1138   }
1139 
1140   /**
1141    * Adds a daughter region entry to meta.
1142    * @param regionInfo the region to put
1143    * @param sn the location of the region
1144    * @param openSeqNum the latest sequence number obtained when the region was open
1145    */
1146   public static void addDaughter(final HConnection hConnection,
1147       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1148       throws NotAllMetaRegionsOnlineException, IOException {
1149     Put put = new Put(regionInfo.getRegionName());
1150     addRegionInfo(put, regionInfo);
1151     if (sn != null) {
1152       addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1153     }
1154     putToMetaTable(hConnection, put);
1155     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1156       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1157   }
1158 
1159   /**
1160    * Merge the two regions into one in an atomic operation. Deletes the two
1161    * merging regions in hbase:meta and adds the merged region with the information of
1162    * two merging regions.
1163    * @param hConnection connection we're using
1164    * @param mergedRegion the merged region
1165    * @param regionA merge parent region A
1166    * @param regionB merge parent region B
1167    * @param sn the location of the region
1168    * @throws IOException
1169    */
1170   public static void mergeRegions(final HConnection hConnection, HRegionInfo mergedRegion,
1171       HRegionInfo regionA, HRegionInfo regionB, ServerName sn) throws IOException {
1172     Table meta = getMetaHTable(hConnection);
1173     try {
1174       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1175 
1176       // Put for parent
1177       Put putOfMerged = makePutFromRegionInfo(copyOfMerged);
1178       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1179         regionA.toByteArray());
1180       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1181         regionB.toByteArray());
1182 
1183       // Deletes for merging regions
1184       Delete deleteA = makeDeleteFromRegionInfo(regionA);
1185       Delete deleteB = makeDeleteFromRegionInfo(regionB);
1186 
1187       // The merged region is a new region, openSeqNum = 1 is fine.
1188       addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
1189 
1190       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1191         + HConstants.DELIMITER);
1192       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1193     } finally {
1194       meta.close();
1195     }
1196   }
1197 
1198   /**
1199    * Splits the region into two in an atomic operation. Offlines the parent
1200    * region with the information that it is split into two, and also adds
1201    * the daughter regions. Does not add the location information to the daughter
1202    * regions since they are not open yet.
1203    * @param hConnection connection we're using
1204    * @param parent the parent region which is split
1205    * @param splitA Split daughter region A
1206    * @param splitB Split daughter region B
1207    * @param sn the location of the region
1208    */
1209   public static void splitRegion(final HConnection hConnection,
1210                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1211                                  ServerName sn) throws IOException {
1212     Table meta = getMetaHTable(hConnection);
1213     try {
1214       HRegionInfo copyOfParent = new HRegionInfo(parent);
1215       copyOfParent.setOffline(true);
1216       copyOfParent.setSplit(true);
1217 
1218       //Put for parent
1219       Put putParent = makePutFromRegionInfo(copyOfParent);
1220       addDaughtersToPut(putParent, splitA, splitB);
1221 
1222       //Puts for daughters
1223       Put putA = makePutFromRegionInfo(splitA);
1224       Put putB = makePutFromRegionInfo(splitB);
1225 
1226       addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1227       addLocation(putB, sn, 1, splitB.getReplicaId());
1228 
1229       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1230       multiMutate(meta, tableRow, putParent, putA, putB);
1231     } finally {
1232       meta.close();
1233     }
1234   }
1235 
1236   /**
1237    * Performs an atomic multi-Mutate operation against the given table.
1238    */
1239   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1240       throws IOException {
1241     CoprocessorRpcChannel channel = table.coprocessorService(row);
1242     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1243       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1244     for (Mutation mutation : mutations) {
1245       if (mutation instanceof Put) {
1246         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1247           ClientProtos.MutationProto.MutationType.PUT, mutation));
1248       } else if (mutation instanceof Delete) {
1249         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1250           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1251       } else {
1252         throw new DoNotRetryIOException("multi in MetaTableAccessor doesn't support "
1253           + mutation.getClass().getName());
1254       }
1255     }
1256 
1257     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1258       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1259     try {
1260       service.mutateRows(null, mmrBuilder.build());
1261     } catch (ServiceException ex) {
1262       throw ProtobufUtil.toIOException(ex);
1263     }
1264   }
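  /*
   * Note: the method above relies on a coprocessor implementing MultiRowMutationService being
   * loaded on the hbase:meta region; all passed Puts and Deletes are then applied as a single
   * atomic operation against that region.
   */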
1265 
1266   /**
1267    * Updates the location of the specified region in hbase:meta to be the specified
1268    * server hostname and startcode.
1269    * <p>
1270    * Uses the passed connection to reach the server hosting
1271    * hbase:meta and makes edits to that region.
1272    *
1273    * @param hConnection connection we're using
1274    * @param regionInfo region to update location of
1275    * @param sn Server name
1276    * @throws IOException
1277    */
1278   public static void updateRegionLocation(HConnection hConnection,
1279                                           HRegionInfo regionInfo, ServerName sn, long updateSeqNum)
1280     throws IOException {
1281     updateLocation(hConnection, regionInfo, sn, updateSeqNum);
1282   }
1283 
1284   /**
1285    * Updates the location of the specified region to be the specified server.
1286    * <p>
1287    * Connects to the specified server which should be hosting the specified
1288    * catalog region name to perform the edit.
1289    *
1290    * @param hConnection connection we're using
1291    * @param regionInfo region to update location of
1292    * @param sn Server name
1293    * @param openSeqNum the latest sequence number obtained when the region was open
1294    * @throws IOException In particular could throw {@link java.net.ConnectException}
1295    * if the server is down on the other end.
1296    */
1297   private static void updateLocation(final HConnection hConnection,
1298                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum)
1299     throws IOException {
1300     // region replicas are kept in the primary region's row
1301     Put put = new Put(getMetaKeyForRegion(regionInfo));
1302     addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
1303     putToMetaTable(hConnection, put);
1304     LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
1305       " with server=" + sn);
1306   }
1307 
1308   /**
1309    * Deletes the specified region from META.
1310    * @param hConnection connection we're using
1311    * @param regionInfo region to be deleted from META
1312    * @throws IOException
1313    */
1314   public static void deleteRegion(HConnection hConnection,
1315                                   HRegionInfo regionInfo)
1316     throws IOException {
1317     Delete delete = new Delete(regionInfo.getRegionName());
1318     deleteFromMetaTable(hConnection, delete);
1319     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1320   }
1321 
1322   /**
1323    * Deletes the specified regions from META.
1324    * @param hConnection connection we're using
1325    * @param regionsInfo list of regions to be deleted from META
1326    * @throws IOException
1327    */
1328   public static void deleteRegions(HConnection hConnection,
1329                                    List<HRegionInfo> regionsInfo) throws IOException {
1330     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1331     for (HRegionInfo hri: regionsInfo) {
1332       deletes.add(new Delete(hri.getRegionName()));
1333     }
1334     deleteFromMetaTable(hConnection, deletes);
1335     LOG.info("Deleted " + regionsInfo);
1336   }
1337 
1338   /**
1339    * Adds and Removes the specified regions from hbase:meta
1340    * @param hConnection connection we're using
1341    * @param regionsToRemove list of regions to be deleted from META
1342    * @param regionsToAdd list of regions to be added to META
1343    * @throws IOException
1344    */
1345   public static void mutateRegions(HConnection hConnection,
1346                                    final List<HRegionInfo> regionsToRemove,
1347                                    final List<HRegionInfo> regionsToAdd)
1348     throws IOException {
1349     List<Mutation> mutation = new ArrayList<Mutation>();
1350     if (regionsToRemove != null) {
1351       for (HRegionInfo hri: regionsToRemove) {
1352         mutation.add(new Delete(hri.getRegionName()));
1353       }
1354     }
1355     if (regionsToAdd != null) {
1356       for (HRegionInfo hri: regionsToAdd) {
1357         mutation.add(makePutFromRegionInfo(hri));
1358       }
1359     }
1360     mutateMetaTable(hConnection, mutation);
1361     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1362       LOG.debug("Deleted " + regionsToRemove);
1363     }
1364     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1365       LOG.debug("Added " + regionsToAdd);
1366     }
1367   }
1368 
1369   /**
1370    * Overwrites the specified regions in hbase:meta.
1371    * @param hConnection connection we're using
1372    * @param regionInfos list of regions to be added to META
1373    * @throws IOException
1374    */
1375   public static void overwriteRegions(HConnection hConnection,
1376                                       List<HRegionInfo> regionInfos) throws IOException {
1377     deleteRegions(hConnection, regionInfos);
1378     // Why sleep? This is the easiest way to ensure that the previous deletes do not
1379     // eclipse the following puts, which could happen if they land in the same ts on the server.
1380     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
1381     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
1382     Threads.sleep(20);
1383     addRegionsToMeta(hConnection, regionInfos);
1384     LOG.info("Overwritten " + regionInfos);
1385   }
1386 
1387   /**
1388    * Deletes merge qualifiers for the specified merged region.
1389    * @param hConnection connection we're using
1390    * @param mergedRegion the merged region whose merge qualifiers are to be deleted
1391    * @throws IOException
1392    */
1393   public static void deleteMergeQualifiers(HConnection hConnection,
1394                                            final HRegionInfo mergedRegion) throws IOException {
1395     Delete delete = new Delete(mergedRegion.getRegionName());
1396     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
1397     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
1398     deleteFromMetaTable(hConnection, delete);
1399     LOG.info("Deleted references in merged region "
1400       + mergedRegion.getRegionNameAsString() + ", qualifier="
1401       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1402       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1403   }
1404 
1405   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
1406     throws IOException {
1407     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1408       hri.toByteArray());
1409     return p;
1410   }
1411 
1412   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
1413     // using regionserver's local time as the timestamp of Put.
1414     // See: HBASE-11536
1415     long now = EnvironmentEdgeManager.currentTime();
1416     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now,
1417       Bytes.toBytes(sn.getHostAndPort()));
1418     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now,
1419       Bytes.toBytes(sn.getStartcode()));
1420     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now,
1421       Bytes.toBytes(openSeqNum));
1422     return p;
1423   }
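  /*
   * For the default replica (replicaId 0) the Put built above carries info:server,
   * info:serverstartcode and info:seqnumDuringOpen; for replicaId N it carries the
   * corresponding qualifiers suffixed with "_" + the hex-formatted replicaId, all stamped
   * with the same local timestamp (see HBASE-11536).
   */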
1424 }