/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.snapshot;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.FSVisitor;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.IOUtils;

/**
 * Helper to Restore/Clone a Snapshot
 *
 * <p>The helper assumes that the target table has already been created; calling
 * restoreHdfsRegions() restores the content present in the snapshot as the new content of the table.
 *
 * <p>Clone from Snapshot: If the target table is empty, the restore operation
 * is just a "clone operation", where the only operations are:
 * <ul>
 *  <li>for each region in the snapshot create a new region
 *    (note that the region will have a different name, since the encoding contains the table name)
 *  <li>for each file in the region create a new HFileLink to point to the original file.
 *  <li>restore the logs, if any
 * </ul>
 *
 * <p>Restore from Snapshot:
 * <ul>
 *  <li>for each region in the table verify which are available in the snapshot and which are not
 *    <ul>
 *    <li>if the region is not present in the snapshot, remove it.
 *    <li>if the region is present in the snapshot
 *      <ul>
 *      <li>for each file in the table region verify which are available in the snapshot
 *        <ul>
 *          <li>if the hfile is not present in the snapshot, remove it
 *          <li>if the hfile is present, keep it (nothing to do)
 *        </ul>
 *      <li>for each file in the snapshot region but not in the table
 *        <ul>
 *          <li>create a new HFileLink that points to the original file
 *        </ul>
 *      </ul>
 *    </ul>
 *  <li>for each region in the snapshot not present in the current table state
 *    <ul>
 *    <li>create a new region and for each file in the region create a new HFileLink
 *      (This is the same as the clone operation)
 *    </ul>
 *  <li>restore the logs, if any
 * </ul>
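 *
 * <p>Illustrative usage only (a rough sketch, not taken from this class or its tests): it assumes
 * the Configuration, FileSystem, snapshot description and directory, table descriptor and
 * directory, monitor and status objects have already been obtained elsewhere under the names
 * used below:
 * <pre>
 *   RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
 *       snapshotDesc, snapshotDir, htd, tableDir, monitor, status);
 *   RestoreMetaChanges metaChanges = helper.restoreHdfsRegions();
 *   // metaChanges may be null if the snapshot looks empty; otherwise it describes
 *   // the META updates the caller is expected to perform (see RestoreMetaChanges).
 * </pre>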
 */
@InterfaceAudience.Private
public class RestoreSnapshotHelper {
  private static final Log LOG = LogFactory.getLog(RestoreSnapshotHelper.class);

  private final Map<byte[], byte[]> regionsMap =
        new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);

  private final Map<String, Pair<String, String> > parentsMap =
      new HashMap<String, Pair<String, String> >();

  private final ForeignExceptionDispatcher monitor;
  private final MonitoredTask status;

  private final SnapshotDescription snapshotDesc;
  private final Path snapshotDir;

  private final HTableDescriptor tableDesc;
  private final Path tableDir;

  private final Configuration conf;
  private final FileSystem fs;

  public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs,
      final SnapshotDescription snapshotDescription, final Path snapshotDir,
      final HTableDescriptor tableDescriptor, final Path tableDir,
      final ForeignExceptionDispatcher monitor, final MonitoredTask status)
  {
    this.fs = fs;
    this.conf = conf;
    this.snapshotDesc = snapshotDescription;
    this.snapshotDir = snapshotDir;
    this.tableDesc = tableDescriptor;
    this.tableDir = tableDir;
    this.monitor = monitor;
    this.status = status;
  }

  /**
   * Restore the on-disk table to a specified snapshot state.
   * @return a {@link RestoreMetaChanges} describing the regions touched by the restore operation
   */
  public RestoreMetaChanges restoreHdfsRegions() throws IOException {
    LOG.debug("starting restore");
    Set<String> snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
    if (snapshotRegionNames == null) {
      LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty");
      return null;
    }

    RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap);

    // Identify which regions are still available and which are not.
    // NOTE: we rely upon the region name as: "table name, start key, end key"
    List<HRegionInfo> tableRegions = getTableRegions();
    if (tableRegions != null) {
      monitor.rethrowException();
      for (HRegionInfo regionInfo: tableRegions) {
        String regionName = regionInfo.getEncodedName();
        if (snapshotRegionNames.contains(regionName)) {
          LOG.info("region to restore: " + regionName);
          snapshotRegionNames.remove(regionName);
          metaChanges.addRegionToRestore(regionInfo);
        } else {
          LOG.info("region to remove: " + regionName);
          metaChanges.addRegionToRemove(regionInfo);
        }
      }

      // Restore regions using the snapshot data
      monitor.rethrowException();
      status.setStatus("Restoring table regions...");
      restoreHdfsRegions(metaChanges.getRegionsToRestore());
      status.setStatus("Finished restoring all table regions.");

      // Remove regions from the current table
      monitor.rethrowException();
      status.setStatus("Starting to delete excess regions from table");
      removeHdfsRegions(metaChanges.getRegionsToRemove());
      status.setStatus("Finished deleting excess regions from table.");
    }

    // Regions to Add: present in the snapshot but not in the current table
    if (snapshotRegionNames.size() > 0) {
      List<HRegionInfo> regionsToAdd = new LinkedList<HRegionInfo>();

      monitor.rethrowException();
      for (String regionName: snapshotRegionNames) {
        LOG.info("region to add: " + regionName);
        Path regionDir = new Path(snapshotDir, regionName);
        regionsToAdd.add(HRegion.loadDotRegionInfoFileContent(fs, regionDir));
      }

      // Create new regions cloning from the snapshot
      monitor.rethrowException();
      status.setStatus("Cloning regions...");
      HRegionInfo[] clonedRegions = cloneHdfsRegions(regionsToAdd);
      metaChanges.setNewRegions(clonedRegions);
      status.setStatus("Finished cloning regions.");
    }

    // Restore WALs
    monitor.rethrowException();
    status.setStatus("Restoring WALs to table...");
    restoreWALs();
    status.setStatus("Finished restoring WALs to table.");

    return metaChanges;
  }

  /**
   * Describes the set of operations needed to update META after restore.
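   *
   * <p>A rough, illustrative sketch only (not taken from this class): assuming a
   * CatalogTracker instance named catalogTracker is available and metaChanges is the
   * object returned by restoreHdfsRegions(), a caller could apply the changes along
   * these lines (the exact MetaEditor method signatures are an assumption here):
   * <pre>
   *   if (metaChanges.hasRegionsToRemove()) {
   *     MetaEditor.deleteRegions(catalogTracker, metaChanges.getRegionsToRemove());
   *   }
   *   if (metaChanges.hasRegionsToAdd()) {
   *     MetaEditor.addRegionsToMeta(catalogTracker, metaChanges.getRegionsToAdd());
   *   }
   *   // fix up offline split parents referenced by the restored regions
   *   metaChanges.updateMetaParentRegions(catalogTracker, metaChanges.getRegionsToAdd());
   * </pre>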
   */
  public static class RestoreMetaChanges {
    private final Map<String, Pair<String, String> > parentsMap;

    private List<HRegionInfo> regionsToRestore = null;
    private List<HRegionInfo> regionsToRemove = null;
    private List<HRegionInfo> regionsToAdd = null;

    RestoreMetaChanges(final Map<String, Pair<String, String> > parentsMap) {
      this.parentsMap = parentsMap;
    }

    /**
     * @return true if there are new regions
     */
    public boolean hasRegionsToAdd() {
      return this.regionsToAdd != null && this.regionsToAdd.size() > 0;
    }

    /**
     * Returns the list of new regions added during the on-disk restore.
     * The caller is responsible for adding the regions to META,
     * e.g. MetaEditor.addRegionsToMeta(...)
     * @return the list of regions to add to META
     */
    public List<HRegionInfo> getRegionsToAdd() {
      return this.regionsToAdd;
    }

    /**
     * @return true if there are regions to restore
     */
    public boolean hasRegionsToRestore() {
      return this.regionsToRestore != null && this.regionsToRestore.size() > 0;
    }

    /**
     * Returns the list of 'restored regions' during the on-disk restore.
     * The caller is responsible for adding the regions to META if not already present.
     * @return the list of regions restored
     */
    public List<HRegionInfo> getRegionsToRestore() {
      return this.regionsToRestore;
    }

    /**
     * @return true if there are regions to remove
     */
    public boolean hasRegionsToRemove() {
      return this.regionsToRemove != null && this.regionsToRemove.size() > 0;
    }

    /**
     * Returns the list of regions removed during the on-disk restore.
     * The caller is responsible for removing the regions from META,
     * e.g. MetaEditor.deleteRegions(...)
     * @return the list of regions to remove from META
     */
    public List<HRegionInfo> getRegionsToRemove() {
      return this.regionsToRemove;
    }

    void setNewRegions(final HRegionInfo[] hris) {
      if (hris != null) {
        regionsToAdd = Arrays.asList(hris);
      } else {
        regionsToAdd = null;
      }
    }

    void addRegionToRemove(final HRegionInfo hri) {
      if (regionsToRemove == null) {
        regionsToRemove = new LinkedList<HRegionInfo>();
      }
      regionsToRemove.add(hri);
    }

    void addRegionToRestore(final HRegionInfo hri) {
      if (regionsToRestore == null) {
        regionsToRestore = new LinkedList<HRegionInfo>();
      }
      regionsToRestore.add(hri);
    }

    public void updateMetaParentRegions(final CatalogTracker catalogTracker,
        final List<HRegionInfo> regionInfos) throws IOException {
      if (regionInfos == null || parentsMap.isEmpty()) return;

      // Extract region names and offlined regions
      Map<String, HRegionInfo> regionsByName = new HashMap<String, HRegionInfo>(regionInfos.size());
      List<HRegionInfo> parentRegions = new LinkedList<HRegionInfo>();
      for (HRegionInfo regionInfo: regionInfos) {
        if (regionInfo.isSplitParent()) {
          parentRegions.add(regionInfo);
        } else {
          regionsByName.put(regionInfo.getEncodedName(), regionInfo);
        }
      }

      // Update Offline parents
      for (HRegionInfo regionInfo: parentRegions) {
        Pair<String, String> daughters = parentsMap.get(regionInfo.getEncodedName());

        // TODO-REMOVE-ME: HConnectionManager.isTableAvailable() is checking the SERVER_QUALIFIER
        // also on offline regions, so to keep the compatibility with older clients we must add
        // a location to this region even if it will never be assigned. (See HBASE-9233)
        MetaEditor.updateRegionLocation(catalogTracker, regionInfo,
                                        catalogTracker.getMetaLocation());

        if (daughters == null) {
          // The snapshot contains an unreferenced region.
          // It will be removed by the CatalogJanitor.
          LOG.warn("Skip update of unreferenced offline parent: " + regionInfo);
          continue;
        }

        // One side of the split is already compacted
        if (daughters.getSecond() == null) {
          daughters.setSecond(daughters.getFirst());
        }

        LOG.debug("Update splits parent " + regionInfo.getEncodedName() + " -> " + daughters);
        MetaEditor.offlineParentInMeta(catalogTracker, regionInfo,
            regionsByName.get(daughters.getFirst()),
            regionsByName.get(daughters.getSecond()));
      }
    }
  }

  /**
   * Remove specified regions from the file-system, using the archiver.
   */
  private void removeHdfsRegions(final List<HRegionInfo> regions) throws IOException {
    if (regions != null && regions.size() > 0) {
      for (HRegionInfo hri: regions) {
        HFileArchiver.archiveRegion(conf, fs, hri);
      }
    }
  }

  /**
   * Restore specified regions by restoring content to the snapshot state.
   */
  private void restoreHdfsRegions(final List<HRegionInfo> regions) throws IOException {
    if (regions == null || regions.size() == 0) return;
    for (HRegionInfo hri: regions) restoreRegion(hri);
  }

  /**
   * Restore region by removing files not in the snapshot
   * and adding the missing ones from the snapshot.
   */
  private void restoreRegion(HRegionInfo regionInfo) throws IOException {
    Path snapshotRegionDir = new Path(snapshotDir, regionInfo.getEncodedName());
    Map<String, List<String>> snapshotFiles =
                SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir);
    Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
    String tableName = tableDesc.getNameAsString();

    // Restore families present in the table
    for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
      byte[] family = Bytes.toBytes(familyDir.getName());
      Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
      List<String> snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName());
      if (snapshotFamilyFiles != null) {
        List<String> hfilesToAdd = new LinkedList<String>();
        for (String hfileName: snapshotFamilyFiles) {
          if (familyFiles.contains(hfileName)) {
            // HFile already present
            familyFiles.remove(hfileName);
          } else {
            // HFile missing
            hfilesToAdd.add(hfileName);
          }
        }

        // Remove hfiles not present in the snapshot
        for (String hfileName: familyFiles) {
          Path hfile = new Path(familyDir, hfileName);
          LOG.trace("Removing hfile=" + hfile +
            " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
          HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
        }

        // Restore Missing files
        for (String hfileName: hfilesToAdd) {
          LOG.trace("Adding HFileLink " + hfileName +
            " to region=" + regionInfo.getEncodedName() + " table=" + tableName);
          restoreStoreFile(familyDir, regionInfo, hfileName);
        }
      } else {
        // Family doesn't exist in the snapshot
        LOG.trace("Removing family=" + Bytes.toString(family) +
          " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
        HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, family);
        fs.delete(familyDir, true);
      }
    }

    // Add families not present in the table
    for (Map.Entry<String, List<String>> familyEntry: snapshotFiles.entrySet()) {
      Path familyDir = new Path(regionDir, familyEntry.getKey());
      if (!fs.mkdirs(familyDir)) {
        throw new IOException("Unable to create familyDir=" + familyDir);
      }

      for (String hfileName: familyEntry.getValue()) {
        LOG.trace("Adding HFileLink " + hfileName + " to table=" + tableName);
        restoreStoreFile(familyDir, regionInfo, hfileName);
      }
    }
  }

  /**
   * @return The set of files in the specified family directory.
   */
  private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
    Set<String> familyFiles = new HashSet<String>();

    FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
    if (hfiles == null) return familyFiles;

    for (FileStatus hfileRef: hfiles) {
      String hfileName = hfileRef.getPath().getName();
      familyFiles.add(hfileName);
    }

    return familyFiles;
  }

  /**
   * Clone specified regions. For each region create a new region
   * and create an HFileLink for each hfile.
   */
  private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
    if (regions == null || regions.size() == 0) return null;

    final Map<String, HRegionInfo> snapshotRegions =
      new HashMap<String, HRegionInfo>(regions.size());

    // clone region info (replace the embedded tableName with the new one)
    HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
    for (int i = 0; i < clonedRegionsInfo.length; ++i) {
      // clone the region info from the snapshot region info
      HRegionInfo snapshotRegionInfo = regions.get(i);
      clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

      // add the region name mapping between snapshot and cloned
      String snapshotRegionName = snapshotRegionInfo.getEncodedName();
      String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
      regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
      LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

      // Add mapping between cloned region name and snapshot region info
      snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
    }

    // create the regions on disk
    ModifyRegionUtils.createRegions(conf, tableDir.getParent(),
      tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
        public void fillRegion(final HRegion region) throws IOException {
          cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
        }
      });

    return clonedRegionsInfo;
  }

  /**
   * Clone region directory content from the snapshot info.
   *
   * Each region is encoded with the table name, so the cloned region will have
   * a different region name.
   *
   * Instead of copying the hfiles, an HFileLink is created.
   *
   * @param region {@link HRegion} cloned
   * @param snapshotRegionInfo snapshot region info of the region to clone
   */
  private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo)
      throws IOException {
    final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
    final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
    final String tableName = tableDesc.getNameAsString();
    SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir,
      new FSVisitor.StoreFileVisitor() {
        public void storeFile (final String region, final String family, final String hfile)
            throws IOException {
          LOG.info("Adding HFileLink " + hfile + " to table=" + tableName);
          Path familyDir = new Path(regionDir, family);
          restoreStoreFile(familyDir, snapshotRegionInfo, hfile);
        }
    });
  }

  /**
   * Create a new {@link HFileLink} to reference the store file.
   * <p>The store file in the snapshot can be a simple hfile, an HFileLink or a reference.
   * <ul>
   *   <li>hfile: abc -> table=region-abc
   *   <li>reference: abc.1234 -> table=region-abc.1234
   *   <li>hfilelink: table=region-hfile -> table=region-hfile
   * </ul>
   * @param familyDir destination directory for the store file
   * @param regionInfo destination region info for the table
   * @param hfileName store file name (can be a Reference, HFileLink or simple HFile)
   */
  private void restoreStoreFile(final Path familyDir, final HRegionInfo regionInfo,
      final String hfileName) throws IOException {
    if (HFileLink.isHFileLink(hfileName)) {
      HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName);
    } else if (StoreFile.isReference(hfileName)) {
      restoreReferenceFile(familyDir, regionInfo, hfileName);
    } else {
      HFileLink.create(conf, fs, familyDir, regionInfo, hfileName);
    }
  }

  /**
   * Create a new {@link Reference} as a copy of the source one.
   * <p><blockquote><pre>
   * The source table looks like:
   *    1234/abc      (original file)
   *    5678/abc.1234 (reference file)
   *
   * After the clone operation, it looks like:
   *   wxyz/table=1234-abc
   *   stuv/table=1234-abc.wxyz
   *
   * NOTE that the region name in the clone changes (md5 of regioninfo)
   * and the reference should reflect that change.
   * </pre></blockquote>
   * @param familyDir destination directory for the store file
   * @param regionInfo destination region info for the table
   * @param hfileName reference file name
   */
  private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
      final String hfileName) throws IOException {
    // Extract the referred information (hfile name and parent region)
    String snapshotTable = snapshotDesc.getTable();
    Path refPath = StoreFile.getReferredToFile(new Path(new Path(new Path(
        snapshotTable, regionInfo.getEncodedName()), familyDir.getName()),
        hfileName));
    String snapshotRegionName = refPath.getParent().getParent().getName();
    String fileName = refPath.getName();

    // The new reference should have the cloned region name as parent, if it is a clone.
    String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
    if (clonedRegionName == null) clonedRegionName = snapshotRegionName;

    // The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
    Path linkPath = null;
    String refLink = fileName;
    if (!HFileLink.isHFileLink(fileName)) {
      refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
      linkPath = new Path(familyDir,
        HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
    }

    Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);

    // Create the new reference
    InputStream in;
    if (linkPath != null) {
      in = new HFileLink(conf, linkPath).open(fs);
    } else {
      linkPath = new Path(new Path(HRegion.getRegionDir(snapshotDir, regionInfo.getEncodedName()),
                      familyDir.getName()), hfileName);
      in = fs.open(linkPath);
    }
    OutputStream out = fs.create(outPath);
    IOUtils.copyBytes(in, out, conf);

    // Add the daughter region to the map
    String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
    LOG.debug("Restore reference " + regionName + " to " + clonedRegionName);
    synchronized (parentsMap) {
      Pair<String, String> daughters = parentsMap.get(clonedRegionName);
      if (daughters == null) {
        daughters = new Pair<String, String>(regionName, null);
        parentsMap.put(clonedRegionName, daughters);
      } else if (!regionName.equals(daughters.getFirst())) {
        daughters.setSecond(regionName);
      }
    }
  }

  /**
   * Create a new {@link HRegionInfo} from the snapshot region info.
   * Keep the same startKey, endKey, regionId and split information but change
   * the table name.
   *
   * @param snapshotRegionInfo Info for region to clone.
   * @return the new HRegionInfo instance
   */
  public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
    HRegionInfo regionInfo = new HRegionInfo(tableDesc.getName(),
                      snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
                      snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
    regionInfo.setOffline(snapshotRegionInfo.isOffline());
    return regionInfo;
  }

  /**
   * Restore snapshot WALs.
   *
   * Global snapshots keep a reference to the region server logs present during the snapshot.
   * (/hbase/.snapshot/snapshotName/.logs/hostName/logName)
   *
   * Since each log contains data from different tables, logs must be split to
   * extract the entries for the table that we are interested in.
   */
  private void restoreWALs() throws IOException {
    final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
                                Bytes.toBytes(snapshotDesc.getTable()), regionsMap);
    try {
      // Recover.Edits
      SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir,
          new FSVisitor.RecoveredEditsVisitor() {
        public void recoveredEdits (final String region, final String logfile) throws IOException {
          Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
          logSplitter.splitRecoveredEdit(path);
        }
      });

      // Region Server Logs
      SnapshotReferenceUtil.visitLogFiles(fs, snapshotDir, new FSVisitor.LogFileVisitor() {
        public void logFile (final String server, final String logfile) throws IOException {
          logSplitter.splitLog(server, logfile);
        }
      });
    } finally {
      logSplitter.close();
    }
  }

  /**
   * @return the list of regions contained in the table
   */
  private List<HRegionInfo> getTableRegions() throws IOException {
    LOG.debug("get table regions: " + tableDir);
    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    if (regionDirs == null) return null;

    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
    for (FileStatus regionDir: regionDirs) {
      HRegionInfo hri = HRegion.loadDotRegionInfoFileContent(fs, regionDir.getPath());
      regions.add(hri);
    }
    LOG.debug("found " + regions.size() + " regions for table=" + tableDesc.getNameAsString());
    return regions;
  }

  /**
   * Create a new table descriptor cloning the snapshot table schema.
   *
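   * <p>Illustrative usage only; the descriptor variable and table name below are placeholders,
   * not values used by this class:
   * <pre>
   *   HTableDescriptor clonedHtd = RestoreSnapshotHelper.cloneTableSchema(
   *     snapshotTableDescriptor, Bytes.toBytes("clonedTableName"));
   * </pre>
   *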
   * @param snapshotTableDescriptor table descriptor of the table in the snapshot
   * @param tableName name of the cloned table
   * @return cloned table descriptor
   * @throws IOException
   */
  public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
      final byte[] tableName) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
      htd.addFamily(hcd);
    }
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        snapshotTableDescriptor.getValues().entrySet()) {
      htd.setValue(e.getKey(), e.getValue());
    }
    return htd;
  }
}