/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.snapshot;

import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.TimeoutExceptionInjector;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.MetricsMaster;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask;
import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.zookeeper.KeeperException;

/**
 * Take a snapshot of a disabled table.
 * <p>
 * Table must exist when taking the snapshot, or results are undefined.
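 * <p>
 * Rough usage sketch (illustrative only; in practice the master's snapshot machinery
 * constructs and drives this handler, and {@code snapshot} / {@code masterServices} below
 * are placeholders for the values that caller supplies):
 * <pre>{@code
 * DisabledTableSnapshotHandler handler =
 *     new DisabledTableSnapshotHandler(snapshot, masterServices).prepare();
 * // once the prepared handler is submitted for execution, snapshotRegions(...) is invoked
 * // with the (region, server) pairs of the table being snapshotted
 * }</pre>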
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
  private static final Log LOG = LogFactory.getLog(DisabledTableSnapshotHandler.class);
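
  // Timer bound to this snapshot's error monitor: if the operation runs past the configured
  // master timeout, the injector reports a failure rather than letting the snapshot hang.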
  private final TimeoutExceptionInjector timeoutInjector;

  /**
   * Creates a handler that takes a snapshot of an offline (disabled) table.
   * @param snapshot descriptor of the snapshot to take
   * @param masterServices master services provider
   */
  public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
      final MasterServices masterServices) {
    super(snapshot, masterServices);

    // setup the timer that fails the snapshot if it runs longer than the configured timeout
    timeoutInjector = TakeSnapshotUtils.getMasterTimerAndBindToMonitor(snapshot, conf, monitor);
  }

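  /**
   * Covariant override of the parent's prepare step: performs the same setup and returns
   * this handler with its concrete type so callers can chain the call.
   */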
  @Override
  public DisabledTableSnapshotHandler prepare() throws Exception {
    return (DisabledTableSnapshotHandler) super.prepare();
  }

  // TODO consider parallelizing these operations since they are independent. Right now it's just
  // easier to keep them serial though
  @Override
  public void snapshotRegions(List<Pair<HRegionInfo, ServerName>> regionsAndLocations)
      throws IOException, KeeperException {
    try {
      timeoutInjector.start();

      Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);

      // 1. get all the regions hosting this table.

      // collect just the region info from each (region, server) pair
      Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
      for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
        regions.add(p.getFirst());
      }

      // 2. for each region, write all the info to disk
      String msg = "Starting to write region info and WALs for each region of offline snapshot: "
          + ClientSnapshotDescriptionUtils.toString(snapshot);
      LOG.info(msg);
      status.setStatus(msg);
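      // snapshotDisabledRegion is inherited from TakeSnapshotHandler; it writes the region's
      // metadata and file references into the snapshot's working directory.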
      for (HRegionInfo regionInfo : regions) {
        snapshotDisabledRegion(regionInfo);
      }

      // 3. write the table info to disk
      LOG.info("Starting to copy tableinfo for offline snapshot: " +
          ClientSnapshotDescriptionUtils.toString(snapshot));
      TableInfoCopyTask tableInfoCopyTask = new TableInfoCopyTask(this.monitor, snapshot, fs,
          FSUtils.getRootDir(conf));
      tableInfoCopyTask.call();
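      // surface any failure reported asynchronously (for example by the bound timeout injector)
      // before declaring the table info copy successful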
      monitor.rethrowException();
      status.setStatus("Finished copying tableinfo for snapshot of table: " +
          snapshotTable);
    } catch (Exception e) {
      // make sure we capture the exception to propagate back to the client later
      String reason = "Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
          + " due to exception: " + e.getMessage();
      ForeignException ee = new ForeignException(reason, e);
      monitor.receive(ee);
      status.abort("Snapshot of table: " + snapshotTable +
          " failed because " + e.getMessage());
    } finally {
      LOG.debug("Marking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
          + " as finished.");

      // 4. mark the timer as finished - even if we got an exception, we don't need to time the
      // operation any further
      timeoutInjector.complete();
    }
  }
}