/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;

/**
 * A Scanner which performs a scan over snapshot files. Using this class requires copying the
 * snapshot to a temporary empty directory, which will copy the snapshot reference files into that
 * directory. Actual data files are not copied.
 * <p>
 * This also allows one to run the scan from an online or offline hbase cluster. The snapshot files
 * can be exported by using the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, to a pure-hdfs
 * cluster, and this scanner can be used to run the scan directly over the snapshot files. The
 * snapshot should not be deleted while there are open scanners reading from snapshot files.
 * <p>
 * An internal RegionScanner is used to execute the {@link Scan} obtained from the user for each
 * region in the snapshot.
 * <p>
 * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from
 * snapshot files and data files. HBase also enforces security because all the requests are handled
 * by the server layer, and the user cannot read from the data files directly. To read from snapshot
 * files directly from the file system, the user who is running the MR job must have sufficient
 * permissions to access snapshot and reference files. This means that to run mapreduce over
 * snapshot files, the job has to be run as the HBase user or the user must have group or other
 * privileges in the filesystem (See HBASE-8369). Note that giving other users access to read from
 * snapshot/data files will completely circumvent the access control enforced by HBase. See
 * org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.
063 */ 064@InterfaceAudience.Private 065public class TableSnapshotScanner extends AbstractClientScanner { 066 067 private static final Logger LOG = LoggerFactory.getLogger(TableSnapshotScanner.class); 068 069 private Configuration conf; 070 private String snapshotName; 071 private FileSystem fs; 072 private Path rootDir; 073 private Path restoreDir; 074 private Scan scan; 075 private ArrayList<RegionInfo> regions; 076 private TableDescriptor htd; 077 private final boolean snapshotAlreadyRestored; 078 079 private ClientSideRegionScanner currentRegionScanner = null; 080 private int currentRegion = -1; 081 082 private int numOfCompleteRows = 0; 083 084 /** 085 * Creates a TableSnapshotScanner. 086 * @param conf the configuration 087 * @param restoreDir a temporary directory to copy the snapshot files into. Current user should 088 * have write permissions to this directory, and this should not be a 089 * subdirectory of rootDir. The scanner deletes the contents of the directory 090 * once the scanner is closed. 091 * @param snapshotName the name of the snapshot to read from 092 * @param scan a Scan representing scan parameters 093 * @throws IOException in case of error 094 */ 095 public TableSnapshotScanner(Configuration conf, Path restoreDir, String snapshotName, Scan scan) 096 throws IOException { 097 this(conf, CommonFSUtils.getRootDir(conf), restoreDir, snapshotName, scan); 098 } 099 100 public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, 101 String snapshotName, Scan scan) throws IOException { 102 this(conf, rootDir, restoreDir, snapshotName, scan, false); 103 } 104 105 /** 106 * Creates a TableSnapshotScanner. 107 * @param conf the configuration 108 * @param rootDir root directory for HBase. 109 * @param restoreDir a temporary directory to copy the snapshot files into. Current 110 * user should have write permissions to this directory, and this 111 * should not be a subdirectory of rootdir. 
The scanner deletes the 112 * contents of the directory once the scanner is closed. 113 * @param snapshotName the name of the snapshot to read from 114 * @param scan a Scan representing scan parameters 115 * @param snapshotAlreadyRestored true to indicate that snapshot has been restored. 116 * @throws IOException in case of error 117 */ 118 public TableSnapshotScanner(Configuration conf, Path rootDir, Path restoreDir, 119 String snapshotName, Scan scan, boolean snapshotAlreadyRestored) throws IOException { 120 this.conf = conf; 121 this.snapshotName = snapshotName; 122 this.rootDir = rootDir; 123 this.scan = scan; 124 this.snapshotAlreadyRestored = snapshotAlreadyRestored; 125 this.fs = rootDir.getFileSystem(conf); 126 conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false); 127 128 if (snapshotAlreadyRestored) { 129 this.restoreDir = restoreDir; 130 openWithoutRestoringSnapshot(); 131 } else { 132 // restoreDir will be deleted in close(), use a unique sub directory 133 this.restoreDir = new Path(restoreDir, UUID.randomUUID().toString()); 134 openWithRestoringSnapshot(); 135 } 136 137 initScanMetrics(scan); 138 } 139 140 private void openWithoutRestoringSnapshot() throws IOException { 141 Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); 142 SnapshotProtos.SnapshotDescription snapshotDesc = 143 SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); 144 145 SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc); 146 List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests(); 147 if (regionManifests == null) { 148 throw new IllegalArgumentException("Snapshot seems empty, snapshotName: " + snapshotName); 149 } 150 151 regions = new ArrayList<>(regionManifests.size()); 152 regionManifests.stream().map(r -> ProtobufUtil.toRegionInfo(r.getRegionInfo())) 153 .filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); 154 htd = manifest.getTableDescriptor(); 155 } 156 157 
private boolean isValidRegion(RegionInfo hri) { 158 // An offline split parent region should be excluded. 159 if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) { 160 return false; 161 } 162 return PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), 163 hri.getEndKey()); 164 } 165 166 private void openWithRestoringSnapshot() throws IOException { 167 final RestoreSnapshotHelper.RestoreMetaChanges meta = 168 RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName); 169 final List<RegionInfo> restoredRegions = meta.getRegionsToAdd(); 170 171 htd = meta.getTableDescriptor(); 172 regions = new ArrayList<>(restoredRegions.size()); 173 restoredRegions.stream().filter(this::isValidRegion).sorted().forEach(r -> regions.add(r)); 174 } 175 176 @Override 177 public Result next() throws IOException { 178 Result result = null; 179 while (true) { 180 if (currentRegionScanner == null) { 181 currentRegion++; 182 if (currentRegion >= regions.size()) { 183 return null; 184 } 185 186 RegionInfo hri = regions.get(currentRegion); 187 currentRegionScanner = 188 new ClientSideRegionScanner(conf, fs, restoreDir, htd, hri, scan, scanMetrics); 189 if (this.scanMetrics != null) { 190 this.scanMetrics.countOfRegions.incrementAndGet(); 191 } 192 } 193 194 try { 195 result = currentRegionScanner.next(); 196 if (result != null) { 197 if (scan.getLimit() > 0 && ++this.numOfCompleteRows > scan.getLimit()) { 198 result = null; 199 } 200 return result; 201 } 202 } finally { 203 if (result == null) { 204 currentRegionScanner.close(); 205 currentRegionScanner = null; 206 } 207 } 208 } 209 } 210 211 private void cleanup() { 212 try { 213 if (fs.exists(this.restoreDir)) { 214 if (!fs.delete(this.restoreDir, true)) { 215 LOG.warn( 216 "Delete restore directory for the snapshot failed. 
restoreDir: " + this.restoreDir); 217 } 218 } 219 } catch (IOException ex) { 220 LOG.warn( 221 "Could not delete restore directory for the snapshot. restoreDir: " + this.restoreDir, ex); 222 } 223 } 224 225 @Override 226 public void close() { 227 if (currentRegionScanner != null) { 228 currentRegionScanner.close(); 229 } 230 // if snapshotAlreadyRestored is true, then we should invoke cleanup() method by hand. 231 if (!this.snapshotAlreadyRestored) { 232 cleanup(); 233 } 234 } 235 236 @Override 237 public boolean renewLease() { 238 throw new UnsupportedOperationException(); 239 } 240 241}