001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018 019package org.apache.hadoop.hbase.backup; 020 021import java.io.IOException; 022import java.io.InputStream; 023import java.util.ArrayList; 024import java.util.Calendar; 025import java.util.Date; 026import java.util.HashMap; 027import java.util.List; 028import java.util.Map; 029import java.util.Map.Entry; 030import java.util.Set; 031 032import org.apache.commons.lang3.StringUtils; 033import org.apache.hadoop.hbase.TableName; 034import org.apache.hadoop.hbase.backup.util.BackupUtils; 035import org.apache.hadoop.hbase.util.Bytes; 036import org.apache.yetus.audience.InterfaceAudience; 037import org.slf4j.Logger; 038import org.slf4j.LoggerFactory; 039 040import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 041import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; 042import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder; 043 044/** 045 * An object to encapsulate the information for each backup session 046 */ 047@InterfaceAudience.Private 048public class BackupInfo implements Comparable<BackupInfo> { 049 private static final Logger LOG = LoggerFactory.getLogger(BackupInfo.class); 050 051 public interface Filter { 052 /** 053 * Filter interface 054 * @param info backup info 055 * @return true if info passes filter, false otherwise 056 */ 057 boolean apply(BackupInfo info); 058 } 059 060 /** 061 * Backup session states 062 */ 063 public enum BackupState { 064 RUNNING, COMPLETE, FAILED, ANY 065 } 066 067 /** 068 * BackupPhase - phases of an ACTIVE backup session (running), when state of a backup session is 069 * BackupState.RUNNING 070 */ 071 public enum BackupPhase { 072 REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST 073 } 074 075 /** 076 * Backup id 077 */ 078 private String backupId; 079 080 /** 081 * Backup type, full or incremental 082 */ 083 private BackupType type; 084 085 /** 086 * Target root directory for storing the backup files 087 */ 088 private String backupRootDir; 089 090 /** 091 * Backup state 092 */ 093 private BackupState state; 094 095 /** 096 * Backup phase 097 */ 098 private BackupPhase phase = BackupPhase.REQUEST; 099 100 /** 101 * Backup failure message 102 */ 103 private String failedMsg; 104 105 /** 106 * Backup status map for all tables 107 */ 108 private Map<TableName, BackupTableInfo> backupTableInfoMap; 109 110 /** 111 * Actual start timestamp of a backup process 112 */ 113 private long startTs; 114 115 /** 116 * Actual end timestamp of the backup process 117 */ 118 private long completeTs; 119 120 /** 121 * Total bytes of incremental logs copied 122 */ 123 private long totalBytesCopied; 124 125 /** 126 * For incremental backup, a location of a backed-up hlogs 127 */ 128 private String hlogTargetDir = null; 129 130 /** 131 * Incremental backup file list 132 */ 133 private List<String> incrBackupFileList; 134 135 /** 136 * New region server log timestamps for table set after distributed log roll key - table name, 137 * value - map of RegionServer hostname -> last log rolled timestamp 138 */ 139 private HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap; 140 141 /** 142 * Backup progress in %% (0-100) 143 */ 144 private int progress; 145 146 /** 147 * Number of parallel workers. -1 - system defined 148 */ 149 private int workers = -1; 150 151 /** 152 * Bandwidth per worker in MB per sec. -1 - unlimited 153 */ 154 private long bandwidth = -1; 155 156 public BackupInfo() { 157 backupTableInfoMap = new HashMap<>(); 158 } 159 160 public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { 161 this(); 162 this.backupId = backupId; 163 this.type = type; 164 this.backupRootDir = targetRootDir; 165 this.addTables(tables); 166 if (type == BackupType.INCREMENTAL) { 167 setHLogTargetDir(BackupUtils.getLogBackupDir(targetRootDir, backupId)); 168 } 169 this.startTs = 0; 170 this.completeTs = 0; 171 } 172 173 public int getWorkers() { 174 return workers; 175 } 176 177 public void setWorkers(int workers) { 178 this.workers = workers; 179 } 180 181 public long getBandwidth() { 182 return bandwidth; 183 } 184 185 public void setBandwidth(long bandwidth) { 186 this.bandwidth = bandwidth; 187 } 188 189 public void setBackupTableInfoMap(Map<TableName, BackupTableInfo> backupTableInfoMap) { 190 this.backupTableInfoMap = backupTableInfoMap; 191 } 192 193 public HashMap<TableName, HashMap<String, Long>> getTableSetTimestampMap() { 194 return tableSetTimestampMap; 195 } 196 197 public void setTableSetTimestampMap(HashMap<TableName, 198 HashMap<String, Long>> tableSetTimestampMap) { 199 this.tableSetTimestampMap = tableSetTimestampMap; 200 } 201 202 public void setType(BackupType type) { 203 this.type = type; 204 } 205 206 public void setBackupRootDir(String targetRootDir) { 207 this.backupRootDir = targetRootDir; 208 } 209 210 public void setTotalBytesCopied(long totalBytesCopied) { 211 this.totalBytesCopied = totalBytesCopied; 212 } 213 214 /** 215 * Set progress (0-100%) 216 * @param p progress value 217 */ 218 public void setProgress(int p) { 219 this.progress = p; 220 } 221 222 /** 223 * Get current progress 224 */ 225 public int getProgress() { 226 return progress; 227 } 228 229 public String getBackupId() { 230 return backupId; 231 } 232 233 public void setBackupId(String backupId) { 234 this.backupId = backupId; 235 } 236 237 public BackupTableInfo getBackupTableInfo(TableName table) { 238 return this.backupTableInfoMap.get(table); 239 } 240 241 public String getFailedMsg() { 242 return failedMsg; 243 } 244 245 public void setFailedMsg(String failedMsg) { 246 this.failedMsg = failedMsg; 247 } 248 249 public long getStartTs() { 250 return startTs; 251 } 252 253 public void setStartTs(long startTs) { 254 this.startTs = startTs; 255 } 256 257 public long getCompleteTs() { 258 return completeTs; 259 } 260 261 public void setCompleteTs(long endTs) { 262 this.completeTs = endTs; 263 } 264 265 public long getTotalBytesCopied() { 266 return totalBytesCopied; 267 } 268 269 public BackupState getState() { 270 return state; 271 } 272 273 public void setState(BackupState flag) { 274 this.state = flag; 275 } 276 277 public BackupPhase getPhase() { 278 return phase; 279 } 280 281 public void setPhase(BackupPhase phase) { 282 this.phase = phase; 283 } 284 285 public BackupType getType() { 286 return type; 287 } 288 289 public void setSnapshotName(TableName table, String snapshotName) { 290 this.backupTableInfoMap.get(table).setSnapshotName(snapshotName); 291 } 292 293 public String getSnapshotName(TableName table) { 294 return this.backupTableInfoMap.get(table).getSnapshotName(); 295 } 296 297 public List<String> getSnapshotNames() { 298 List<String> snapshotNames = new ArrayList<>(); 299 for (BackupTableInfo backupStatus : this.backupTableInfoMap.values()) { 300 snapshotNames.add(backupStatus.getSnapshotName()); 301 } 302 return snapshotNames; 303 } 304 305 public Set<TableName> getTables() { 306 return this.backupTableInfoMap.keySet(); 307 } 308 309 public List<TableName> getTableNames() { 310 return new ArrayList<>(backupTableInfoMap.keySet()); 311 } 312 313 public void addTables(TableName[] tables) { 314 for (TableName table : tables) { 315 BackupTableInfo backupStatus = new BackupTableInfo(table, this.backupRootDir, this.backupId); 316 this.backupTableInfoMap.put(table, backupStatus); 317 } 318 } 319 320 public void setTables(List<TableName> tables) { 321 this.backupTableInfoMap.clear(); 322 for (TableName table : tables) { 323 BackupTableInfo backupStatus = new BackupTableInfo(table, this.backupRootDir, this.backupId); 324 this.backupTableInfoMap.put(table, backupStatus); 325 } 326 } 327 328 public String getBackupRootDir() { 329 return backupRootDir; 330 } 331 332 public String getTableBackupDir(TableName tableName) { 333 return BackupUtils.getTableBackupDir(backupRootDir, backupId, tableName); 334 } 335 336 public void setHLogTargetDir(String hlogTagetDir) { 337 this.hlogTargetDir = hlogTagetDir; 338 } 339 340 public String getHLogTargetDir() { 341 return hlogTargetDir; 342 } 343 344 public List<String> getIncrBackupFileList() { 345 return incrBackupFileList; 346 } 347 348 public void setIncrBackupFileList(List<String> incrBackupFileList) { 349 this.incrBackupFileList = incrBackupFileList; 350 } 351 352 /** 353 * Set the new region server log timestamps after distributed log roll 354 * @param newTableSetTimestampMap table timestamp map 355 */ 356 public void setIncrTimestampMap(HashMap<TableName, 357 HashMap<String, Long>> newTableSetTimestampMap) { 358 this.tableSetTimestampMap = newTableSetTimestampMap; 359 } 360 361 /** 362 * Get new region server log timestamps after distributed log roll 363 * @return new region server log timestamps 364 */ 365 public HashMap<TableName, HashMap<String, Long>> getIncrTimestampMap() { 366 return this.tableSetTimestampMap; 367 } 368 369 public TableName getTableBySnapshot(String snapshotName) { 370 for (Entry<TableName, BackupTableInfo> entry : this.backupTableInfoMap.entrySet()) { 371 if (snapshotName.equals(entry.getValue().getSnapshotName())) { 372 return entry.getKey(); 373 } 374 } 375 return null; 376 } 377 378 public BackupProtos.BackupInfo toProtosBackupInfo() { 379 BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder(); 380 builder.setBackupId(getBackupId()); 381 setBackupTableInfoMap(builder); 382 builder.setCompleteTs(getCompleteTs()); 383 if (getFailedMsg() != null) { 384 builder.setFailedMessage(getFailedMsg()); 385 } 386 if (getState() != null) { 387 builder.setBackupState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name())); 388 } 389 if (getPhase() != null) { 390 builder.setBackupPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name())); 391 } 392 393 builder.setProgress(getProgress()); 394 builder.setStartTs(getStartTs()); 395 builder.setBackupRootDir(getBackupRootDir()); 396 builder.setBackupType(BackupProtos.BackupType.valueOf(getType().name())); 397 builder.setWorkersNumber(workers); 398 builder.setBandwidth(bandwidth); 399 return builder.build(); 400 } 401 402 @Override 403 public int hashCode() { 404 int hash = 33 * type.hashCode() + backupId != null ? backupId.hashCode() : 0; 405 if (backupRootDir != null) { 406 hash = 33 * hash + backupRootDir.hashCode(); 407 } 408 hash = 33 * hash + state.hashCode(); 409 hash = 33 * hash + phase.hashCode(); 410 hash = 33 * hash + (int) (startTs ^ (startTs >>> 32)); 411 hash = 33 * hash + (int) (completeTs ^ (completeTs >>> 32)); 412 hash = 33 * hash + (int) (totalBytesCopied ^ (totalBytesCopied >>> 32)); 413 if (hlogTargetDir != null) { 414 hash = 33 * hash + hlogTargetDir.hashCode(); 415 } 416 return hash; 417 } 418 419 @Override 420 public boolean equals(Object obj) { 421 if (obj instanceof BackupInfo) { 422 BackupInfo other = (BackupInfo) obj; 423 try { 424 return Bytes.equals(toByteArray(), other.toByteArray()); 425 } catch (IOException e) { 426 LOG.error(e.toString(), e); 427 return false; 428 } 429 } else { 430 return false; 431 } 432 } 433 434 @Override 435 public String toString() { 436 return backupId; 437 } 438 439 public byte[] toByteArray() throws IOException { 440 return toProtosBackupInfo().toByteArray(); 441 } 442 443 private void setBackupTableInfoMap(Builder builder) { 444 for (Entry<TableName, BackupTableInfo> entry : backupTableInfoMap.entrySet()) { 445 builder.addBackupTableInfo(entry.getValue().toProto()); 446 } 447 } 448 449 public static BackupInfo fromByteArray(byte[] data) throws IOException { 450 return fromProto(BackupProtos.BackupInfo.parseFrom(data)); 451 } 452 453 public static BackupInfo fromStream(final InputStream stream) throws IOException { 454 return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); 455 } 456 457 public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { 458 BackupInfo context = new BackupInfo(); 459 context.setBackupId(proto.getBackupId()); 460 context.setBackupTableInfoMap(toMap(proto.getBackupTableInfoList())); 461 context.setCompleteTs(proto.getCompleteTs()); 462 if (proto.hasFailedMessage()) { 463 context.setFailedMsg(proto.getFailedMessage()); 464 } 465 if (proto.hasBackupState()) { 466 context.setState(BackupInfo.BackupState.valueOf(proto.getBackupState().name())); 467 } 468 469 context.setHLogTargetDir(BackupUtils.getLogBackupDir(proto.getBackupRootDir(), 470 proto.getBackupId())); 471 472 if (proto.hasBackupPhase()) { 473 context.setPhase(BackupPhase.valueOf(proto.getBackupPhase().name())); 474 } 475 if (proto.hasProgress()) { 476 context.setProgress(proto.getProgress()); 477 } 478 context.setStartTs(proto.getStartTs()); 479 context.setBackupRootDir(proto.getBackupRootDir()); 480 context.setType(BackupType.valueOf(proto.getBackupType().name())); 481 context.setWorkers(proto.getWorkersNumber()); 482 context.setBandwidth(proto.getBandwidth()); 483 return context; 484 } 485 486 private static Map<TableName, BackupTableInfo> toMap(List<BackupProtos.BackupTableInfo> list) { 487 HashMap<TableName, BackupTableInfo> map = new HashMap<>(); 488 for (BackupProtos.BackupTableInfo tbs : list) { 489 map.put(ProtobufUtil.toTableName(tbs.getTableName()), BackupTableInfo.convert(tbs)); 490 } 491 return map; 492 } 493 494 public String getShortDescription() { 495 StringBuilder sb = new StringBuilder(); 496 sb.append("{"); 497 sb.append("ID=" + backupId).append(","); 498 sb.append("Type=" + getType()).append(","); 499 sb.append("Tables=" + getTableListAsString()).append(","); 500 sb.append("State=" + getState()).append(","); 501 Date date = null; 502 Calendar cal = Calendar.getInstance(); 503 cal.setTimeInMillis(getStartTs()); 504 date = cal.getTime(); 505 sb.append("Start time=" + date).append(","); 506 if (state == BackupState.FAILED) { 507 sb.append("Failed message=" + getFailedMsg()).append(","); 508 } else if (state == BackupState.RUNNING) { 509 sb.append("Phase=" + getPhase()).append(","); 510 } else if (state == BackupState.COMPLETE) { 511 cal = Calendar.getInstance(); 512 cal.setTimeInMillis(getCompleteTs()); 513 date = cal.getTime(); 514 sb.append("End time=" + date).append(","); 515 } 516 sb.append("Progress=" + getProgress() + "%"); 517 sb.append("}"); 518 519 return sb.toString(); 520 } 521 522 public String getStatusAndProgressAsString() { 523 StringBuilder sb = new StringBuilder(); 524 sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) 525 .append(" progress: ").append(getProgress()); 526 return sb.toString(); 527 } 528 529 public String getTableListAsString() { 530 StringBuffer sb = new StringBuffer(); 531 sb.append("{"); 532 sb.append(StringUtils.join(backupTableInfoMap.keySet(), ",")); 533 sb.append("}"); 534 return sb.toString(); 535 } 536 537 /** 538 * We use only time stamps to compare objects during sort operation 539 */ 540 @Override 541 public int compareTo(BackupInfo o) { 542 Long thisTS = 543 Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); 544 Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); 545 return thisTS.compareTo(otherTS); 546 } 547}