@InterfaceAudience.Private public final class BackupSystemTable extends Object implements Closeable
Modifier and Type | Class and Description |
---|---|
(package private) static class |
BackupSystemTable.WALItem |
Modifier and Type | Field and Description |
---|---|
private static byte[] |
ACTIVE_SESSION_COL |
private static byte[] |
ACTIVE_SESSION_NO |
private static byte[] |
ACTIVE_SESSION_ROW |
private static byte[] |
ACTIVE_SESSION_YES |
private static String |
BACKUP_INFO_PREFIX |
(package private) static byte[] |
BL_COMMIT |
(package private) static byte[] |
BL_PREPARE |
private static String |
BLK_LD_DELIM |
(package private) static byte[] |
BULK_LOAD_FAMILY |
private static String |
BULK_LOAD_PREFIX |
private static byte[] |
BULK_LOAD_PREFIX_BYTES |
private TableName |
bulkLoadTableName
Backup System table name for bulk loaded files.
|
private Connection |
connection
Connection to HBase cluster, shared among all instances
|
private static byte[] |
DELETE_OP_ROW |
private static byte[] |
EMPTY_VALUE |
(package private) static byte[] |
FAM_COL |
private static String |
INCR_BACKUP_SET |
private static org.slf4j.Logger |
LOG |
private static byte[] |
MERGE_OP_ROW |
(package private) static byte[] |
META_FAMILY
Stores other meta
|
private static String |
NULL |
(package private) static byte[] |
PATH_COL |
private static String |
RS_LOG_TS_PREFIX |
(package private) static byte[] |
SESSIONS_FAMILY
Stores backup sessions (contexts)
|
private static String |
SET_KEY_PREFIX |
private static String |
START_CODE_ROW |
(package private) static byte[] |
STATE_COL |
private static String |
TABLE_RS_LOG_MAP_PREFIX |
private TableName |
tableName
Backup system table (main) name
|
(package private) static byte[] |
TBL_COL |
Constructor and Description |
---|
BackupSystemTable(Connection conn) |
Modifier and Type | Method and Description |
---|---|
void |
addIncrementalBackupTableSet(Set<TableName> tables,
String backupRoot)
Add tables to global incremental backup set
|
void |
addToBackupSet(String name,
String[] newTables)
Add backup set (list of tables)
|
private String |
cellKeyToBackupSetName(Cell current)
Converts cell key to backup set name.
|
private BackupInfo |
cellToBackupInfo(Cell current)
Converts cell to backup info instance.
|
private String[] |
cellValueToBackupSet(Cell current)
Converts cell to backup set list.
|
private void |
checkSystemTable() |
void |
close() |
private byte[] |
convertToByteArray(String[] tables) |
private Delete |
createDeleteForBackupDeleteOperation() |
private Delete |
createDeleteForBackupInfo(String backupId)
Creates Delete operation for a given backup id
|
private Delete |
createDeleteForBackupMergeOperation() |
private Delete |
createDeleteForBackupSet(String name)
Creates Delete operation to delete backup set content
|
private Delete |
createDeleteForIncrBackupTableSet(String backupRoot)
Creates Delete for incremental backup table set
|
static List<Delete> |
createDeleteForOrigBulkLoad(List<TableName> lst) |
private Get |
createGetForBackupInfo(String backupId)
Creates Get operation for a given backup id
|
private Get |
createGetForBackupSet(String name)
Creates Get operation to load backup set content
|
private Get |
createGetForDeleteOperation() |
private Get |
createGetForIncrBackupTableSet(String backupRoot)
Creates Get to retrieve incremental backup table set from backup system table
|
private Get |
createGetForMergeOperation() |
private Get |
createGetForStartCode(String rootPath)
Creates Get operation to retrieve start code from backup system table
|
private Put |
createPutForBackupInfo(BackupInfo context)
Creates Put operation for a given backup info object
|
private Put |
createPutForBackupSet(String name,
String[] tables)
Creates Put operation to update backup set content
|
(package private) static Put |
createPutForBulkLoadedFile(TableName tn,
byte[] fam,
String p,
String backupId,
long ts,
int idx) |
(package private) static List<Put> |
createPutForCommittedBulkload(TableName table,
byte[] region,
Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths) |
private Put |
createPutForDeleteOperation(String[] backupIdList) |
private Put |
createPutForIncrBackupTableSet(Set<TableName> tables,
String backupRoot)
Creates Put to store incremental backup table set
|
private Put |
createPutForMergeOperation(String[] backupIdList) |
(package private) static List<Put> |
createPutForPreparedBulkload(TableName table,
byte[] region,
byte[] family,
List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs) |
private Put |
createPutForRegionServerLastLogRollResult(String server,
Long timestamp,
String backupRoot)
Creates Put to store RS last log result
|
private Put |
createPutForStartBackupSession() |
private Put |
createPutForStartCode(String startCode,
String rootPath)
Creates Put operation to store start code to backup system table
|
private Put |
createPutForStopBackupSession() |
private Put |
createPutForUpdateTablesForMerge(List<TableName> tables) |
private Put |
createPutForWriteRegionServerLogTimestamp(TableName table,
byte[] smap,
String backupRoot)
Creates Put to write RS last roll log timestamp map
|
private Scan |
createScanForBackupHistory()
Creates Scan operation to load backup history
|
private Scan |
createScanForBackupSetList()
Creates Scan operation to load backup set list
|
(package private) static Scan |
createScanForBulkLoadedFiles(String backupId) |
(package private) static Scan |
createScanForOrigBulkLoadedFiles(TableName table) |
private Scan |
createScanForReadLogTimestampMap(String backupRoot)
Creates Scan to load table-> { RS -> ts} map of maps
|
private Scan |
createScanForReadRegionServerLastLogRollResult(String backupRoot)
Creates Scan operation to load last RS log roll results
|
private void |
createSystemTable(Admin admin,
TableDescriptor descriptor) |
void |
deleteBackupInfo(String backupId)
Deletes backup status from backup system table
|
void |
deleteBackupSet(String name)
Delete backup set
|
void |
deleteBulkLoadedRows(List<byte[]> rows) |
void |
deleteIncrementalBackupTableSet(String backupRoot)
Deletes incremental backup set for a backup destination
|
static void |
deleteSnapshot(Connection conn) |
List<TableName> |
describeBackupSet(String name)
Get backup set description (list of tables)
|
private String[] |
disjoin(String[] existingTables,
String[] toRemove) |
void |
finishBackupExclusiveOperation() |
void |
finishDeleteOperation() |
void |
finishMergeOperation() |
private HashMap<String,Long> |
fromTableServerTimestampProto(org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.TableServerTimestamp proto) |
List<BackupInfo> |
getBackupHistory()
Get all backups history
|
ArrayList<BackupInfo> |
getBackupHistory(boolean onlyCompleted)
Get all completed backup information (in desc order by time)
|
List<BackupInfo> |
getBackupHistory(int n,
BackupInfo.Filter... filters)
Get backup history records filtered by list of filters.
|
List<BackupInfo> |
getBackupHistory(String backupRoot)
Get history for backup destination
|
List<BackupInfo> |
getBackupHistoryForTable(TableName name)
Get history for a table
|
Map<TableName,ArrayList<BackupInfo>> |
getBackupHistoryForTableSet(Set<TableName> set,
String backupRoot) |
ArrayList<BackupInfo> |
getBackupInfos(BackupInfo.BackupState state)
Get all backup sessions with a given state (in descending order by time)
|
List<BackupInfo> |
getHistory(int n)
Get first n backup history records
|
Set<TableName> |
getIncrementalBackupTableSet(String backupRoot)
Return the current tables covered by incremental backup.
|
String[] |
getListOfBackupIdsFromDeleteOperation() |
String[] |
getListOfBackupIdsFromMergeOperation() |
(package private) static String |
getRegionNameFromOrigBulkLoadRow(String rowStr) |
private String |
getServerNameForReadRegionServerLastLogRollResult(byte[] row)
Get server's name from rowkey
|
static String |
getSnapshotName(org.apache.hadoop.conf.Configuration conf) |
static TableDescriptor |
getSystemTableDescriptor(org.apache.hadoop.conf.Configuration conf)
Get backup system table descriptor
|
static TableDescriptor |
getSystemTableForBulkLoadedDataDescriptor(org.apache.hadoop.conf.Configuration conf)
Get backup system table descriptor
|
static TableName |
getTableName(org.apache.hadoop.conf.Configuration conf) |
static String |
getTableNameAsString(org.apache.hadoop.conf.Configuration conf) |
static TableName |
getTableNameForBulkLoadedData(org.apache.hadoop.conf.Configuration conf) |
private String |
getTableNameForReadLogTimestampMap(byte[] cloneRow)
Get table name from rowkey
|
(package private) static String |
getTableNameFromOrigBulkLoadRow(String rowStr) |
List<TableName> |
getTablesForBackupType(BackupType type) |
boolean |
hasBackupSessions()
Checks if we have at least one backup session in backup system table. This API is used by
BackupLogCleaner
|
boolean |
isMergeInProgress() |
List<String> |
listBackupSets()
Get backup set list
|
private String[] |
merge(String[] existingTables,
String[] newTables) |
BackupInfo |
readBackupInfo(String backupId)
Reads backup status object (instance of backup info) from backup system table
|
String |
readBackupStartCode(String backupRoot)
Read the last backup start code (timestamp) of last successful backup.
|
(package private) Map<byte[],String> |
readBulkLoadedFiles(String backupId) |
Map<byte[],List<org.apache.hadoop.fs.Path>>[] |
readBulkLoadedFiles(String backupId,
List<TableName> sTableList) |
Pair<Map<TableName,Map<String,Map<String,List<Pair<String,Boolean>>>>>,List<byte[]>> |
readBulkloadRows(List<TableName> tableList) |
Map<TableName,Map<String,Long>> |
readLogTimestampMap(String backupRoot)
Read the timestamp for each region server log after the last successful backup.
|
HashMap<String,Long> |
readRegionServerLastLogRollResult(String backupRoot)
Get the Region Servers log information after the last log roll from backup system table.
|
void |
removeFromBackupSet(String name,
String[] toRemove)
Remove tables from backup set (list of tables)
|
static void |
restoreFromSnapshot(Connection conn) |
private BackupInfo |
resultToBackupInfo(Result res)
Converts Result to BackupInfo
|
private static byte[] |
rowkey(String s,
String... other) |
static void |
snapshot(Connection conn) |
private static boolean |
snapshotExists(Admin admin,
String snapshotName) |
static boolean |
snapshotExists(Connection conn) |
void |
startBackupExclusiveOperation()
Exclusive operations are: create, delete, merge
|
void |
startDeleteOperation(String[] backupIdList) |
void |
startMergeOperation(String[] backupIdList) |
private org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.TableServerTimestamp |
toTableServerTimestampProto(TableName table,
Map<String,Long> map) |
void |
updateBackupInfo(BackupInfo info)
Updates status (state) of a backup session in backup system table
|
void |
updateProcessedTablesForMerge(List<TableName> tables) |
private void |
verifyNamespaceExists(Admin admin) |
private void |
waitForSystemTable(Admin admin,
TableName tableName) |
void |
writeBackupStartCode(Long startCode,
String backupRoot)
Write the start code (timestamp) to backup system table.
|
void |
writeBulkLoadedFiles(List<TableName> sTableList,
Map<byte[],List<org.apache.hadoop.fs.Path>>[] maps,
String backupId) |
void |
writeFilesForBulkLoadPreCommit(TableName tabName,
byte[] region,
byte[] family,
List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs) |
void |
writePathsPostBulkLoad(TableName tabName,
byte[] region,
Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths) |
void |
writeRegionServerLastLogRollResult(String server,
Long ts,
String backupRoot)
Writes Region Server last roll log result (timestamp) to backup system table
|
void |
writeRegionServerLogTimestamp(Set<TableName> tables,
Map<String,Long> newTimestamps,
String backupRoot)
Write the current timestamps for each regionserver to backup system table after a successful
full or incremental backup.
|
private static final org.slf4j.Logger LOG
private TableName bulkLoadTableName
BackupObserver
static final byte[] SESSIONS_FAMILY
static final byte[] META_FAMILY
static final byte[] BULK_LOAD_FAMILY
private final Connection connection
private static final String BACKUP_INFO_PREFIX
private static final String START_CODE_ROW
private static final byte[] ACTIVE_SESSION_ROW
private static final byte[] ACTIVE_SESSION_COL
private static final byte[] ACTIVE_SESSION_YES
private static final byte[] ACTIVE_SESSION_NO
private static final String INCR_BACKUP_SET
private static final String TABLE_RS_LOG_MAP_PREFIX
private static final String RS_LOG_TS_PREFIX
private static final String BULK_LOAD_PREFIX
private static final byte[] BULK_LOAD_PREFIX_BYTES
private static final byte[] DELETE_OP_ROW
private static final byte[] MERGE_OP_ROW
static final byte[] TBL_COL
static final byte[] FAM_COL
static final byte[] PATH_COL
static final byte[] STATE_COL
static final byte[] BL_PREPARE
static final byte[] BL_COMMIT
private static final String SET_KEY_PREFIX
private static final String BLK_LD_DELIM
private static final byte[] EMPTY_VALUE
private static final String NULL
public BackupSystemTable(Connection conn) throws IOException
IOException
private void checkSystemTable() throws IOException
IOException
private void createSystemTable(Admin admin, TableDescriptor descriptor) throws IOException
IOException
private void verifyNamespaceExists(Admin admin) throws IOException
IOException
private void waitForSystemTable(Admin admin, TableName tableName) throws IOException
IOException
public void close()
close
in interface Closeable
close
in interface AutoCloseable
public void updateBackupInfo(BackupInfo info) throws IOException
info
- backup info
IOException
- exception
Map<byte[],String> readBulkLoadedFiles(String backupId) throws IOException
IOException
public Map<byte[],List<org.apache.hadoop.fs.Path>>[] readBulkLoadedFiles(String backupId, List<TableName> sTableList) throws IOException
IOException
public void deleteBackupInfo(String backupId) throws IOException
backupId
- backup id
IOException
- exception
public void writePathsPostBulkLoad(TableName tabName, byte[] region, Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths) throws IOException
IOException
public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, byte[] family, List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs) throws IOException
IOException
public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException
IOException
public Pair<Map<TableName,Map<String,Map<String,List<Pair<String,Boolean>>>>>,List<byte[]>> readBulkloadRows(List<TableName> tableList) throws IOException
IOException
public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[],List<org.apache.hadoop.fs.Path>>[] maps, String backupId) throws IOException
IOException
public BackupInfo readBackupInfo(String backupId) throws IOException
backupId
- backup id
IOException
public String readBackupStartCode(String backupRoot) throws IOException
backupRoot
- directory path to backup destination
IOException
- exception
public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException
startCode
- start code
backupRoot
- root directory path to backup
IOException
- exception
public void startBackupExclusiveOperation() throws IOException
IOException
- if a table operation fails or an active backup exclusive operation is already underway
private Put createPutForStartBackupSession()
public void finishBackupExclusiveOperation() throws IOException
IOException
private Put createPutForStopBackupSession()
public HashMap<String,Long> readRegionServerLastLogRollResult(String backupRoot) throws IOException
backupRoot
- root directory path to backup
IOException
- exception
public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) throws IOException
server
- Region Server name
ts
- last log timestamp
backupRoot
- root directory path to backup
IOException
- exception
public ArrayList<BackupInfo> getBackupHistory(boolean onlyCompleted) throws IOException
onlyCompleted
- true, if only successfully completed sessions
IOException
- exception
public List<BackupInfo> getBackupHistory() throws IOException
IOException
- if getting the backup history fails
public List<BackupInfo> getHistory(int n) throws IOException
n
- number of records, if n == -1 - max number is ignored
IOException
- if getting the backup history fails
public List<BackupInfo> getBackupHistory(int n, BackupInfo.Filter... filters) throws IOException
n
- max number of records, if n == -1, then max number is ignored
filters
- list of filters
IOException
- if getting the backup history fails
public List<TableName> getTablesForBackupType(BackupType type) throws IOException
IOException
public List<BackupInfo> getBackupHistory(String backupRoot) throws IOException
backupRoot
- backup destination path
IOException
- if getting the backup history fails
public List<BackupInfo> getBackupHistoryForTable(TableName name) throws IOException
name
- table name
IOException
- if getting the backup history fails
public Map<TableName,ArrayList<BackupInfo>> getBackupHistoryForTableSet(Set<TableName> set, String backupRoot) throws IOException
IOException
public ArrayList<BackupInfo> getBackupInfos(BackupInfo.BackupState state) throws IOException
state
- backup session state
IOException
- exception
public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String,Long> newTimestamps, String backupRoot) throws IOException
tables
- tables
newTimestamps
- timestamps
backupRoot
- root directory path to backup
IOException
- exception
public Map<TableName,Map<String,Long>> readLogTimestampMap(String backupRoot) throws IOException
backupRoot
- root directory path to backup
IOException
- exception
private org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, Map<String,Long> map)
private HashMap<String,Long> fromTableServerTimestampProto(org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.TableServerTimestamp proto)
public Set<TableName> getIncrementalBackupTableSet(String backupRoot) throws IOException
backupRoot
- root directory path to backup
IOException
- exception
public void addIncrementalBackupTableSet(Set<TableName> tables, String backupRoot) throws IOException
tables
- set of tables
backupRoot
- root directory path to backup
IOException
- exception
public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException
backupRoot
- backup root
IOException
public boolean hasBackupSessions() throws IOException
IOException
- exception
public List<String> listBackupSets() throws IOException
IOException
- if a table or scanner operation fails
public List<TableName> describeBackupSet(String name) throws IOException
name
- set's name
IOException
- if a table operation fails
public void addToBackupSet(String name, String[] newTables) throws IOException
name
- set name
newTables
- list of tables, comma-separated
IOException
- if a table operation fails
public void removeFromBackupSet(String name, String[] toRemove) throws IOException
name
- set name
toRemove
- list of tables
IOException
- if a table operation or deleting the backup set fails
public void deleteBackupSet(String name) throws IOException
name
- set's name
IOException
- if getting or deleting the table fails
public static TableDescriptor getSystemTableDescriptor(org.apache.hadoop.conf.Configuration conf)
public static TableName getTableName(org.apache.hadoop.conf.Configuration conf)
public static String getTableNameAsString(org.apache.hadoop.conf.Configuration conf)
public static String getSnapshotName(org.apache.hadoop.conf.Configuration conf)
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(org.apache.hadoop.conf.Configuration conf)
public static TableName getTableNameForBulkLoadedData(org.apache.hadoop.conf.Configuration conf)
private Put createPutForBackupInfo(BackupInfo context) throws IOException
context
- backup info
IOException
- exception
private Get createGetForBackupInfo(String backupId) throws IOException
backupId
- backup's ID
IOException
- exception
private Delete createDeleteForBackupInfo(String backupId)
backupId
- backup's ID
private BackupInfo resultToBackupInfo(Result res) throws IOException
res
- HBase result
IOException
- exception
private Get createGetForStartCode(String rootPath) throws IOException
IOException
- exception
private Put createPutForStartCode(String startCode, String rootPath)
private Get createGetForIncrBackupTableSet(String backupRoot) throws IOException
IOException
- exception
private Put createPutForIncrBackupTableSet(Set<TableName> tables, String backupRoot)
tables
- tables
private Delete createDeleteForIncrBackupTableSet(String backupRoot)
backupRoot
- backup root
private Scan createScanForBackupHistory()
private BackupInfo cellToBackupInfo(Cell current) throws IOException
current
- current cell
IOException
- exception
private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, String backupRoot)
table
- table
smap
- map, containing RS:ts
private Scan createScanForReadLogTimestampMap(String backupRoot)
private String getTableNameForReadLogTimestampMap(byte[] cloneRow)
cloneRow
- rowkey
private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, String backupRoot)
server
- server name
timestamp
- log roll result (timestamp)
private Scan createScanForReadRegionServerLastLogRollResult(String backupRoot)
private String getServerNameForReadRegionServerLastLogRollResult(byte[] row)
row
- rowkey
static List<Put> createPutForCommittedBulkload(TableName table, byte[] region, Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths)
public static void snapshot(Connection conn) throws IOException
IOException
public static void restoreFromSnapshot(Connection conn) throws IOException
IOException
private static boolean snapshotExists(Admin admin, String snapshotName) throws IOException
IOException
public static boolean snapshotExists(Connection conn) throws IOException
IOException
public static void deleteSnapshot(Connection conn) throws IOException
IOException
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, byte[] family, List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs)
public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst)
private Put createPutForDeleteOperation(String[] backupIdList)
private Delete createDeleteForBackupDeleteOperation()
private Get createGetForDeleteOperation()
public void startDeleteOperation(String[] backupIdList) throws IOException
IOException
public void finishDeleteOperation() throws IOException
IOException
public String[] getListOfBackupIdsFromDeleteOperation() throws IOException
IOException
private Put createPutForMergeOperation(String[] backupIdList)
public boolean isMergeInProgress() throws IOException
IOException
private Put createPutForUpdateTablesForMerge(List<TableName> tables)
private Delete createDeleteForBackupMergeOperation()
private Get createGetForMergeOperation()
public void startMergeOperation(String[] backupIdList) throws IOException
IOException
public void updateProcessedTablesForMerge(List<TableName> tables) throws IOException
IOException
public void finishMergeOperation() throws IOException
IOException
public String[] getListOfBackupIdsFromMergeOperation() throws IOException
IOException
static Scan createScanForOrigBulkLoadedFiles(TableName table)
static String getTableNameFromOrigBulkLoadRow(String rowStr)
static String getRegionNameFromOrigBulkLoadRow(String rowStr)
static Scan createScanForBulkLoadedFiles(String backupId)
static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId, long ts, int idx)
private Scan createScanForBackupSetList()
private Get createGetForBackupSet(String name)
private Delete createDeleteForBackupSet(String name)
name
- backup set's name
private Put createPutForBackupSet(String name, String[] tables)
name
- backup set's name
tables
- list of tables
private byte[] convertToByteArray(String[] tables)
private String[] cellValueToBackupSet(Cell current)
current
- current cell
private String cellKeyToBackupSetName(Cell current)
current
- current cell
Copyright © 2007–2020 The Apache Software Foundation. All rights reserved.