/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupAdmin;
import org.apache.hadoop.hbase.backup.BackupClientFactory;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
import org.apache.hadoop.hbase.backup.BackupMergeJob;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreRequest;
import org.apache.hadoop.hbase.backup.util.BackupSet;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@InterfaceAudience.Private
public class BackupAdminImpl implements BackupAdmin {
  public final static String CHECK_OK = "Checking backup images: OK";
  public final static String CHECK_FAILED =
    "Checking backup images: Failed. Some dependencies are missing for restore";
  private static final Logger LOG = LoggerFactory.getLogger(BackupAdminImpl.class);

  private final Connection conn;

  public BackupAdminImpl(Connection conn) {
    this.conn = conn;
  }

  @Override
  public void close() {
  }

  @Override
  public BackupInfo getBackupInfo(String backupId) throws IOException {
    BackupInfo backupInfo;
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      if (backupId == null) {
        ArrayList<BackupInfo> recentSessions = table.getBackupInfos(BackupState.RUNNING);
        if (recentSessions.isEmpty()) {
          LOG.warn("No ongoing sessions found.");
          return null;
        }
        // else show status for the ongoing session
        // there must be at most one
        return recentSessions.get(0);
      } else {
        backupInfo = table.readBackupInfo(backupId);
        return backupInfo;
      }
    }
  }

  @Override
  public int deleteBackups(String[] backupIds) throws IOException {

    int totalDeleted = 0;
    Map<String, HashSet<TableName>> allTablesMap = new HashMap<>();

    boolean deleteSessionStarted;
    boolean snapshotDone;
    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      // Step 1: Make sure no active backup session is running by using the
      // startBackupExclusiveOperation API.
      // If there is an active session in progress, an exception will be thrown.
      try {
        sysTable.startBackupExclusiveOperation();
        deleteSessionStarted = true;
      } catch (IOException e) {
        LOG.warn("You can not run the delete command while an active backup session is in progress. \n"
          + "If there is no active backup session running, run backup repair utility to "
          + "restore \nbackup system integrity.");
        return -1;
      }

      // Step 2: Make sure there is no failed session
      List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
      if (list.size() != 0) {
        // Failed sessions found
        LOG.warn("Failed backup session found. Run backup repair tool first.");
        return -1;
      }

      // Step 3: Record delete session
      sysTable.startDeleteOperation(backupIds);
      // Step 4: Snapshot backup system table
      if (!BackupSystemTable.snapshotExists(conn)) {
        BackupSystemTable.snapshot(conn);
      } else {
        LOG.warn("Backup system table snapshot exists");
      }
      snapshotDone = true;
      try {
        for (int i = 0; i < backupIds.length; i++) {
          BackupInfo info = sysTable.readBackupInfo(backupIds[i]);
          if (info != null) {
            String rootDir = info.getBackupRootDir();
            HashSet<TableName> allTables = allTablesMap.get(rootDir);
            if (allTables == null) {
              allTables = new HashSet<>();
              allTablesMap.put(rootDir, allTables);
            }
            allTables.addAll(info.getTableNames());
            totalDeleted += deleteBackup(backupIds[i], sysTable);
          }
        }
        finalizeDelete(allTablesMap, sysTable);
        // Finish
        sysTable.finishDeleteOperation();
        // delete snapshot
        BackupSystemTable.deleteSnapshot(conn);
      } catch (IOException e) {
        // Fail delete operation
        // Step 1
        if (snapshotDone) {
          if (BackupSystemTable.snapshotExists(conn)) {
            BackupSystemTable.restoreFromSnapshot(conn);
            // delete snapshot
            BackupSystemTable.deleteSnapshot(conn);
            // We still have a record with an unfinished delete operation
            LOG.error("Delete operation failed, please run backup repair utility to restore "
              + "backup system integrity", e);
            throw e;
          } else {
            LOG.warn("Delete operation succeeded, there were some errors: ", e);
          }
        }

      } finally {
        if (deleteSessionStarted) {
          sysTable.finishBackupExclusiveOperation();
        }
      }
    }
    return totalDeleted;
  }

  /**
   * Updates incremental backup set for every backupRoot
   * @param tablesMap map [backupRoot: {@code Set<TableName>}]
   * @param table     backup system table
   * @throws IOException if a table operation fails
   */
  private void finalizeDelete(Map<String, HashSet<TableName>> tablesMap, BackupSystemTable table)
    throws IOException {
    for (String backupRoot : tablesMap.keySet()) {
      Set<TableName> incrTableSet = table.getIncrementalBackupTableSet(backupRoot);
      Map<TableName, ArrayList<BackupInfo>> tableMap =
        table.getBackupHistoryForTableSet(incrTableSet, backupRoot);
      for (Map.Entry<TableName, ArrayList<BackupInfo>> entry : tableMap.entrySet()) {
        if (entry.getValue() == null) {
          // No more backups for a table
          incrTableSet.remove(entry.getKey());
        }
      }
      if (!incrTableSet.isEmpty()) {
        table.addIncrementalBackupTableSet(incrTableSet, backupRoot);
      } else { // empty
        table.deleteIncrementalBackupTableSet(backupRoot);
      }
    }
  }

  /**
   * Deletes a single backup and all related backups.<br>
   * Algorithm:<br>
   * Backup type: FULL or INCREMENTAL<br>
   * Is this the last backup session for table T: YES or NO<br>
   * For every table T from table list 'tables':<br>
   * if (FULL, YES) deletes only physical data (PD)<br>
   * if (FULL, NO) deletes PD, scans all newer backups and removes T from backupInfo,<br>
   * until we either reach the most recent backup for T in the system or a FULL backup<br>
   * which includes T<br>
   * if (INCREMENTAL, YES) deletes only physical data (PD)<br>
   * if (INCREMENTAL, NO) deletes physical data and for table T scans all backup images between the
   * last FULL backup, which is older than the backup being deleted, and the next FULL backup (if it
   * exists)<br>
   * or the last one for a particular table T, and removes T from the list of backup tables.
   * @param backupId backup id
   * @param sysTable backup system table
   * @return total number of deleted backup images
   * @throws IOException if deleting the backup fails
   */
  private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException {
    BackupInfo backupInfo = sysTable.readBackupInfo(backupId);

    int totalDeleted = 0;
    if (backupInfo != null) {
      LOG.info("Deleting backup " + backupInfo.getBackupId() + " ...");
      // Step 1: clean up data for backup session (idempotent)
      BackupUtils.cleanupBackupData(backupInfo, conn.getConfiguration());
      // List of tables in this backup
      List<TableName> tables = backupInfo.getTableNames();
      long startTime = backupInfo.getStartTs();
      for (TableName tn : tables) {
        boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime);
        if (isLastBackupSession) {
          continue;
        }
        // else
        List<BackupInfo> affectedBackups = getAffectedBackupSessions(backupInfo, tn, sysTable);
        for (BackupInfo info : affectedBackups) {
          if (info.equals(backupInfo)) {
            continue;
          }
          removeTableFromBackupImage(info, tn, sysTable);
        }
      }
      Map<byte[], String> map = sysTable.readBulkLoadedFiles(backupId);
      FileSystem fs = FileSystem.get(conn.getConfiguration());
      boolean success = true;
      int numDeleted = 0;
      for (String f : map.values()) {
        Path p = new Path(f);
        try {
          LOG.debug("Delete backup info " + p + " for " + backupInfo.getBackupId());
          if (!fs.delete(p)) {
            if (fs.exists(p)) {
              LOG.warn(f + " was not deleted");
              success = false;
            }
          } else {
            numDeleted++;
          }
        } catch (IOException ioe) {
          LOG.warn(f + " was not deleted", ioe);
          success = false;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted");
      }
      if (success) {
        sysTable.deleteBulkLoadedRows(new ArrayList<>(map.keySet()));
      }

      sysTable.deleteBackupInfo(backupInfo.getBackupId());
      LOG.info("Delete backup " + backupInfo.getBackupId() + " completed.");
      totalDeleted++;
    } else {
      LOG.warn("Delete backup failed: no information found for backupID=" + backupId);
    }
    return totalDeleted;
  }

  private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable)
    throws IOException {
    List<TableName> tables = info.getTableNames();
    LOG.debug(
      "Remove " + tn + " from " + info.getBackupId() + " tables=" + info.getTableListAsString());
    if (tables.contains(tn)) {
      tables.remove(tn);

      if (tables.isEmpty()) {
        LOG.debug("Delete backup info " + info.getBackupId());

        sysTable.deleteBackupInfo(info.getBackupId());
        // Idempotent operation
        BackupUtils.cleanupBackupData(info, conn.getConfiguration());
      } else {
        info.setTables(tables);
        sysTable.updateBackupInfo(info);
        // Now, clean up directory for table (idempotent)
        cleanupBackupDir(info, tn, conn.getConfiguration());
      }
    }
  }

  private List<BackupInfo> getAffectedBackupSessions(BackupInfo backupInfo, TableName tn,
    BackupSystemTable table) throws IOException {
    LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn);
    long ts = backupInfo.getStartTs();
    List<BackupInfo> list = new ArrayList<>();
    List<BackupInfo> history = table.getBackupHistory(backupInfo.getBackupRootDir());
    // Scan from most recent to backupInfo
    // break when backupInfo is reached
    for (BackupInfo info : history) {
      if (info.getStartTs() == ts) {
        break;
      }
      List<TableName> tables = info.getTableNames();
      if (tables.contains(tn)) {
        BackupType bt = info.getType();
        if (bt == BackupType.FULL) {
          // Clear list if we encounter a FULL backup
          list.clear();
        } else {
          LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn
            + " added " + info.getBackupId() + " tables=" + info.getTableListAsString());
          list.add(info);
        }
      }
    }
    return list;
  }

  /**
   * Cleans up the data at the target directory
   * @throws IOException if cleaning up the backup directory fails
   */
  private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
    throws IOException {
    try {
      // clean up the data at the target directory
      String targetDir = backupInfo.getBackupRootDir();
      if (targetDir == null) {
        LOG.warn("No target directory specified for " + backupInfo.getBackupId());
        return;
      }

      FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);

      Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
        backupInfo.getBackupId(), table));
      if (outputFs.delete(targetDirPath, true)) {
        LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
      } else {
        LOG.info("No data has been found in " + targetDirPath.toString() + ".");
      }
    } catch (IOException e1) {
      LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
        + " at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
      throw e1;
    }
  }

  private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime)
    throws IOException {
    List<BackupInfo> history = table.getBackupHistory();
    for (BackupInfo info : history) {
      List<TableName> tables = info.getTableNames();
      if (!tables.contains(tn)) {
        continue;
      }
      return info.getStartTs() <= startTime;
    }
    return false;
  }

  @Override
  public List<BackupInfo> getHistory(int n) throws IOException {
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<BackupInfo> history = table.getBackupHistory();

      if (history.size() <= n) {
        return history;
      }

      List<BackupInfo> list = new ArrayList<>();
      for (int i = 0; i < n; i++) {
        list.add(history.get(i));
      }
      return list;
    }
  }

  @Override
  public List<BackupInfo> getHistory(int n, BackupInfo.Filter... filters) throws IOException {
    if (filters.length == 0) {
      return getHistory(n);
    }

    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<BackupInfo> history = table.getBackupHistory();
      List<BackupInfo> result = new ArrayList<>();
      for (BackupInfo bi : history) {
        if (result.size() == n) {
          break;
        }

        boolean passed = true;
        for (int i = 0; i < filters.length; i++) {
          if (!filters[i].apply(bi)) {
            passed = false;
            break;
          }
        }
        if (passed) {
          result.add(bi);
        }
      }
      return result;
    }
  }

  @Override
  public List<BackupSet> listBackupSets() throws IOException {
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<String> list = table.listBackupSets();
      List<BackupSet> bslist = new ArrayList<>();
      for (String s : list) {
        List<TableName> tables = table.describeBackupSet(s);
        if (tables != null) {
          bslist.add(new BackupSet(s, tables));
        }
      }
      return bslist;
    }
  }

  @Override
  public BackupSet getBackupSet(String name) throws IOException {
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      List<TableName> list = table.describeBackupSet(name);

      if (list == null) {
        return null;
      }

      return new BackupSet(name, list);
    }
  }

  @Override
  public boolean deleteBackupSet(String name) throws IOException {
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      if (table.describeBackupSet(name) == null) {
        return false;
      }
      table.deleteBackupSet(name);
      return true;
    }
  }

  @Override
  public void addToBackupSet(String name, TableName[] tables) throws IOException {
    String[] tableNames = new String[tables.length];
    try (final BackupSystemTable table = new BackupSystemTable(conn);
      final Admin admin = conn.getAdmin()) {
      for (int i = 0; i < tables.length; i++) {
        tableNames[i] = tables[i].getNameAsString();
        if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
          throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist");
        }
      }
      table.addToBackupSet(name, tableNames);
      LOG.info(
        "Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + "' backup set");
    }
  }

  @Override
  public void removeFromBackupSet(String name, TableName[] tables) throws IOException {
    LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'");
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
      table.removeFromBackupSet(name, toStringArray(tables));
      LOG.info(
        "Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed.");
    }
  }

  private String[] toStringArray(TableName[] list) {
    String[] arr = new String[list.length];
    for (int i = 0; i < list.length; i++) {
      arr[i] = list[i].toString();
    }
    return arr;
  }

  @Override
  public void restore(RestoreRequest request) throws IOException {
    if (request.isCheck()) {
      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
      // check and load backup image manifest for the tables
      Path rootPath = new Path(request.getBackupRootDir());
      String backupId = request.getBackupId();
      TableName[] sTableArray = request.getFromTables();
      HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray,
        conn.getConfiguration(), rootPath, backupId);

      // Check and validate the backup image and its dependencies
      if (BackupUtils.validate(backupManifestMap, conn.getConfiguration())) {
        LOG.info(CHECK_OK);
      } else {
        LOG.error(CHECK_FAILED);
      }
      return;
    }
    // Execute restore request
    new RestoreTablesClient(conn, request).execute();
  }

  @Override
  public String backupTables(BackupRequest request) throws IOException {
    BackupType type = request.getBackupType();
    String targetRootDir = request.getTargetRootDir();
    List<TableName> tableList = request.getTableList();

    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
    if (type == BackupType.INCREMENTAL) {
      Set<TableName> incrTableSet;
      try (BackupSystemTable table = new BackupSystemTable(conn)) {
        incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
      }

      if (incrTableSet.isEmpty()) {
        String msg =
          "Incremental backup table set contains no tables. " + "You need to run full backup first "
            + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");

        throw new IOException(msg);
      }
      if (tableList != null) {
        tableList.removeAll(incrTableSet);
        if (!tableList.isEmpty()) {
          String extraTables = StringUtils.join(tableList, ",");
          String msg = "Some tables (" + extraTables + ") haven't gone through full backup. "
            + "Perform full backup on " + extraTables + " first, " + "then retry the command";
          throw new IOException(msg);
        }
      }
      tableList = Lists.newArrayList(incrTableSet);
    }
    if (tableList != null && !tableList.isEmpty()) {
      for (TableName table : tableList) {
        String targetTableBackupDir =
          HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
        Path targetTableBackupDirPath = new Path(targetTableBackupDir);
        FileSystem outputFs =
          FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
        if (outputFs.exists(targetTableBackupDirPath)) {
          throw new IOException(
            "Target backup directory " + targetTableBackupDir + " exists already.");
        }
        outputFs.mkdirs(targetTableBackupDirPath);
      }
      ArrayList<TableName> nonExistingTableList = null;
      try (Admin admin = conn.getAdmin()) {
        for (TableName tableName : tableList) {
          if (!admin.tableExists(tableName)) {
            if (nonExistingTableList == null) {
              nonExistingTableList = new ArrayList<>();
            }
            nonExistingTableList.add(tableName);
          }
        }
      }
      if (nonExistingTableList != null) {
        if (type == BackupType.INCREMENTAL) {
          // Update incremental backup set
          tableList = excludeNonExistingTables(tableList, nonExistingTableList);
        } else {
          // Throw an exception only in full mode - we tried to back up non-existing tables
          throw new IOException(
            "Non-existing tables found in the table list: " + nonExistingTableList);
        }
      }
    }

    // update table list
    BackupRequest.Builder builder = new BackupRequest.Builder();
    request = builder.withBackupType(request.getBackupType()).withTableList(tableList)
      .withTargetRootDir(request.getTargetRootDir()).withBackupSetName(request.getBackupSetName())
      .withTotalTasks(request.getTotalTasks()).withBandwidthPerTasks((int) request.getBandwidth())
      .build();

    TableBackupClient client;
    try {
      client = BackupClientFactory.create(conn, backupId, request);
    } catch (IOException e) {
      LOG.error("There is an active session already running");
      throw e;
    }

    client.execute();

    return backupId;
  }

  private List<TableName> excludeNonExistingTables(List<TableName> tableList,
    List<TableName> nonExistingTableList) {
    for (TableName table : nonExistingTableList) {
      tableList.remove(table);
    }
    return tableList;
  }

  @Override
  public void mergeBackups(String[] backupIds) throws IOException {
    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
      checkIfValidForMerge(backupIds, sysTable);
      // TODO run job on remote cluster
      BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
      job.run(backupIds);
    }
  }

  /**
   * Verifies that backup images are valid for merge.
   * <ul>
   * <li>All backups MUST be in the same destination
   * <li>No FULL backups are allowed - only INCREMENTAL
   * <li>All backups must be in COMPLETE state
   * <li>No holes in backup list are allowed
   * </ul>
   * <p>
   * @param backupIds list of backup ids
   * @param table     backup system table
   * @throws IOException if the backup image is not valid for merge
   */
  private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
    throws IOException {
    String backupRoot = null;

    final Set<TableName> allTables = new HashSet<>();
    final Set<String> allBackups = new HashSet<>();
    long minTime = Long.MAX_VALUE, maxTime = Long.MIN_VALUE;
    for (String backupId : backupIds) {
      BackupInfo bInfo = table.readBackupInfo(backupId);
      if (bInfo == null) {
        String msg = "Backup session " + backupId + " not found";
        throw new IOException(msg);
      }
      if (backupRoot == null) {
        backupRoot = bInfo.getBackupRootDir();
      } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
        throw new IOException("Found different backup destinations in a list of backup sessions "
" + bInfo.getBackupRootDir()); 657 } 658 if (bInfo.getType() == BackupType.FULL) { 659 throw new IOException("FULL backup image can not be merged for: \n" + bInfo); 660 } 661 662 if (bInfo.getState() != BackupState.COMPLETE) { 663 throw new IOException("Backup image " + backupId 664 + " can not be merged becuase of its state: " + bInfo.getState()); 665 } 666 allBackups.add(backupId); 667 allTables.addAll(bInfo.getTableNames()); 668 long time = bInfo.getStartTs(); 669 if (time < minTime) { 670 minTime = time; 671 } 672 if (time > maxTime) { 673 maxTime = time; 674 } 675 } 676 677 final long startRangeTime = minTime; 678 final long endRangeTime = maxTime; 679 final String backupDest = backupRoot; 680 // Check we have no 'holes' in backup id list 681 // Filter 1 : backupRoot 682 // Filter 2 : time range filter 683 // Filter 3 : table filter 684 BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest); 685 686 BackupInfo.Filter timeRangeFilter = info -> { 687 long time = info.getStartTs(); 688 return time >= startRangeTime && time <= endRangeTime; 689 }; 690 691 BackupInfo.Filter tableFilter = info -> { 692 List<TableName> tables = info.getTableNames(); 693 return !Collections.disjoint(allTables, tables); 694 }; 695 696 BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL; 697 BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE; 698 699 List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter, 700 tableFilter, typeFilter, stateFilter); 701 if (allInfos.size() != allBackups.size()) { 702 // Yes we have at least one hole in backup image sequence 703 List<String> missingIds = new ArrayList<>(); 704 for (BackupInfo info : allInfos) { 705 if (allBackups.contains(info.getBackupId())) { 706 continue; 707 } 708 missingIds.add(info.getBackupId()); 709 } 710 String errMsg = 711 "Sequence of backup ids has 'holes'. The following backup images must be added:" 712 + org.apache.hadoop.util.StringUtils.join(",", missingIds); 713 throw new IOException(errMsg); 714 } 715 } 716}