/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupCopyJob;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotRegionLocator;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.util.Tool;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;

/**
 * Incremental backup implementation. See the {@link #execute() execute} method.
 */
@InterfaceAudience.Private
public class IncrementalTableBackupClient extends TableBackupClient {
  private static final Logger LOG = LoggerFactory.getLogger(IncrementalTableBackupClient.class);

  protected IncrementalTableBackupClient() {
  }

  public IncrementalTableBackupClient(final Connection conn, final String backupId,
    BackupRequest request) throws IOException {
    super(conn, backupId, request);
  }

  protected List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
    List<String> list = new ArrayList<>();
    for (String file : incrBackupFileList) {
      Path p = new Path(file);
      if (fs.exists(p) || isActiveWalPath(p)) {
        list.add(file);
      } else {
        LOG.warn("Can't find file: " + file);
      }
    }
    return list;
  }

  /**
   * Check if a given path belongs to the active WAL directory
   * @param p path
   * @return true, if yes
   */
  protected boolean isActiveWalPath(Path p) {
    return !AbstractFSWALProvider.isArchivedLogFile(p);
  }

  protected static int getIndex(TableName tbl, List<TableName> sTableList) {
    if (sTableList == null) {
      return 0;
    }

    for (int i = 0; i < sTableList.size(); i++) {
      if (tbl.equals(sTableList.get(i))) {
        return i;
      }
    }
    return -1;
  }

  /**
   * Reads bulk load records from the backup table, iterates through the records and forms the
   * paths for the bulk loaded hfiles. Copies the bulk loaded hfiles to the backup destination.
   * This method does NOT clean up the entries in the bulk load system table. Those entries should
   * not be cleaned until the backup is marked as complete.
   * @param tablesToBackup list of tables to be backed up
   */
  protected List<BulkLoad> handleBulkLoad(List<TableName> tablesToBackup) throws IOException {
    List<String> activeFiles = new ArrayList<>();
    List<String> archiveFiles = new ArrayList<>();
    List<BulkLoad> bulkLoads = backupManager.readBulkloadRows(tablesToBackup);
    FileSystem tgtFs;
    try {
      tgtFs = FileSystem.get(new URI(backupInfo.getBackupRootDir()), conf);
    } catch (URISyntaxException use) {
      throw new IOException("Unable to get FileSystem", use);
    }
    Path rootdir = CommonFSUtils.getRootDir(conf);
    Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);

    for (BulkLoad bulkLoad : bulkLoads) {
      TableName srcTable = bulkLoad.getTableName();
      String regionName = bulkLoad.getRegion();
      String fam = bulkLoad.getColumnFamily();
      String filename = FilenameUtils.getName(bulkLoad.getHfilePath());

      if (!tablesToBackup.contains(srcTable)) {
        LOG.debug("Skipping {} since it is not in tablesToBackup", srcTable);
        continue;
      }
      Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
      Path p = new Path(tblDir, regionName + Path.SEPARATOR + fam + Path.SEPARATOR + filename);

      String srcTableQualifier = srcTable.getQualifierAsString();
      String srcTableNs = srcTable.getNamespaceAsString();
      Path tgtFam = new Path(tgtRoot, srcTableNs + Path.SEPARATOR + srcTableQualifier
        + Path.SEPARATOR + regionName + Path.SEPARATOR + fam);
      if (!tgtFs.mkdirs(tgtFam)) {
        throw new IOException("couldn't create " + tgtFam);
      }
      Path tgt = new Path(tgtFam, filename);

      Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
      Path archive = new Path(archiveDir, filename);

      if (fs.exists(p)) {
        if (LOG.isTraceEnabled()) {
LOG.trace("found bulk hfile {} in {} for {}", bulkLoad.getHfilePath(), p.getParent(), 165 srcTableQualifier); 166 LOG.trace("copying {} to {}", p, tgt); 167 } 168 activeFiles.add(p.toString()); 169 } else if (fs.exists(archive)) { 170 LOG.debug("copying archive {} to {}", archive, tgt); 171 archiveFiles.add(archive.toString()); 172 } 173 mergeSplitAndCopyBulkloadedHFiles(activeFiles, archiveFiles, srcTable, tgtFs); 174 } 175 return bulkLoads; 176 } 177 178 private void mergeSplitAndCopyBulkloadedHFiles(List<String> activeFiles, 179 List<String> archiveFiles, TableName tn, FileSystem tgtFs) throws IOException { 180 int attempt = 1; 181 182 while (!activeFiles.isEmpty()) { 183 LOG.info("MergeSplit {} active bulk loaded files. Attempt={}", activeFiles.size(), attempt++); 184 // Active file can be archived during copy operation, 185 // we need to handle this properly 186 try { 187 mergeSplitAndCopyBulkloadedHFiles(activeFiles, tn, tgtFs); 188 break; 189 } catch (IOException e) { 190 int numActiveFiles = activeFiles.size(); 191 updateFileLists(activeFiles, archiveFiles); 192 if (activeFiles.size() < numActiveFiles) { 193 continue; 194 } 195 196 throw e; 197 } 198 } 199 200 if (!archiveFiles.isEmpty()) { 201 mergeSplitAndCopyBulkloadedHFiles(archiveFiles, tn, tgtFs); 202 } 203 } 204 205 private void mergeSplitAndCopyBulkloadedHFiles(List<String> files, TableName tn, FileSystem tgtFs) 206 throws IOException { 207 MapReduceHFileSplitterJob player = new MapReduceHFileSplitterJob(); 208 conf.set(MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY, 209 getBulkOutputDirForTable(tn).toString()); 210 player.setConf(conf); 211 212 String inputDirs = StringUtils.join(files, ","); 213 String[] args = { inputDirs, tn.getNameWithNamespaceInclAsString() }; 214 215 int result; 216 217 try { 218 result = player.run(args); 219 } catch (Exception e) { 220 LOG.error("Failed to run MapReduceHFileSplitterJob", e); 221 throw new IOException(e); 222 } 223 224 if (result != 0) { 225 throw new IOException( 226 "Failed to run MapReduceHFileSplitterJob with invalid result: " + result); 227 } 228 229 incrementalCopyBulkloadHFiles(tgtFs, tn); 230 } 231 232 private void updateFileLists(List<String> activeFiles, List<String> archiveFiles) 233 throws IOException { 234 List<String> newlyArchived = new ArrayList<>(); 235 236 for (String spath : activeFiles) { 237 if (!fs.exists(new Path(spath))) { 238 newlyArchived.add(spath); 239 } 240 } 241 242 if (newlyArchived.size() > 0) { 243 activeFiles.removeAll(newlyArchived); 244 archiveFiles.addAll(newlyArchived); 245 } 246 247 LOG.debug(newlyArchived.size() + " files have been archived."); 248 } 249 250 /** 251 * @throws IOException If the execution of the backup fails 252 * @throws ColumnFamilyMismatchException If the column families of the current table do not match 253 * the column families for the last full backup. 
   *           In that case, a full backup should be taken instead.
   */
  @Override
  public void execute() throws IOException, ColumnFamilyMismatchException {
    try {
      Map<TableName, String> tablesToFullBackupIds = getFullBackupIds();
      verifyCfCompatibility(backupInfo.getTables(), tablesToFullBackupIds);

      // case PREPARE_INCREMENTAL:
      beginBackup(backupManager, backupInfo);
      backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
      LOG.debug("For incremental backup, current table set is "
        + backupManager.getIncrementalBackupTableSet());
      newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
    } catch (Exception e) {
      // fail the overall backup and return
      failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
        BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }

    // case INCREMENTAL_COPY:
    try {
      // copy out the table and region info files for each table
      BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
      setupRegionLocator();
      // convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
      convertWALsToHFiles();
      incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
        backupInfo.getBackupRootDir());
    } catch (Exception e) {
      String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId;
      // fail the overall backup and return
      failBackup(conn, backupInfo, backupManager, e, msg, BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }
    // case INCR_BACKUP_COMPLETE:
    // set overall backup status: complete. Here we make sure to complete the backup.
    // After this checkpoint, even if a cancel is requested, the backup will still be completed.
    try {
      // Set the previousTimestampMap, which holds the timestamps from before the current log
      // roll, in the manifest.
      Map<TableName, Map<String, Long>> previousTimestampMap = backupManager.readLogTimestampMap();
      backupInfo.setIncrTimestampMap(previousTimestampMap);

      // The table list in backupInfo is good for both full backup and incremental backup.
      // For incremental backup, it contains the incremental backup table set.
      backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);

      Map<TableName, Map<String, Long>> newTableSetTimestampMap =
        backupManager.readLogTimestampMap();

      backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
      Long newStartCode =
        BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
      backupManager.writeBackupStartCode(newStartCode);

      List<BulkLoad> bulkLoads = handleBulkLoad(backupInfo.getTableNames());

      // backup complete
      completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf);

      List<byte[]> bulkLoadedRows = Lists.transform(bulkLoads, BulkLoad::getRowKey);
      backupManager.deleteBulkLoadedRows(bulkLoadedRows);
    } catch (IOException e) {
      failBackup(conn, backupInfo, backupManager, e, "Unexpected Exception : ",
        BackupType.INCREMENTAL, conf);
      throw new IOException(e);
    }
  }

  protected void incrementalCopyHFiles(String[] files, String backupDest) throws IOException {
    try {
      LOG.debug("Incremental copy HFiles is starting. dest=" + backupDest);
dest=" + backupDest); 327 // set overall backup phase: incremental_copy 328 backupInfo.setPhase(BackupPhase.INCREMENTAL_COPY); 329 // get incremental backup file list and prepare parms for DistCp 330 String[] strArr = new String[files.length + 1]; 331 System.arraycopy(files, 0, strArr, 0, files.length); 332 strArr[strArr.length - 1] = backupDest; 333 334 String jobname = "Incremental_Backup-HFileCopy-" + backupInfo.getBackupId(); 335 if (LOG.isDebugEnabled()) { 336 LOG.debug("Setting incremental copy HFiles job name to : " + jobname); 337 } 338 conf.set(JOB_NAME_CONF_KEY, jobname); 339 340 BackupCopyJob copyService = BackupRestoreFactory.getBackupCopyJob(conf); 341 int res = copyService.copy(backupInfo, backupManager, conf, BackupType.INCREMENTAL, strArr); 342 if (res != 0) { 343 LOG.error("Copy incremental HFile files failed with return code: " + res + "."); 344 throw new IOException( 345 "Failed copy from " + StringUtils.join(files, ',') + " to " + backupDest); 346 } 347 LOG.debug("Incremental copy HFiles from " + StringUtils.join(files, ',') + " to " + backupDest 348 + " finished."); 349 } finally { 350 deleteBulkLoadDirectory(); 351 } 352 } 353 354 protected void deleteBulkLoadDirectory() throws IOException { 355 // delete original bulk load directory on method exit 356 Path path = getBulkOutputDir(); 357 FileSystem fs = FileSystem.get(path.toUri(), conf); 358 boolean result = fs.delete(path, true); 359 if (!result) { 360 LOG.warn("Could not delete " + path); 361 } 362 } 363 364 protected void convertWALsToHFiles() throws IOException { 365 // get incremental backup file list and prepare parameters for DistCp 366 List<String> incrBackupFileList = backupInfo.getIncrBackupFileList(); 367 // Get list of tables in incremental backup set 368 Set<TableName> tableSet = backupManager.getIncrementalBackupTableSet(); 369 // filter missing files out (they have been copied by previous backups) 370 incrBackupFileList = filterMissingFiles(incrBackupFileList); 371 List<String> tableList = new ArrayList<String>(); 372 for (TableName table : tableSet) { 373 // Check if table exists 374 if (tableExists(table, conn)) { 375 tableList.add(table.getNameAsString()); 376 } else { 377 LOG.warn("Table " + table + " does not exists. Skipping in WAL converter"); 378 } 379 } 380 walToHFiles(incrBackupFileList, tableList); 381 382 } 383 384 protected boolean tableExists(TableName table, Connection conn) throws IOException { 385 try (Admin admin = conn.getAdmin()) { 386 return admin.tableExists(table); 387 } 388 } 389 390 protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException { 391 Tool player = new WALPlayer(); 392 393 // Player reads all files in arbitrary directory structure and creates 394 // a Map task for each file. 
    // We use ';' as the separator because WAL file names contain ','.
    String dirs = StringUtils.join(dirPaths, ';');
    String jobname = "Incremental_Backup-" + backupId;

    Path bulkOutputPath = getBulkOutputDir();
    conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString());
    conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";");
    conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true);
    conf.set(JOB_NAME_CONF_KEY, jobname);
    String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };

    try {
      player.setConf(conf);
      int result = player.run(playerArgs);
      if (result != 0) {
        throw new IOException("WAL Player failed");
      }
      conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY);
      conf.unset(JOB_NAME_CONF_KEY);
    } catch (IOException e) {
      throw e;
    } catch (Exception ee) {
      throw new IOException("Cannot convert from directory " + dirs
        + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee);
    }
  }

  private void incrementalCopyBulkloadHFiles(FileSystem tgtFs, TableName tn) throws IOException {
    Path bulkOutDir = getBulkOutputDirForTable(tn);

    if (tgtFs.exists(bulkOutDir)) {
      conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 2);
      Path tgtPath = getTargetDirForTable(tn);
      try {
        RemoteIterator<LocatedFileStatus> locatedFiles = tgtFs.listFiles(bulkOutDir, true);
        List<String> files = new ArrayList<>();
        while (locatedFiles.hasNext()) {
          LocatedFileStatus file = locatedFiles.next();
          if (file.isFile() && HFile.isHFileFormat(tgtFs, file.getPath())) {
            files.add(file.getPath().toString());
          }
        }
        incrementalCopyHFiles(files.toArray(new String[0]), tgtPath.toString());
      } finally {
        conf.unset(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY);
      }
    }
  }

  protected Path getBulkOutputDirForTable(TableName table) {
    Path tablePath = getBulkOutputDir();
    tablePath = new Path(tablePath, table.getNamespaceAsString());
    tablePath = new Path(tablePath, table.getQualifierAsString());
    return new Path(tablePath, "data");
  }

  protected Path getBulkOutputDir() {
    String backupId = backupInfo.getBackupId();
    Path path = new Path(backupInfo.getBackupRootDir());
    path = new Path(path, ".tmp");
    path = new Path(path, backupId);
    return path;
  }

  private Path getTargetDirForTable(TableName table) {
    Path path = new Path(backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId());
    path = new Path(path, table.getNamespaceAsString());
    path = new Path(path, table.getNameAsString());
    return path;
  }

  /**
   * Points the snapshot region locator at the snapshot manifest of the most recent full backup
   * for each table in this backup.
   */
  private void setupRegionLocator() throws IOException {
    Map<TableName, String> fullBackupIds = getFullBackupIds();
    try (BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {

      for (TableName tableName : backupInfo.getTables()) {
        String fullBackupId = fullBackupIds.get(tableName);
        BackupInfo fullBackupInfo = backupAdmin.getBackupInfo(fullBackupId);
        String snapshotName = fullBackupInfo.getSnapshotName(tableName);
        Path root = HBackupFileSystem.getTableBackupPath(tableName,
          new Path(fullBackupInfo.getBackupRootDir()), fullBackupId);
        String manifestDir =
          SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root).toString();
        SnapshotRegionLocator.setSnapshotManifestDir(conf, manifestDir, tableName);
      }
    }
  }

  private Map<TableName, String> getFullBackupIds() throws IOException {
    // Ancestors are stored from newest to oldest, so we can iterate backwards
    // in order to populate our backupId map with the most recent full backup
    // for a given table
    List<BackupManifest.BackupImage> images = getAncestors(backupInfo);
    Map<TableName, String> results = new HashMap<>();
    for (int i = images.size() - 1; i >= 0; i--) {
      BackupManifest.BackupImage image = images.get(i);
      if (image.getType() != BackupType.FULL) {
        continue;
      }

      for (TableName tn : image.getTableNames()) {
        results.put(tn, image.getBackupId());
      }
    }
    return results;
  }

  /**
   * Verifies that the current table descriptor CFs match the descriptor CFs of the last full
   * backup for the tables. This ensures CF compatibility across incremental backups. If a
   * mismatch is detected, a full table backup should be taken, rather than an incremental one.
   */
  private void verifyCfCompatibility(Set<TableName> tables,
    Map<TableName, String> tablesToFullBackupId) throws IOException, ColumnFamilyMismatchException {
    ColumnFamilyMismatchException.ColumnFamilyMismatchExceptionBuilder exBuilder =
      ColumnFamilyMismatchException.newBuilder();
    try (Admin admin = conn.getAdmin(); BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
      for (TableName tn : tables) {
        String backupId = tablesToFullBackupId.get(tn);
        BackupInfo fullBackupInfo = backupAdmin.getBackupInfo(backupId);

        ColumnFamilyDescriptor[] currentCfs = admin.getDescriptor(tn).getColumnFamilies();
        String snapshotName = fullBackupInfo.getSnapshotName(tn);
        Path root = HBackupFileSystem.getTableBackupPath(tn,
          new Path(fullBackupInfo.getBackupRootDir()), fullBackupInfo.getBackupId());
        Path manifestDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root);

        FileSystem fs;
        try {
          fs = FileSystem.get(new URI(fullBackupInfo.getBackupRootDir()), conf);
        } catch (URISyntaxException e) {
          throw new IOException("Unable to get fs for backup " + fullBackupInfo.getBackupId(), e);
        }

        SnapshotProtos.SnapshotDescription snapshotDescription =
          SnapshotDescriptionUtils.readSnapshotInfo(fs, manifestDir);
        SnapshotManifest manifest =
          SnapshotManifest.open(conf, fs, manifestDir, snapshotDescription);

        ColumnFamilyDescriptor[] backupCfs = manifest.getTableDescriptor().getColumnFamilies();
        if (!areCfsCompatible(currentCfs, backupCfs)) {
          exBuilder.addMismatchedTable(tn, currentCfs, backupCfs);
        }
      }
    }

    ColumnFamilyMismatchException ex = exBuilder.build();
    if (!ex.getMismatchedTables().isEmpty()) {
      throw ex;
    }
  }

  private static boolean areCfsCompatible(ColumnFamilyDescriptor[] currentCfs,
    ColumnFamilyDescriptor[] backupCfs) {
    if (currentCfs.length != backupCfs.length) {
      return false;
    }

    for (int i = 0; i < backupCfs.length; i++) {
      String currentCf = currentCfs[i].getNameAsString();
      String backupCf = backupCfs[i].getNameAsString();

      if (!currentCf.equals(backupCf)) {
        return false;
      }
    }

    return true;
  }
}