/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.ColumnFamilyMismatchException;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

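/**
 * Tests incremental backup and restore. All scenarios run in a single test method because the
 * steps build on one another: a full backup baseline, incremental backups across a region split
 * and a MOB column family, a column family change that invalidates incremental backups, and
 * finally full and incremental restores that are verified by row counts.
 */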
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestIncrementalBackup extends TestBackupBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestIncrementalBackup.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestIncrementalBackup.class);

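  // Run against the "multiwal" WAL provider. The single Boolean parameter is not used by the
  // test itself; it exists only to drive the Parameterized runner.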
  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    provider = "multiwal";
    List<Object[]> params = new ArrayList<>();
    params.add(new Object[] { Boolean.TRUE });
    return params;
  }

  public TestIncrementalBackup(Boolean b) {
  }

  // Implement all test cases in one test, since the incremental
  // backup/restore steps depend on each other.
  @Test
  public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;

    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");

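    // Add a regular column family (f3) and a MOB-enabled column family to table1 before taking
    // the full backup. With a MOB threshold of 5 bytes, cell values larger than that are stored
    // as MOB files rather than inline in the regular HFiles.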
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName).setMobEnabled(true)
        .setMobThreshold(5L).build())
      .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
      int NB_ROWS_FAM3 = 6;
      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
      Admin admin = conn.getAdmin();
      BackupAdminImpl client = new BackupAdminImpl(conn);
      String backupIdFull = takeFullBackup(tables, client);

      // #2 - insert some data to table
      Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3,
        HBaseTestingUtil.countRows(t1));
      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
      // additionally, insert rows to MOB cf
      int NB_ROWS_MOB = 111;
      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
      LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB,
        HBaseTestingUtil.countRows(t1));
      t1.close();
      Table t2 = conn.getTable(table2);
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
      t2.close();
      LOG.debug("written 5 rows to " + table2);
      // Split table1 so that the following incremental backup also covers a region split.
      SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      List<HRegion> regions = cluster.getRegions(table1);
      byte[] name = regions.get(0).getRegionInfo().getRegionName();
      long startSplitTime = EnvironmentEdgeManager.currentTime();
      try {
        admin.splitRegionAsync(name).get();
      } catch (Exception e) {
        // A failed split (e.g. the region is not splittable) throws here, but it does not
        // affect the checks below, so the exception is only logged.
        LOG.debug("region is not splittable, because " + e);
      }
      TEST_UTIL.waitTableAvailable(table1);
      long endSplitTime = EnvironmentEdgeManager.currentTime();
      // split finished
      LOG.debug("split finished in " + (endSplitTime - startSplitTime) + " ms");

      // #3 - incremental backup for multiple tables
      tables = Lists.newArrayList(table1, table2);
      BackupRequest request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple));
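      // The manifest of the incremental backup should list exactly the tables that were backed up.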
      BackupManifest manifest =
        HBackupFileSystem.getManifest(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
      assertEquals(Sets.newHashSet(table1, table2), new HashSet<>(manifest.getTableList()));

      // add column family f2 to table1
      // drop column family f3
      final byte[] fam2Name = Bytes.toBytes("f2");
      newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name)).removeColumnFamily(fam3Name)
        .build();
      TEST_UTIL.getAdmin().modifyTable(newTable1Desc);

      // check that an incremental backup fails because the CFs don't match
      final List<TableName> tablesCopy = tables;
      IOException ex = assertThrows(IOException.class, () -> client
        .backupTables(createBackupRequest(BackupType.INCREMENTAL, tablesCopy, BACKUP_ROOT_DIR)));
      checkThrowsCFMismatch(ex, List.of(table1));
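      // After the column family change a new full backup re-establishes the baseline; only then
      // can incremental backups of table1 succeed again.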
      takeFullBackup(tables, client);

      int NB_ROWS_FAM2 = 7;
      Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
      t3.close();

      // Wait for 5 seconds to make sure that old WALs were deleted
      Thread.sleep(5000);

      // #4 - additional incremental backup for multiple tables
      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
      String backupIdIncMultiple2 = client.backupTables(request);
      assertTrue(checkSucceeded(backupIdIncMultiple2));

      // #5 - restore full backup for all tables
      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };

      LOG.debug("Restoring full " + backupIdFull);
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
        tablesRestoreFull, tablesMapFull, true));

      // #6.1 - check tables for full restore
      Admin hAdmin = TEST_UTIL.getAdmin();
      assertTrue(hAdmin.tableExists(table1_restore));
      assertTrue(hAdmin.tableExists(table2_restore));
      hAdmin.close();

      // #6.2 - check row count of tables for full restore
      Table hTable = conn.getTable(table1_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + NB_ROWS_FAM3, HBaseTestingUtil.countRows(hTable));
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
      hTable.close();

      // #7 - restore incremental backup for multiple tables, with overwrite
      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false,
        tablesRestoreIncMultiple, tablesMapIncMultiple, true));
      hTable = conn.getTable(table1_restore);

      LOG.debug("After incremental restore: " + hTable.getDescriptor());
      int countFamName = TEST_UTIL.countRows(hTable, famName);
      LOG.debug("f1 has " + countFamName + " rows");
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS, countFamName);

      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
      LOG.debug("f2 has " + countFam2Name + " rows");
      Assert.assertEquals(NB_ROWS_FAM2, countFam2Name);

      int countMobName = TEST_UTIL.countRows(hTable, mobName);
      LOG.debug("mob has " + countMobName + " rows");
      Assert.assertEquals(NB_ROWS_MOB, countMobName);
      hTable.close();

      hTable = conn.getTable(table2_restore);
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
      hTable.close();
      admin.close();
    }
  }

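  /**
   * Asserts that the root cause of the given exception is a {@link ColumnFamilyMismatchException}
   * covering exactly the given tables.
   */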
  private void checkThrowsCFMismatch(IOException ex, List<TableName> tables) {
    Throwable cause = Throwables.getRootCause(ex);
    assertEquals(ColumnFamilyMismatchException.class, cause.getClass());
    ColumnFamilyMismatchException e = (ColumnFamilyMismatchException) cause;
    assertEquals(tables, e.getMismatchedTables());
  }

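  /**
   * Takes a full backup of the given tables, asserts that it succeeded, and returns the backup id.
   */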
  private String takeFullBackup(List<TableName> tables, BackupAdminImpl backupAdmin)
    throws IOException {
    BackupRequest req = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupId = backupAdmin.backupTables(req);
    assertTrue(checkSucceeded(backupId));
    return backupId;
  }
}