/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.TimestampTestBase.FlushCache;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

/**
 * Port of old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions from old testing
 * framework to {@link HBaseTestingUtil}.
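 * <p>
 * All three tests share the mini cluster started once in {@link #setUpBeforeClass()}; each test
 * creates its own table, named after the test method, rather than standing up a cluster of its
 * own.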
 */
@Category({ MiscTests.class, MediumTests.class })
public class TestMultiVersions {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiVersions.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestMultiVersions.class);
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
  private Admin admin;

  private static final int NUM_SLAVES = 3;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before
  public void before() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    this.admin = UTIL.getAdmin();
  }

  /**
   * Tests user-specifiable timestamps for puts, gets and scans, also in the presence of deletes.
   * The test cores are written so they can run against both an HRegion and an HTable, i.e. both
   * locally and remotely.
   * <p>
   * Port of the old TestTimestamp test, moved here to better utilize the spun-up cluster by
   * running more than a single test per spin-up. Keeps the old tests' craziness.
   */
  @Test
  public void testTimestamps() throws Exception {
    TableDescriptor tableDescriptor =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(TimestampTestBase.FAMILY_NAME)
          .setMaxVersions(3).build())
        .build();
    this.admin.createTable(tableDescriptor);
    Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    // TODO: Remove these deprecated classes or pull them in here if this is
    // the only test using them.
    TimestampTestBase.doTestDelete(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getHBaseCluster().flushcache();
      }
    });

    // Perhaps drop and re-add the table between the two test cores so the former does
    // not pollute the latter? Or split them into separate tests.
    TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {
      @Override
      public void flushcache() throws IOException {
        UTIL.getMiniHBaseCluster().flushcache();
      }
    });

    table.close();
  }

  /**
   * Verifies versions across a cluster restart.
   * <p>
   * Port of the old TestGetRowVersions test, moved here to better utilize the spun-up cluster by
   * running more than a single test per spin-up. Keeps the old tests' craziness.
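   * <p>
   * The flow: write value1 at timestamp 100, restart the HBase cluster, write value2 at timestamp
   * 200 over a fresh table reference, then verify that a plain {@link Get} returns only the
   * latest version while {@link Get#readAllVersions()} returns both.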
   */
  @Test
  public void testGetRowVersions() throws Exception {
    final byte[] contents = Bytes.toBytes("contents");
    final byte[] row = Bytes.toBytes("row");
    final byte[] value1 = Bytes.toBytes("value1");
    final byte[] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    TableDescriptor tableDescriptor = TableDescriptorBuilder
      .newBuilder(TableName.valueOf(name.getMethodName()))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(contents).setMaxVersions(3).build())
      .build();
    this.admin.createTable(tableDescriptor);
    Put put = new Put(row, timestamp1);
    put.addColumn(contents, contents, value1);
    Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    table.put(put);
    // Shut down and restart the HBase cluster
    table.close();
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
    StartTestingClusterOption option =
      StartTestingClusterOption.builder().numRegionServers(NUM_SLAVES).build();
    UTIL.startMiniHBaseCluster(option);
    // Get a fresh Table reference against the restarted cluster.
    table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    // Overwrite the previous value at a newer timestamp.
    put = new Put(row, timestamp2);
    put.addColumn(contents, contents, value2);
    table.put(put);
    // Now verify reads. A Get should return only the latest version by default.
    Get get = new Get(row);
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertEquals(1, r.size());
    byte[] value = r.getValue(contents, contents);
    assertNotEquals(0, value.length);
    assertTrue(Bytes.equals(value, value2));
    // Now check reading multiple versions.
    get = new Get(row);
    get.readAllVersions();
    r = table.get(get);
    assertEquals(2, r.size());
    value = r.getValue(contents, contents);
    assertNotEquals(0, value.length);
    assertArrayEquals(value2, value);
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
    assertEquals(2, versionMap.size());
    assertArrayEquals(value1, versionMap.get(timestamp1));
    assertArrayEquals(value2, versionMap.get(timestamp2));
    table.close();
  }

  /**
   * Port of the old TestScanMultipleVersions test, moved here to better utilize the spun-up
   * cluster by running more than just a single test. Keeps the old tests' craziness.
   * <p>
   * Tests five cases of scans and timestamps.
   */
  @Test
  public void testScanMultipleVersions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();

    final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
    final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
    final long[] timestamp = new long[] { 100L, 1000L };
    this.admin.createTable(tableDescriptor, splitRows);
    Table table = UTIL.getConnection().getTable(tableName);
    // Assert we got the region layout we wanted.
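    // With a single split key at row_0500, the table should consist of exactly two regions:
    // [EMPTY_START_ROW, row_0500) and [row_0500, EMPTY_END_ROW).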
    Pair<byte[][], byte[][]> keys =
      UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys();
    assertEquals(2, keys.getFirst().length);
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();

    for (int i = 0; i < startKeys.length; i++) {
      if (i == 0) {
        assertArrayEquals(HConstants.EMPTY_START_ROW, startKeys[i]);
        assertArrayEquals(splitRows[0], endKeys[i]);
      } else if (i == 1) {
        assertArrayEquals(splitRows[0], startKeys[i]);
        assertArrayEquals(HConstants.EMPTY_END_ROW, endKeys[i]);
      }
    }
    // Insert data: one row per region, two versions per row (timestamps 100 and 1000).
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < startKeys.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Put put = new Put(rows[i], timestamp[j]);
        put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
        puts.add(put);
      }
    }
    table.put(puts);
    // Sanity-check the writes: a get at each exact timestamp should return exactly one cell.
    for (int i = 0; i < rows.length; i++) {
      for (int j = 0; j < timestamp.length; j++) {
        Get get = new Get(rows[i]);
        get.addFamily(HConstants.CATALOG_FAMILY);
        get.setTimestamp(timestamp[j]);
        Result result = table.get(get);
        int cellCount = result.rawCells().length;
        assertEquals(1, cellCount);
      }
    }

    // There are 5 scan/timestamp cases we have to test. Each is described below.

    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows.
    int count;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    try (ResultScanner s = table.getScanner(scan)) {
      count = Iterables.size(s);
    }
    assertEquals("Number of rows should be 2", 2, count);

    // Case 2: scan with a time range starting at the most recent timestamp
    // (in this case 1000 <= ts < LATEST_TIMESTAMP). Should get 2 rows.
    scan = new Scan();
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    try (ResultScanner s = table.getScanner(scan)) {
      count = Iterables.size(s);
    }
    assertEquals("Number of rows should be 2", 2, count);

    // Case 3: scan with a timestamp equal to the most recent timestamp
    // (in this case == 1000). Should get 2 rows.
    scan = new Scan();
    scan.setTimestamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    try (ResultScanner s = table.getScanner(scan)) {
      count = Iterables.size(s);
    }
    assertEquals("Number of rows should be 2", 2, count);

    // Case 4: scan with a time range covering the first timestamp but not the second
    // (100 <= ts < 1000, since the upper bound is exclusive). Should get 2 rows.
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    try (ResultScanner s = table.getScanner(scan)) {
      count = Iterables.size(s);
    }
    assertEquals("Number of rows should be 2", 2, count);

    // Case 5: scan with a timestamp equal to the first timestamp (100).
    // Should get 2 rows.
    scan = new Scan();
    scan.setTimestamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    try (ResultScanner s = table.getScanner(scan)) {
      count = Iterables.size(s);
    }
    assertEquals("Number of rows should be 2", 2, count);
  }

}