001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hbase; 019 020import static org.junit.Assert.assertArrayEquals; 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertNotEquals; 024import static org.junit.Assert.assertNotNull; 025import static org.junit.Assert.assertTrue; 026 027import java.io.IOException; 028import java.util.ArrayList; 029import java.util.List; 030import java.util.NavigableMap; 031import org.apache.hadoop.hbase.TimestampTestBase.FlushCache; 032import org.apache.hadoop.hbase.client.Admin; 033import org.apache.hadoop.hbase.client.Get; 034import org.apache.hadoop.hbase.client.Put; 035import org.apache.hadoop.hbase.client.Result; 036import org.apache.hadoop.hbase.client.ResultScanner; 037import org.apache.hadoop.hbase.client.Scan; 038import org.apache.hadoop.hbase.client.Table; 039import org.apache.hadoop.hbase.testclassification.MediumTests; 040import org.apache.hadoop.hbase.testclassification.MiscTests; 041import org.apache.hadoop.hbase.util.Bytes; 042import org.apache.hadoop.hbase.util.Pair; 043import org.junit.AfterClass; 044import org.junit.Before; 
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

/**
 * Port of old TestScanMultipleVersions, TestTimestamp and TestGetRowVersions from old testing
 * framework to {@link HBaseTestingUtility}.
 */
@Category({ MiscTests.class, MediumTests.class })
public class TestMultiVersions {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiVersions.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestMultiVersions.class);

  // Shared mini-cluster for every test in this class; started once in
  // setUpBeforeClass and torn down in tearDownAfterClass.
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  // Refreshed before each test from the shared utility's connection.
  private Admin admin;

  // Number of region servers for the mini cluster.
  private static final int NUM_SLAVES = 3;

  // Used to derive per-test table names from the running test's method name.
  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(NUM_SLAVES);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  @Before
  public void before() throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
    this.admin = UTIL.getAdmin();
  }

  /**
   * Tests user specifiable time stamps putting, getting and scanning. Also tests same in presence
   * of deletes. Test cores are written so can be run against an HRegion and against an HTable: i.e.
   * both local and remote.
   * <p>
   * Port of old TestTimestamp test to here so can better utilize the spun up cluster running more
   * than a single test per spin up. Keep old tests' crazyness.
098 */ 099 @Test 100 public void testTimestamps() throws Exception { 101 HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName())); 102 HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME); 103 hcd.setMaxVersions(3); 104 desc.addFamily(hcd); 105 this.admin.createTable(desc); 106 Table table = UTIL.getConnection().getTable(desc.getTableName()); 107 // TODO: Remove these deprecated classes or pull them in here if this is 108 // only test using them. 109 TimestampTestBase.doTestDelete(table, new FlushCache() { 110 @Override 111 public void flushcache() throws IOException { 112 UTIL.getHBaseCluster().flushcache(); 113 } 114 }); 115 116 // Perhaps drop and readd the table between tests so the former does 117 // not pollute this latter? Or put into separate tests. 118 TimestampTestBase.doTestTimestampScanning(table, new FlushCache() { 119 @Override 120 public void flushcache() throws IOException { 121 UTIL.getMiniHBaseCluster().flushcache(); 122 } 123 }); 124 125 table.close(); 126 } 127 128 /** 129 * Verifies versions across a cluster restart. 130 * <p/> 131 * Port of old TestGetRowVersions test to here so can better utilize the spun up cluster running 132 * more than a single test per spin up. Keep old tests' crazyness. 
133 */ 134 @Test 135 public void testGetRowVersions() throws Exception { 136 final byte[] contents = Bytes.toBytes("contents"); 137 final byte[] row = Bytes.toBytes("row"); 138 final byte[] value1 = Bytes.toBytes("value1"); 139 final byte[] value2 = Bytes.toBytes("value2"); 140 final long timestamp1 = 100L; 141 final long timestamp2 = 200L; 142 final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName())); 143 HColumnDescriptor hcd = new HColumnDescriptor(contents); 144 hcd.setMaxVersions(3); 145 desc.addFamily(hcd); 146 this.admin.createTable(desc); 147 Put put = new Put(row, timestamp1); 148 put.addColumn(contents, contents, value1); 149 Table table = UTIL.getConnection().getTable(desc.getTableName()); 150 table.put(put); 151 // Shut down and restart the HBase cluster 152 table.close(); 153 UTIL.shutdownMiniHBaseCluster(); 154 LOG.debug("HBase cluster shut down -- restarting"); 155 StartMiniClusterOption option = 156 StartMiniClusterOption.builder().numRegionServers(NUM_SLAVES).build(); 157 UTIL.startMiniHBaseCluster(option); 158 // Make a new connection. 
159 table = UTIL.getConnection().getTable(desc.getTableName()); 160 // Overwrite previous value 161 put = new Put(row, timestamp2); 162 put.addColumn(contents, contents, value2); 163 table.put(put); 164 // Now verify that getRow(row, column, latest) works 165 Get get = new Get(row); 166 // Should get one version by default 167 Result r = table.get(get); 168 assertNotNull(r); 169 assertFalse(r.isEmpty()); 170 assertEquals(1, r.size()); 171 byte[] value = r.getValue(contents, contents); 172 assertNotEquals(0, value.length); 173 assertTrue(Bytes.equals(value, value2)); 174 // Now check getRow with multiple versions 175 get = new Get(row); 176 get.setMaxVersions(); 177 r = table.get(get); 178 assertEquals(2, r.size()); 179 value = r.getValue(contents, contents); 180 assertNotEquals(0, value.length); 181 assertArrayEquals(value, value2); 182 NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap(); 183 NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents); 184 NavigableMap<Long, byte[]> versionMap = familyMap.get(contents); 185 assertEquals(2, versionMap.size()); 186 assertArrayEquals(value1, versionMap.get(timestamp1)); 187 assertArrayEquals(value2, versionMap.get(timestamp2)); 188 table.close(); 189 } 190 191 /** 192 * Port of old TestScanMultipleVersions test here so can better utilize the spun up cluster 193 * running more than just a single test. Keep old tests crazyness. 194 * <p> 195 * Tests five cases of scans and timestamps. 
196 */ 197 @Test 198 public void testScanMultipleVersions() throws Exception { 199 final TableName tableName = TableName.valueOf(name.getMethodName()); 200 final HTableDescriptor desc = new HTableDescriptor(tableName); 201 desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); 202 final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") }; 203 final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") }; 204 final long[] timestamp = new long[] { 100L, 1000L }; 205 this.admin.createTable(desc, splitRows); 206 Table table = UTIL.getConnection().getTable(tableName); 207 // Assert we got the region layout wanted. 208 Pair<byte[][], byte[][]> keys = 209 UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys(); 210 assertEquals(2, keys.getFirst().length); 211 byte[][] startKeys = keys.getFirst(); 212 byte[][] endKeys = keys.getSecond(); 213 214 for (int i = 0; i < startKeys.length; i++) { 215 if (i == 0) { 216 assertArrayEquals(HConstants.EMPTY_START_ROW, startKeys[i]); 217 assertArrayEquals(endKeys[i], splitRows[0]); 218 } else if (i == 1) { 219 assertArrayEquals(splitRows[0], startKeys[i]); 220 assertArrayEquals(endKeys[i], HConstants.EMPTY_END_ROW); 221 } 222 } 223 // Insert data 224 List<Put> puts = new ArrayList<>(); 225 for (int i = 0; i < startKeys.length; i++) { 226 for (int j = 0; j < timestamp.length; j++) { 227 Put put = new Put(rows[i], timestamp[j]); 228 put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j])); 229 puts.add(put); 230 } 231 } 232 table.put(puts); 233 // There are 5 cases we have to test. Each is described below. 
234 for (int i = 0; i < rows.length; i++) { 235 for (int j = 0; j < timestamp.length; j++) { 236 Get get = new Get(rows[i]); 237 get.addFamily(HConstants.CATALOG_FAMILY); 238 get.setTimestamp(timestamp[j]); 239 Result result = table.get(get); 240 int cellCount = result.rawCells().length; 241 assertEquals(1, cellCount); 242 } 243 } 244 245 // Case 1: scan with LATEST_TIMESTAMP. Should get two rows 246 int count; 247 Scan scan = new Scan(); 248 scan.addFamily(HConstants.CATALOG_FAMILY); 249 try (ResultScanner s = table.getScanner(scan)) { 250 count = Iterables.size(s); 251 } 252 assertEquals("Number of rows should be 2", 2, count); 253 254 // Case 2: Scan with a timestamp greater than most recent timestamp 255 // (in this case > 1000 and < LATEST_TIMESTAMP. Should get 2 rows. 256 scan = new Scan(); 257 scan.setTimeRange(1000L, Long.MAX_VALUE); 258 scan.addFamily(HConstants.CATALOG_FAMILY); 259 try (ResultScanner s = table.getScanner(scan)) { 260 count = Iterables.size(s); 261 } 262 assertEquals("Number of rows should be 2", 2, count); 263 264 // Case 3: scan with timestamp equal to most recent timestamp 265 // (in this case == 1000. Should get 2 rows. 266 scan = new Scan(); 267 scan.setTimestamp(1000L); 268 scan.addFamily(HConstants.CATALOG_FAMILY); 269 try (ResultScanner s = table.getScanner(scan)) { 270 count = Iterables.size(s); 271 } 272 assertEquals("Number of rows should be 2", 2, count); 273 274 // Case 4: scan with timestamp greater than first timestamp but less than 275 // second timestamp (100 < timestamp < 1000). Should get 2 rows. 276 scan = new Scan(); 277 scan.setTimeRange(100L, 1000L); 278 scan.addFamily(HConstants.CATALOG_FAMILY); 279 try (ResultScanner s = table.getScanner(scan)) { 280 count = Iterables.size(s); 281 } 282 assertEquals("Number of rows should be 2", 2, count); 283 284 // Case 5: scan with timestamp equal to first timestamp (100) 285 // Should get 2 rows. 
286 scan = new Scan(); 287 scan.setTimestamp(100L); 288 scan.addFamily(HConstants.CATALOG_FAMILY); 289 try (ResultScanner s = table.getScanner(scan)) { 290 count = Iterables.size(s); 291 } 292 assertEquals("Number of rows should be 2", 2, count); 293 } 294 295}