/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowTooBigException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test case to check that the region server throws
 * {@link org.apache.hadoop.hbase.client.RowTooBigException} when the row size exceeds the
 * configured limit.
 */
@Category({ RegionServerTests.class, MediumTests.class })
public class TestRowTooBig {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRowTooBig.class);

  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  private static Path rootRegionDir;
  private static final HTableDescriptor TEST_HTD =
    new HTableDescriptor(TableName.valueOf(TestRowTooBig.class.getSimpleName()));

  @BeforeClass
  public static void before() throws Exception {
    HTU.startMiniCluster();
    HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L);
    rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
  }

  @AfterClass
  public static void after() throws Exception {
    HTU.shutdownMiniCluster();
  }

  /**
   * Usecase:
   * <ul>
   * <li>create a row with 5 large cells (5 MB each),</li>
   * <li>flush the memstore, but don't compact the storefiles,</li>
   * <li>try to Get the whole row.</li>
   * </ul>
   * The OOME used to happen before we actually got to reading results, but during seeking, as each
   * StoreFile gets its own scanner, and each scanner seeks after the first KV.
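   * <p>
   * Note: with {@link HConstants#TABLE_MAX_ROWSIZE_KEY} set to 10 MB in {@link #before()}, the
   * five 5 MB cells written below total roughly 25 MB, so the whole-row Get is expected to fail
   * fast with a {@link RowTooBigException} rather than exhaust memory.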
   */
  @Test(expected = RowTooBigException.class)
  public void testScannersSeekOnFewLargeCells() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
    HRegion region =
      HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
    try {
      // Add 5 cells to the memstore, flushing to a new storefile after each one
      for (int i = 0; i < 5; i++) {
        Put put = new Put(row1);

        byte[] value = new byte[5 * 1024 * 1024];
        put.addColumn(fam1, Bytes.toBytes("col_" + i), value);
        region.put(put);
        region.flush(true);
      }

      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }

  /**
   * Usecase:
   * <ul>
   * <li>create a row with 1M cells, 10 bytes in each,</li>
   * <li>flush and run a major compaction,</li>
   * <li>try to Get the whole row.</li>
   * </ul>
   * The OOME used to happen in StoreScanner.next(..).
   */
  @Test(expected = RowTooBigException.class)
  public void testScanAcrossManySmallColumns() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
    HRegion region =
      HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd);
    try {
      // Add 1M small cells to the memstore: 100k cells per put, flushing after each put
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row1);
        for (int j = 0; j < 10 * 10000; j++) {
          byte[] value = new byte[10];
          put.addColumn(fam1, Bytes.toBytes("col_" + i + "_" + j), value);
        }
        region.put(put);
        region.flush(true);
      }
      region.compact(true);

      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }
}
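
// A sketch (not part of the test) of how the same guard would be raised on a real cluster: the
// limit exercised above is HConstants.TABLE_MAX_ROWSIZE_KEY ("hbase.table.max.rowsize"), which an
// operator can override in hbase-site.xml, e.g.:
//
//   <property>
//     <name>hbase.table.max.rowsize</name>
//     <value>10485760</value> <!-- 10 * 1024 * 1024 bytes, mirroring the value set in before() -->
//   </property>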