/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import static junit.framework.TestCase.assertEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.CompatibilityFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * This test sets the multi response size WAAAAAY low and then checks to make sure that gets will
 * still make progress.
 */
@Category({ MediumTests.class, ClientTests.class })
public class TestMultiRespectsLimits {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestMultiRespectsLimits.class);

  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final MetricsAssertHelper METRICS_ASSERT =
    CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  private final static byte[] FAMILY = Bytes.toBytes("D");
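  // Deliberately tiny server-side max result size, in bytes (see setUpBeforeClass). Multi
  // responses that exceed it are chunked, which is what drives the multiResponseTooLarge
  // exceptions asserted below.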
  public static final int MAX_SIZE = 90;
  private static String LOG_LEVEL;

  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // disable the debug log to avoid flooding the output
    LOG_LEVEL = Log4jUtils.getEffectiveLevel(AsyncRegionLocatorHelper.class.getName());
    Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), "INFO");
    TEST_UTIL.getConfiguration().setLong(HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
      MAX_SIZE);

    // Only start one regionserver so that all regions are on the same server.
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    if (LOG_LEVEL != null) {
      Log4jUtils.setLogLevel(AsyncRegionLocatorHelper.class.getName(), LOG_LEVEL);
    }
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Table t = TEST_UTIL.createTable(tableName, FAMILY);
    TEST_UTIL.loadTable(t, FAMILY, false);
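    // loadTable fills the table with one small cell per row key in HBaseTestingUtility.ROWS
    // (27 bytes each, per the comment below), far more data than MAX_SIZE allows per response.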

    // Split the table to make sure that the chunking happens across regions.
    try (final Admin admin = TEST_UTIL.getAdmin()) {
      admin.split(tableName);
      TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return admin.getTableRegions(tableName).size() > 1;
        }
      });
    }
    List<Get> gets = new ArrayList<>(MAX_SIZE);

    for (int i = 0; i < MAX_SIZE; i++) {
      gets.add(new Get(HBaseTestingUtility.ROWS[i]));
    }

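    // Snapshot the RPC exception counters so the assertions below measure only the effect of
    // this batch of gets.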
    RpcServerInterface rpcServer = TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

    Result[] results = t.get(gets);
    assertEquals(MAX_SIZE, results.length);

    // Cells from TEST_UTIL.loadTable have a length of 27 bytes, so using 25 bytes per cell
    // below gives an easy lower bound on the number of chunked responses. In reality each KV
    // is reported as much larger than that.
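    // Rough arithmetic (a sanity bound, not an exact count): MAX_SIZE (90) cells * 27 bytes
    // = 2430 bytes of results, chunked by the 90-byte limit into roughly 27 responses, so the
    // counters should rise by strictly more than (MAX_SIZE * 25) / MAX_SIZE = 25.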
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + ((MAX_SIZE * 25) / MAX_SIZE),
      s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge",
      startingMultiExceptions + ((MAX_SIZE * 25) / MAX_SIZE), s);
  }

  @Test
  public void testBlockMultiLimits() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    desc.addFamily(hcd);
    TEST_UTIL.getAdmin().createTable(desc);
    Table t = TEST_UTIL.getConnection().getTable(tableName);

    final HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
    RpcServerInterface rpcServer = regionServer.getRpcServer();
    BaseSource s = rpcServer.getMetrics().getMetricsSource();
    long startingExceptions = METRICS_ASSERT.getCounter("exceptions", s);
    long startingMultiExceptions = METRICS_ASSERT.getCounter("exceptions.multiResponseTooLarge", s);

    byte[] row = Bytes.toBytes("TEST");
    byte[][] cols = new byte[][] { Bytes.toBytes("0"), // Get this
      Bytes.toBytes("1"), // Buffer
      Bytes.toBytes("2"), // Buffer
      Bytes.toBytes("3"), // Get this
      Bytes.toBytes("4"), // Buffer
      Bytes.toBytes("5"), // Buffer
      Bytes.toBytes("6"), // Buffer
      Bytes.toBytes("7"), // Get this
      Bytes.toBytes("8"), // Buffer
      Bytes.toBytes("9"), // Buffer
    };
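    // Expected layout, assuming the mid-loop flush below behaves as commented: cols 0-5 land in
    // the first (~55 byte) data block and cols 6-9 in the second (~45 byte) block, so each
    // "Get this" column pins a whole block when read back.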

    // Set the value size so that one result will be less than MAX_SIZE while the block being
    // referenced will be larger than MAX_SIZE. This should cause the regionserver to try and
    // send a result immediately.
    byte[] value = new byte[1];
    Bytes.random(value);

    for (int i = 0; i < cols.length; i++) {
      if (i == 6) {
        // do a flush here so we end up with 2 blocks, 55 and 45 bytes
        flush(regionServer, tableName);
      }
      byte[] col = cols[i];
      Put p = new Put(row);
      p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(FAMILY)
        .setQualifier(col).setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(value)
        .build());
      t.put(p);
    }

    // Make sure that a flush happens
    flush(regionServer, tableName);

    List<Get> gets = new ArrayList<>(4);
    // This get returns nothing since the filter doesn't match. Filtered cells still retain
    // their blocks, and this is a full row scan of both blocks (55 + 45 = 100 bytes, over
    // MAX_SIZE), so we should throw a multiResponseTooLarge after this get if we are counting
    // filtered cells correctly.
    Get g0 = new Get(row).addFamily(FAMILY).setFilter(
      new QualifierFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("sdf"))));
    gets.add(g0);

    // g1 and g2 each count the first 55-byte block, so we reach a block size of 110 (again over
    // MAX_SIZE) after g2 and throw a multiResponseTooLarge before g3.
    Get g1 = new Get(row);
    g1.addColumn(FAMILY, cols[0]);
    gets.add(g1);

    Get g2 = new Get(row);
    g2.addColumn(FAMILY, cols[3]);
    gets.add(g2);

    Get g3 = new Get(row);
    g3.addColumn(FAMILY, cols[7]);
    gets.add(g3);

    Result[] results = t.get(gets);
    assertEquals(4, results.length);
    // Expect 2 exceptions (and thus 3 RPCs) -- one for g0, another for g1 + g2, and a final RPC
    // for g3. If we tracked lastBlock we could squeeze g3 into the second RPC, because g2 would
    // be "free" since it's in the same block as g1.
    METRICS_ASSERT.assertCounterGt("exceptions", startingExceptions + 1, s);
    METRICS_ASSERT.assertCounterGt("exceptions.multiResponseTooLarge", startingMultiExceptions + 1,
      s);
  }

  private void flush(HRegionServer regionServer, TableName tableName) throws IOException {
    for (HRegion region : regionServer.getRegions(tableName)) {
      region.flush(true);
    }
  }
}