/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.quotas;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClientServiceCallable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.RpcRetryingCaller;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

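/**
 * Tests how bulk loads interact with space quotas: a bulk load against a table in violation
 * should be rejected, and a multi-file bulk load that would exceed the quota should be rejected
 * atomically (no files loaded).
 */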
@Category(MediumTests.class)
public class TestSpaceQuotaOnBulkLoad {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestSpaceQuotaOnBulkLoad.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestSpaceQuotaOnBulkLoad.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @Rule
  public TestName testName = new TestName();
  private SpaceQuotaHelperForTests helper;

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    SpaceQuotaHelperForTests.updateConfigForQuotas(conf);
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void removeAllQuotas() throws Exception {
    helper = new SpaceQuotaHelperForTests(TEST_UTIL, testName, new AtomicLong(0));
    helper.removeAllQuotas();
  }

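  /**
   * Writes to a table until it is in violation of a {@link SpaceViolationPolicy#NO_WRITES} quota,
   * then verifies that a subsequent bulk load attempt is rejected with a
   * {@link SpaceLimitingException}.
   */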
  @Test
  public void testNoBulkLoadsWithNoWrites() throws Exception {
    Put p = new Put(Bytes.toBytes("to_reject"));
    p.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("to"),
      Bytes.toBytes("reject"));
    TableName tableName =
      helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, p);

    // The table is now in violation. Try to do a bulk load
    ClientServiceCallable<Void> callable = helper.generateFileToLoad(tableName, 1, 50);
    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
    RpcRetryingCallerFactory factory =
      new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration(), conn.getConnectionConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
    try {
      caller.callWithRetries(callable, Integer.MAX_VALUE);
      fail("Expected the bulk load call to fail!");
    } catch (SpaceLimitingException e) {
      // Pass
      LOG.trace("Caught expected exception", e);
    }
  }

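  /**
   * Verifies that a bulk load is atomic with respect to a {@link SpaceViolationPolicy#NO_INSERTS}
   * quota: when the combined size of the HFiles would exceed the quota, none of the files are
   * loaded, even though a single file on its own would have fit.
   */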
  @Test
  public void testAtomicBulkLoadUnderQuota() throws Exception {
    // Need to verify that if the batch of hfiles cannot be loaded, none are loaded.
    TableName tn = helper.createTableWithRegions(10);

    final long sizeLimit = 50L * SpaceQuotaHelperForTests.ONE_KILOBYTE;
    QuotaSettings settings =
      QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, SpaceViolationPolicy.NO_INSERTS);
    TEST_UTIL.getAdmin().setQuota(settings);

    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    RegionServerSpaceQuotaManager spaceQuotaManager = rs.getRegionServerSpaceQuotaManager();
    Map<TableName, SpaceQuotaSnapshot> snapshots = spaceQuotaManager.copyQuotaSnapshots();
    Map<RegionInfo, Long> regionSizes = getReportedSizesForTable(tn);
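    // Wait for the RegionServer's quota snapshot to reflect the limit we just set.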
    while (true) {
      SpaceQuotaSnapshot snapshot = snapshots.get(tn);
      if (snapshot != null && snapshot.getLimit() > 0) {
        break;
      }
      LOG.debug("Snapshot does not yet realize quota limit: " + snapshots + ", regionsizes: "
        + regionSizes);
      Thread.sleep(3000);
      snapshots = spaceQuotaManager.copyQuotaSnapshots();
      regionSizes = getReportedSizesForTable(tn);
    }
    // Our quota limit should be reflected in the latest snapshot
    SpaceQuotaSnapshot snapshot = snapshots.get(tn);
    assertEquals(0L, snapshot.getUsage());
    assertEquals(sizeLimit, snapshot.getLimit());

    // We should also not have a "real" violation policy in effect yet
    ActivePolicyEnforcement activePolicies = spaceQuotaManager.getActiveEnforcements();
    SpaceViolationPolicyEnforcement enforcement = activePolicies.getPolicyEnforcement(tn);
    assertTrue("Expected to find Noop policy, but got " + enforcement.getClass().getSimpleName(),
      enforcement instanceof DefaultViolationPolicyEnforcement);

    // Should generate two files, each of which is over 25KB
    ClientServiceCallable<Void> callable = helper.generateFileToLoad(tn, 2, 500);
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    FileStatus[] files =
      fs.listStatus(new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files"));
    for (FileStatus file : files) {
      assertTrue(
        "Expected the length of file " + file.getPath() + " to be larger than 25KB, but was "
          + file.getLen(),
        file.getLen() > 25 * SpaceQuotaHelperForTests.ONE_KILOBYTE);
      LOG.debug(file.getPath() + " -> " + file.getLen() + "B");
    }

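    // Attempt the bulk load; the two files together exceed the 50KB quota, so the load should be
    // rejected.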
    ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
    RpcRetryingCallerFactory factory =
      new RpcRetryingCallerFactory(TEST_UTIL.getConfiguration(), conn.getConnectionConfiguration());
    RpcRetryingCaller<Void> caller = factory.<Void> newCaller();
    try {
      caller.callWithRetries(callable, Integer.MAX_VALUE);
      fail("Expected the bulk load call to fail!");
    } catch (SpaceLimitingException e) {
      // Pass
      LOG.trace("Caught expected exception", e);
    }
    // Verify that the table contains no data: neither file should have been loaded, even though
    // one of them on its own would have fit under the quota.
    try (Table table = TEST_UTIL.getConnection().getTable(tn);
      ResultScanner scanner = table.getScanner(new Scan())) {
      assertNull("Expected no results", scanner.next());
    }
  }

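  /**
   * Returns the region sizes reported to the Master, filtered to regions of the given table.
   */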
  private Map<RegionInfo, Long> getReportedSizesForTable(TableName tn) {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
    Map<RegionInfo, Long> filteredRegionSizes = new HashMap<>();
    for (Map.Entry<RegionInfo, Long> entry : quotaManager.snapshotRegionSizes().entrySet()) {
      if (entry.getKey().getTable().equals(tn)) {
        filteredRegionSizes.put(entry.getKey(), entry.getValue());
      }
    }
    return filteredRegionSizes;
  }
}