/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;

/**
 * Verifies that split and merge results are written directly under the table directory (not under
 * a temporary {@code .tmp} dir), exercising {@link HRegionFileSystem} split/merge/commit paths
 * against a mini cluster.
 */
@Category({ RegionServerTests.class, LargeTests.class })
public class TestDirectStoreSplitsMerges {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestDirectStoreSplitsMerges.class);

  // Never reassigned, so declared final.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");

  // Supplies the current test method name, used as the table name per test.
  @Rule
  public TestName name = new TestName();

  @BeforeClass
  public static void setup() throws Exception {
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void after() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Splits a store file for a daughter region and asserts the reference file lands directly under
   * the table dir (table/region/cf layout), not under a temporary directory.
   */
  @Test
  public void testSplitStoreDir() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // first put some data in order to have a store file created
    putThreeRowsAndFlush(table);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    StoreFileTracker sft =
      StoreFileTrackerFactory.create(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(),
        true, region.getStores().get(0).getStoreContext());
    Path result = regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), false, region.getSplitPolicy(), sft);
    // asserts the reference file naming is correct
    validateResultingFile(region.getRegionInfo().getEncodedName(), result);
    // Additionally check if split region dir was created directly under table dir, not on .tmp
    Path resultGreatGrandParent = result.getParent().getParent().getParent();
    assertEquals(regionFS.getTableDir().getName(), resultGreatGrandParent.getName());
  }

  /**
   * Splits the table, then merges one store file from each daughter into a fresh merge region,
   * asserting the resulting reference file naming for each.
   */
  @Test
  public void testMergeStoreFile() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // splitting the table first
    TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
    waitForSplitProcComplete(1000, 10);
    // Add data and flush to create files in the two different regions
    putThreeRowsAndFlush(table);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
    HRegion first = regions.get(0);
    HRegion second = regions.get(1);
    HRegionFileSystem regionFS = first.getRegionFileSystem();

    RegionInfo mergeResult =
      RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey())
        .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false)
        .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();

    Configuration configuration = TEST_UTIL.getHBaseCluster().getMaster().getConfiguration();
    HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(configuration,
      regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult);

    // merge file from first region
    HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, first, file, StoreFileTrackerFactory.create(configuration,
      true, first.getStore(FAMILY_NAME).getStoreContext()));
    // merge file from second region
    file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, second, file, StoreFileTrackerFactory.create(configuration,
      true, second.getStore(FAMILY_NAME).getStoreContext()));
  }

  /**
   * Committing a daughter region with no store files should still return the daughter's splits
   * dir path.
   */
  @Test
  public void testCommitDaughterRegionNoFiles() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    Path splitDir = regionFS.getSplitsDir(daughterA);
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    Path result = regionFS.commitDaughterRegion(daughterA, new ArrayList<>(), env);
    assertEquals(splitDir, result);
  }

  /**
   * Splits a store file into both daughters and commits each; the committed path must equal the
   * daughter's splits dir.
   */
  @Test
  public void testCommitDaughterRegionWithFiles() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // first put some data in order to have a store file created
    putThreeRowsAndFlush(table);
    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(table).get(0);
    HRegionFileSystem regionFS = region.getStores().get(0).getRegionFileSystem();
    RegionInfo daughterA =
      RegionInfoBuilder.newBuilder(table).setStartKey(region.getRegionInfo().getStartKey())
        .setEndKey(Bytes.toBytes("002")).setSplit(false)
        .setRegionId(region.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();
    RegionInfo daughterB = RegionInfoBuilder.newBuilder(table).setStartKey(Bytes.toBytes("002"))
      .setEndKey(region.getRegionInfo().getEndKey()).setSplit(false)
      .setRegionId(region.getRegionInfo().getRegionId()).build();
    Path splitDirA = regionFS.getSplitsDir(daughterA);
    Path splitDirB = regionFS.getSplitsDir(daughterB);
    HStoreFile file = (HStoreFile) region.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    List<Path> filesA = new ArrayList<>();
    StoreFileTracker sft =
      StoreFileTrackerFactory.create(TEST_UTIL.getHBaseCluster().getMaster().getConfiguration(),
        true, region.getStores().get(0).getStoreContext());
    filesA.add(regionFS.splitStoreFile(daughterA, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), false, region.getSplitPolicy(), sft));
    List<Path> filesB = new ArrayList<>();
    filesB.add(regionFS.splitStoreFile(daughterB, Bytes.toString(FAMILY_NAME), file,
      Bytes.toBytes("002"), true, region.getSplitPolicy(), sft));
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    Path resultA = regionFS.commitDaughterRegion(daughterA, filesA, env);
    Path resultB = regionFS.commitDaughterRegion(daughterB, filesB, env);
    assertEquals(splitDirA, resultA);
    assertEquals(splitDirB, resultB);
  }

  /**
   * Merges files from two regions into a merge region and commits the merged region.
   */
  @Test
  public void testCommitMergedRegion() throws Exception {
    TableName table = TableName.valueOf(name.getMethodName());
    TEST_UTIL.createTable(table, FAMILY_NAME);
    // splitting the table first
    TEST_UTIL.getAdmin().split(table, Bytes.toBytes("002"));
    waitForSplitProcComplete(1000, 10);
    // Add data and flush to create files in the two different regions
    putThreeRowsAndFlush(table);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(table);
    HRegion first = regions.get(0);
    HRegion second = regions.get(1);
    HRegionFileSystem regionFS = first.getRegionFileSystem();

    RegionInfo mergeResult =
      RegionInfoBuilder.newBuilder(table).setStartKey(first.getRegionInfo().getStartKey())
        .setEndKey(second.getRegionInfo().getEndKey()).setSplit(false)
        .setRegionId(first.getRegionInfo().getRegionId() + EnvironmentEdgeManager.currentTime())
        .build();

    Configuration configuration = TEST_UTIL.getHBaseCluster().getMaster().getConfiguration();
    HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(configuration,
      regionFS.getFileSystem(), regionFS.getTableDir(), mergeResult);

    // merge file from first region
    HStoreFile file = (HStoreFile) first.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    mergeFileFromRegion(mergeRegionFs, first, file, StoreFileTrackerFactory.create(configuration,
      true, first.getStore(FAMILY_NAME).getStoreContext()));
    // merge file from second region
    file = (HStoreFile) second.getStore(FAMILY_NAME).getStorefiles().toArray()[0];
    List<Path> mergedFiles = new ArrayList<>();
    mergedFiles.add(mergeFileFromRegion(mergeRegionFs, second, file, StoreFileTrackerFactory
      .create(configuration, true, second.getStore(FAMILY_NAME).getStoreContext())));
    MasterProcedureEnv env =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor().getEnvironment();
    mergeRegionFs.commitMergedRegion(mergedFiles, env);
  }

  /**
   * Polls the master's procedure list until the pending {@link SplitTableRegionProcedure}
   * finishes or the attempt budget is exhausted, then asserts it succeeded. No-op if the master
   * reports no procedures at all.
   * @param attempts maximum number of poll iterations
   * @param waitTime millis to wait between polls
   */
  private void waitForSplitProcComplete(int attempts, int waitTime) throws Exception {
    List<Procedure<?>> procedures = TEST_UTIL.getHBaseCluster().getMaster().getProcedures();
    if (procedures.size() > 0) {
      // Fail with a descriptive message instead of a bare NoSuchElementException from
      // Optional.get() when no split procedure was actually scheduled. Also use the
      // wildcard type rather than the raw Procedure type.
      Procedure<?> splitProc =
        procedures.stream().filter(p -> p instanceof SplitTableRegionProcedure).findFirst()
          .orElseThrow(() -> new AssertionError("no SplitTableRegionProcedure is pending"));
      int count = 0;
      while ((splitProc.isWaiting() || splitProc.isRunnable()) && count < attempts) {
        // NOTE(review): nothing notifies the procedure's monitor here, so this wait acts
        // as a timed sleep between polls of the procedure state.
        synchronized (splitProc) {
          splitProc.wait(waitTime);
        }
        count++;
      }
      assertTrue(splitProc.isSuccess());
    }
  }

  /**
   * Merges {@code file} from {@code regionToMerge} into {@code regionFS} and validates the
   * resulting reference file naming.
   * @return path of the merged (reference) file
   */
  private Path mergeFileFromRegion(HRegionFileSystem regionFS, HRegion regionToMerge,
    HStoreFile file, StoreFileTracker sft) throws IOException {
    Path mergedFile = regionFS.mergeStoreFile(regionToMerge.getRegionInfo(),
      Bytes.toString(FAMILY_NAME), file, sft);
    validateResultingFile(regionToMerge.getRegionInfo().getEncodedName(), mergedFile);
    return mergedFile;
  }

  /**
   * Asserts the reference file name embeds the originating region's encoded name
   * (name format: {@code <file>.<encodedRegionName>}) and sits under the column family dir.
   */
  private void validateResultingFile(String originalRegion, Path result) {
    assertEquals(originalRegion, result.getName().split("\\.")[1]);
    // asserts we are under the cf directory
    Path resultParent = result.getParent();
    assertEquals(Bytes.toString(FAMILY_NAME), resultParent.getName());
  }

  /**
   * Puts rows "001".."003" into the table and flushes, so at least one store file exists.
   * Uses try-with-resources so the Table is closed (the previous version leaked it).
   */
  private void putThreeRowsAndFlush(TableName table) throws IOException {
    try (Table tbl = TEST_UTIL.getConnection().getTable(table)) {
      byte[] qualifier = Bytes.toBytes("1");
      Put put = new Put(Bytes.toBytes("001"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(1));
      tbl.put(put);
      put = new Put(Bytes.toBytes("002"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(2));
      tbl.put(put);
      put = new Put(Bytes.toBytes("003"));
      put.addColumn(FAMILY_NAME, qualifier, Bytes.toBytes(2));
      tbl.put(put);
    }
    TEST_UTIL.flush(table);
  }
}