/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.procedure;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.MetricsRegionWrapperImpl;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;

/**
 * Integration test for the "lazy" table-descriptor modification flow: a
 * {@code modifyTableAsync(td, reopenRegions=false)} call updates the table descriptor on the
 * master without reopening regions, so the per-region table descriptor hash reported through
 * region metrics must stay unchanged until the regions are explicitly reopened via
 * {@code Admin#reopenTableRegions}.
 */
@Tag(MasterTests.TAG)
@Tag(MediumTests.TAG)
public class TestReopenTableRegionsIntegration {

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
  private static final TableName TABLE_NAME = TableName.valueOf("testLazyUpdateReopen");
  private static final byte[] CF = Bytes.toBytes("cf");

  @BeforeAll
  public static void setupCluster() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // A single procedure thread keeps master procedure execution deterministic for this test.
    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
    UTIL.startMiniCluster(1);
  }

  @AfterAll
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  /**
   * Reads the table descriptor hash that a region exposes through its metrics wrapper. The
   * wrapper is closed immediately so no metrics executor threads leak between calls.
   */
  private static String regionDescriptorHash(HRegion region) throws IOException {
    try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
      return wrapper.getTableDescriptorHash();
    }
  }

  /**
   * Lazy-modifies the table descriptor, verifies the region metrics hash is unchanged, then
   * reopens ALL regions and verifies every region picks up the new descriptor hash.
   */
  @Test
  public void testLazyUpdateThenReopenUpdatesTableDescriptorHash() throws Exception {
    // Step 1: Create table with one column family and 3 regions
    ColumnFamilyDescriptor cfd =
      ColumnFamilyDescriptorBuilder.newBuilder(CF).setMaxVersions(1).build();

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(cfd)
      .setMaxFileSize(100 * 1024 * 1024L).build();

    UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 3);
    UTIL.waitTableAvailable(TABLE_NAME);

    try {
      // Step 2: Capture initial tableDescriptorHash from all regions.
      List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
      assertEquals(3, regions.size(), "Expected 3 regions");

      // Key by the String encoded region name. byte[] keys in a HashMap compare by identity
      // (arrays do not override equals/hashCode), so lookups against freshly-fetched
      // RegionInfo instances would silently return null.
      Map<String, String> initialHashes = new HashMap<>();
      for (HRegion region : regions) {
        initialHashes.put(region.getRegionInfo().getEncodedName(), regionDescriptorHash(region));
      }

      // All regions of the same table must report the same descriptor hash.
      Set<String> uniqueHashes = new HashSet<>(initialHashes.values());
      assertEquals(1, uniqueHashes.size(), "All regions should have same hash");
      String initialHash = uniqueHashes.iterator().next();

      // Step 3: Perform lazy table descriptor update (reopenRegions = false)
      ColumnFamilyDescriptor newCfd =
        ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(5).build();

      TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td).modifyColumnFamily(newCfd)
        .setMaxFileSize(200 * 1024 * 1024L).build();

      UTIL.getAdmin().modifyTableAsync(newTd, false).get();

      // Wait until the modified descriptor is visible on the master.
      UTIL.waitFor(30000, () -> {
        try {
          TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(TABLE_NAME);
          return currentTd.getMaxFileSize() == 200 * 1024 * 1024L;
        } catch (Exception e) {
          return false;
        }
      });

      // Step 4: Verify tableDescriptorHash has NOT changed in region metrics
      List<HRegion> regionsAfterLazyUpdate = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
      for (HRegion region : regionsAfterLazyUpdate) {
        assertEquals(initialHashes.get(region.getRegionInfo().getEncodedName()),
          regionDescriptorHash(region), "Hash should NOT change without region reopen");
      }

      // The table descriptor itself must have changed, even though regions have not.
      TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(TABLE_NAME);
      String newDescriptorHash = currentTd.getDescriptorHash();
      assertNotEquals(initialHash, newDescriptorHash, "Table descriptor should have new hash");

      // Step 5: Use new Admin API to reopen all regions
      UTIL.getAdmin().reopenTableRegions(TABLE_NAME);

      // Wait until every region has been reopened and reports a non-initial hash.
      UTIL.waitFor(60000, () -> {
        try {
          List<HRegion> currentRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
          if (currentRegions.size() != 3) {
            return false;
          }
          for (HRegion region : currentRegions) {
            if (regionDescriptorHash(region).equals(initialHash)) {
              return false;
            }
          }
          return true;
        } catch (Exception e) {
          return false;
        }
      });

      // Step 6: Verify tableDescriptorHash HAS changed in all region metrics
      List<HRegion> reopenedRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
      assertEquals(3, reopenedRegions.size(), "Should still have 3 regions");

      for (HRegion region : reopenedRegions) {
        String currentHash = regionDescriptorHash(region);
        assertNotEquals(initialHash, currentHash, "Hash SHOULD change after region reopen");
        assertEquals(newDescriptorHash, currentHash, "Hash should match current table descriptor");
      }

      // All regions must converge on the same new hash.
      Set<String> newHashes = new HashSet<>();
      for (HRegion region : reopenedRegions) {
        newHashes.add(regionDescriptorHash(region));
      }
      assertEquals(1, newHashes.size(), "All regions should have same new hash");
    } finally {
      UTIL.deleteTable(TABLE_NAME);
    }
  }

  /**
   * Lazy-modifies the table descriptor, then reopens a SUBSET of regions and verifies only
   * those regions pick up the new hash; finally reopens the rest and verifies convergence.
   */
  @Test
  public void testLazyUpdateThenReopenSpecificRegions() throws Exception {
    TableName tableName = TableName.valueOf("testSpecificRegionsReopen");

    // Step 1: Create table with 5 regions
    ColumnFamilyDescriptor cfd =
      ColumnFamilyDescriptorBuilder.newBuilder(CF).setMaxVersions(1).build();

    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd)
      .setMaxFileSize(100 * 1024 * 1024L).build();

    UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
    UTIL.waitTableAvailable(tableName);

    try {
      // Step 2: Capture initial hashes, keyed by String encoded region name (see the other
      // test for why byte[] map keys are unreliable).
      List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
      assertEquals(5, regions.size(), "Expected 5 regions");

      Map<String, String> initialHashes = new HashMap<>();
      for (HRegion region : regions) {
        initialHashes.put(region.getRegionInfo().getEncodedName(), regionDescriptorHash(region));
      }

      String initialHash = initialHashes.values().iterator().next();

      // Step 3: Perform lazy update
      ColumnFamilyDescriptor newCfd =
        ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(10).build();

      TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td).modifyColumnFamily(newCfd)
        .setMaxFileSize(300 * 1024 * 1024L).build();

      UTIL.getAdmin().modifyTableAsync(newTd, false).get();

      UTIL.waitFor(30000, () -> {
        try {
          TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(tableName);
          return currentTd.getMaxFileSize() == 300 * 1024 * 1024L;
        } catch (Exception e) {
          return false;
        }
      });

      String newDescriptorHash = UTIL.getAdmin().getDescriptor(tableName).getDescriptorHash();

      // Step 4: Reopen only the first 2 regions
      List<RegionInfo> regionsToReopen = new ArrayList<>();
      regionsToReopen.add(regions.get(0).getRegionInfo());
      regionsToReopen.add(regions.get(1).getRegionInfo());

      UTIL.getAdmin().reopenTableRegions(tableName, regionsToReopen);

      // Wait until at least those 2 regions report a non-initial hash.
      UTIL.waitFor(60000, () -> {
        try {
          int changedCount = 0;
          for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) {
            if (!regionDescriptorHash(region).equals(initialHash)) {
              changedCount++;
            }
          }
          return changedCount >= 2;
        } catch (Exception e) {
          return false;
        }
      });

      // Step 5: Verify only the reopened regions have the new hash
      List<HRegion> regionsAfterFirstReopen = UTIL.getHBaseCluster().getRegions(tableName);
      int newHashCount = 0;
      int oldHashCount = 0;

      for (HRegion region : regionsAfterFirstReopen) {
        String currentHash = regionDescriptorHash(region);
        if (currentHash.equals(newDescriptorHash)) {
          newHashCount++;
        } else if (currentHash.equals(initialHash)) {
          oldHashCount++;
        }
      }

      assertEquals(2, newHashCount, "Should have 2 regions with new hash");
      assertEquals(3, oldHashCount, "Should have 3 regions with old hash");

      // Step 6: Reopen the remaining regions
      List<RegionInfo> remainingRegions = new ArrayList<>();
      for (int i = 2; i < regions.size(); i++) {
        remainingRegions.add(regions.get(i).getRegionInfo());
      }

      UTIL.getAdmin().reopenTableRegions(tableName, remainingRegions);

      // Wait until every region reports the new descriptor hash.
      UTIL.waitFor(60000, () -> {
        try {
          for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) {
            if (!regionDescriptorHash(region).equals(newDescriptorHash)) {
              return false;
            }
          }
          return true;
        } catch (Exception e) {
          return false;
        }
      });

      // Step 7: Verify all regions now have the new hash
      for (HRegion region : UTIL.getHBaseCluster().getRegions(tableName)) {
        assertEquals(newDescriptorHash, regionDescriptorHash(region),
          "All regions should now have new hash");
      }
    } finally {
      UTIL.deleteTable(tableName);
    }
  }
}