/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.coprocessor;

import static org.apache.hadoop.hbase.HBaseTestingUtil.fam1;
import static org.apache.hadoop.hbase.HBaseTestingUtil.fam2;
import static org.apache.hadoop.hbase.HBaseTestingUtil.fam3;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTestConst;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;

@Category({ CoprocessorTests.class, MediumTests.class })
public class TestCoprocessorInterface {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestCoprocessorInterface.class);

  @Rule
  public TestName name = new TestName();
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
  static final Path DIR = TEST_UTIL.getDataTestDir();

  /**
   * Simple delegating scanner, used to verify that postScannerOpen can hand back a wrapped
   * RegionScanner.
   */
  private static class CustomScanner implements RegionScanner {

    private RegionScanner delegate;

    public CustomScanner(RegionScanner delegate) {
      this.delegate = delegate;
    }

    @Override
    public boolean next(List<? super ExtendedCell> results) throws IOException {
      return delegate.next(results);
    }

    @Override
    public boolean next(List<? super ExtendedCell> result, ScannerContext scannerContext)
      throws IOException {
      return delegate.next(result, scannerContext);
    }

    @Override
    public boolean nextRaw(List<? super ExtendedCell> result) throws IOException {
      return delegate.nextRaw(result);
    }

    @Override
    public boolean nextRaw(List<? super ExtendedCell> result, ScannerContext context)
      throws IOException {
      return delegate.nextRaw(result, context);
    }

    @Override
    public void close() throws IOException {
      delegate.close();
    }

    @Override
    public RegionInfo getRegionInfo() {
      return delegate.getRegionInfo();
    }

    @Override
    public boolean isFilterDone() throws IOException {
      return delegate.isFilterDone();
    }

    @Override
    public boolean reseek(byte[] row) throws IOException {
      return false;
    }

    @Override
    public long getMaxResultSize() {
      return delegate.getMaxResultSize();
    }

    @Override
    public long getMvccReadPoint() {
      return delegate.getMvccReadPoint();
    }

    @Override
    public int getBatch() {
      return delegate.getBatch();
    }
  }

  /** Region coprocessor that records which lifecycle hooks have been invoked. */
  public static class CoprocessorImpl implements RegionCoprocessor, RegionObserver {

    private boolean startCalled;
    private boolean stopCalled;
    private boolean preOpenCalled;
    private boolean postOpenCalled;
    private boolean preCloseCalled;
    private boolean postCloseCalled;
    private boolean preCompactCalled;
    private boolean postCompactCalled;
    private boolean preFlushCalled;
    private boolean postFlushCalled;
    private ConcurrentMap<String, Object> sharedData;

    @Override
    public void start(CoprocessorEnvironment e) {
      sharedData = ((RegionCoprocessorEnvironment) e).getSharedData();
      // use a new Object here so that a distinct instance is created on each invocation
      sharedData.putIfAbsent("test1", new Object());
      startCalled = true;
    }

    @Override
    public void stop(CoprocessorEnvironment e) {
      sharedData = null;
      stopCalled = true;
    }

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(this);
    }

    @Override
    public void preOpen(ObserverContext<? extends RegionCoprocessorEnvironment> e) {
      preOpenCalled = true;
    }

    @Override
    public void postOpen(ObserverContext<? extends RegionCoprocessorEnvironment> e) {
      postOpenCalled = true;
    }

    @Override
    public void preClose(ObserverContext<? extends RegionCoprocessorEnvironment> e,
      boolean abortRequested) {
      preCloseCalled = true;
    }

    @Override
    public void postClose(ObserverContext<? extends RegionCoprocessorEnvironment> e,
      boolean abortRequested) {
      postCloseCalled = true;
    }

    @Override
    public InternalScanner preCompact(ObserverContext<? extends RegionCoprocessorEnvironment> e,
      Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
      CompactionRequest request) {
      preCompactCalled = true;
      return scanner;
    }

    @Override
    public void postCompact(ObserverContext<? extends RegionCoprocessorEnvironment> e, Store store,
      StoreFile resultFile, CompactionLifeCycleTracker tracker, CompactionRequest request) {
      postCompactCalled = true;
    }

    @Override
    public void preFlush(ObserverContext<? extends RegionCoprocessorEnvironment> e,
      FlushLifeCycleTracker tracker) {
      preFlushCalled = true;
    }

    @Override
    public void postFlush(ObserverContext<? extends RegionCoprocessorEnvironment> e,
      FlushLifeCycleTracker tracker) {
      postFlushCalled = true;
    }

    @Override
    public RegionScanner postScannerOpen(
      final ObserverContext<? extends RegionCoprocessorEnvironment> e, final Scan scan,
      final RegionScanner s) throws IOException {
      return new CustomScanner(s);
    }

    boolean wasStarted() {
      return startCalled;
    }

    boolean wasStopped() {
      return stopCalled;
    }

    boolean wasOpened() {
      return (preOpenCalled && postOpenCalled);
    }

    boolean wasClosed() {
      return (preCloseCalled && postCloseCalled);
    }

    boolean wasFlushed() {
      return (preFlushCalled && postFlushCalled);
    }

    boolean wasCompacted() {
      return (preCompactCalled && postCompactCalled);
    }

    Map<String, Object> getSharedData() {
      return sharedData;
    }
  }

  public static class CoprocessorII implements RegionCoprocessor {
    private ConcurrentMap<String, Object> sharedData;

    @Override
    public void start(CoprocessorEnvironment e) {
      sharedData = ((RegionCoprocessorEnvironment) e).getSharedData();
      sharedData.putIfAbsent("test2", new Object());
    }

    @Override
    public void stop(CoprocessorEnvironment e) {
      sharedData = null;
    }

    @Override
    public Optional<RegionObserver> getRegionObserver() {
      return Optional.of(new RegionObserver() {
        @Override
        public void preGetOp(final ObserverContext<? extends RegionCoprocessorEnvironment> e,
          final Get get, final List<Cell> results) throws IOException {
          throw new RuntimeException();
        }
      });
    }

    Map<String, Object> getSharedData() {
      return sharedData;
    }
  }

  @Test
  public void testSharedData() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] families = { fam1, fam2, fam3 };

    Configuration hc = initConfig();
    HRegion region = initHRegion(tableName, name.getMethodName(), hc, new Class<?>[] {}, families);

    for (int i = 0; i < 3; i++) {
      HTestConst.addContent(region, fam3);
      region.flush(true);
    }

    region.compact(false);

    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);

    Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    Coprocessor c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    Object o = ((CoprocessorImpl) c).getSharedData().get("test1");
    Object o2 = ((CoprocessorII) c2).getSharedData().get("test2");
    assertNotNull(o);
    assertNotNull(o2);
    // the two coprocessors get different sharedData maps
    assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData());
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    // make sure that all coprocessors of a class share the identical sharedData map
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2);

    // now have all Environments fail
    try {
      byte[] r = region.getRegionInfo().getStartKey();
      if (r == null || r.length <= 0) {
        // It's the start row. Can't ask for null. Ask for minimal key instead.
        r = new byte[] { 0 };
      }
      Get g = new Get(r);
      region.get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    }
    assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class));
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    c = c2 = null;
    // perform a GC
    System.gc();
    // reopen the region
    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);
    // CPimpl is unaffected, still the same reference
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class);
    // a new map and object were created, so the reference is different; the old entry
    // was indeed removed by the GC and a new one has been created
    Object o3 = ((CoprocessorII) c2).getSharedData().get("test2");
    assertFalse(o3 == o2);
    HBaseTestingUtil.closeRegionAndWAL(region);
  }

  @Test
  public void testCoprocessorInterface() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    byte[][] families = { fam1, fam2, fam3 };

    Configuration hc = initConfig();
    HRegion region = initHRegion(tableName, name.getMethodName(), hc,
      new Class<?>[] { CoprocessorImpl.class }, families);
    for (int i = 0; i < 3; i++) {
      HTestConst.addContent(region, fam3);
      region.flush(true);
    }

    region.compact(false);

    // HBASE-4197
    Scan s = new Scan();
    RegionScanner scanner = region.getCoprocessorHost().postScannerOpen(s, region.getScanner(s));
    assertTrue(scanner instanceof CustomScanner);
    // this would throw an exception before HBASE-4197
    scanner.next(new ArrayList<>());

    HBaseTestingUtil.closeRegionAndWAL(region);
    Coprocessor c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class);

    assertTrue("Coprocessor not started", ((CoprocessorImpl) c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl) c).wasStopped());
    assertTrue(((CoprocessorImpl) c).wasOpened());
    assertTrue(((CoprocessorImpl) c).wasClosed());
    assertTrue(((CoprocessorImpl) c).wasFlushed());
    assertTrue(((CoprocessorImpl) c).wasCompacted());
  }

  HRegion reopenRegion(final HRegion closedRegion, Class<?>... implClasses) throws IOException {
    // RegionInfo info = new RegionInfo(tableName, null, null, false);
    HRegion r = HRegion.openHRegion(closedRegion, null);

    // The following piece is a hack. Currently a coprocessorHost
    // is secretly loaded at OpenRegionHandler. We don't really
    // start a region server here, so just manually create a cphost
    // and set it on the region.
    Configuration conf = TEST_UTIL.getConfiguration();
    RegionCoprocessorHost host =
      new RegionCoprocessorHost(r, Mockito.mock(RegionServerServices.class), conf);
    r.setCoprocessorHost(host);

    for (Class<?> implClass : implClasses) {
      host.load(implClass.asSubclass(RegionCoprocessor.class), Coprocessor.PRIORITY_USER, conf);
    }
    // We need to manually call pre- and postOpen here since the
    // above load() is not the real case for CP loading. A CP is
    // expected to be loaded by default from 1) configuration; or 2)
    // HTableDescriptor. If it's loaded after the HRegion is initialized,
    // the pre- and postOpen() won't be triggered automatically.
    // Here we have to call pre and postOpen explicitly.
    host.preOpen();
    host.postOpen();
    return r;
  }

  HRegion initHRegion(TableName tableName, String callingMethod, Configuration conf,
    Class<?>[] implClasses, byte[][] families) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
    for (byte[] family : families) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    }
    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
    RegionInfo info = RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null)
      .setSplit(false).build();
    Path path = new Path(DIR + callingMethod);
    HRegion r = HBaseTestingUtil.createRegionAndWAL(info, path, conf, builder.build());

    // The following piece is a hack.
    RegionCoprocessorHost host =
      new RegionCoprocessorHost(r, Mockito.mock(RegionServerServices.class), conf);
    r.setCoprocessorHost(host);

    for (Class<?> implClass : implClasses) {
      host.load(implClass.asSubclass(RegionCoprocessor.class), Coprocessor.PRIORITY_USER, conf);
      Coprocessor c = host.findCoprocessor(implClass.getName());
      assertNotNull(c);
    }

    // Here we have to call pre and postOpen explicitly.
    host.preOpen();
    host.postOpen();
    return r;
  }

  private Configuration initConfig() {
    // Always compact if there is more than one store file.
    TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);
    // Increase the amount of time between client retries
    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
    // This size should make it so we always split using the addContent
    // below. After adding all data, the first region is 1.3M
    TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128);
    TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);

    return TEST_UTIL.getConfiguration();
  }
}