/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.rest.client;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.GeneralSecurityException;
import java.security.KeyManagementException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import javax.net.ssl.SSLContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.ssl.SSLFactory.Mode;
import org.apache.http.Header;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.HttpClientConnectionManager;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.cookie.BasicClientCookie;
import org.apache.http.message.BasicHeader;
import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

/**
 * A wrapper around HttpClient which provides some useful functions and semantics for interacting
 * with the REST gateway.
 */
@InterfaceAudience.Public
public class Client {
  public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];

  private static final Logger LOG = LoggerFactory.getLogger(Client.class);

  private CloseableHttpClient httpClient;
  private Cluster cluster;
  private Integer lastNodeId;
  private boolean sticky = false;
  private Configuration conf;
  private boolean sslEnabled;
  private HttpResponse resp;
  private HttpGet httpGet = null;
  private HttpClientContext stickyContext = null;
  private BasicCredentialsProvider provider;
  private Optional<KeyStore> trustStore;
  private Map<String, String> extraHeaders;
  private KerberosAuthenticator authenticator;

  private static final String AUTH_COOKIE = "hadoop.auth";
  private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
  private static final String COOKIE = "Cookie";

  /**
   * Default Constructor
   */
  public Client() {
    this(null);
  }

  private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, boolean sticky,
    Optional<KeyStore> trustStore, Optional<String> userName, Optional<String> password,
    Optional<String> bearerToken, Optional<HttpClientConnectionManager> connManager) {
    this.cluster = cluster;
    this.conf = conf;
    this.sslEnabled = sslEnabled;
    this.trustStore = trustStore;
    extraHeaders = new ConcurrentHashMap<>();
    String clspath = System.getProperty("java.class.path");
    LOG.debug("classpath " + clspath);
    HttpClientBuilder httpClientBuilder = HttpClients.custom();

    int connTimeout = this.conf.getInt(Constants.REST_CLIENT_CONN_TIMEOUT,
      Constants.DEFAULT_REST_CLIENT_CONN_TIMEOUT);
    int socketTimeout = this.conf.getInt(Constants.REST_CLIENT_SOCKET_TIMEOUT,
      Constants.DEFAULT_REST_CLIENT_SOCKET_TIMEOUT);
    RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(connTimeout)
      .setSocketTimeout(socketTimeout)
      // URIs should not be normalized, see HBASE-26903
      .setNormalizeUri(false)
      .build();
    httpClientBuilder.setDefaultRequestConfig(requestConfig);

    // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore.
    // The new http client would decompress the gzip content automatically.
    // In order to keep the original behaviour of this public class, we disable
    // automatic content compression.
    httpClientBuilder.disableContentCompression();

    if (sslEnabled && trustStore.isPresent()) {
      try {
        SSLContext sslcontext =
          SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build();
        httpClientBuilder.setSSLContext(sslcontext);
      } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) {
        throw new ClientTrustStoreInitializationException("Error while processing truststore", e);
      }
    }

    if (userName.isPresent() && password.isPresent()) {
      // We want to stick to the old, very limited authentication and session handling when sticky
      // is not set, to preserve backwards compatibility
      if (!sticky) {
        throw new IllegalArgumentException("BASIC auth is only implemented when sticky is set");
      }
      provider = new BasicCredentialsProvider();
      // AuthScope.ANY is required for pre-emptive auth. We only ever use a single auth method
      // anyway.
      AuthScope anyAuthScope = AuthScope.ANY;
      this.provider.setCredentials(anyAuthScope,
        new UsernamePasswordCredentials(userName.get(), password.get()));
    }

    if (bearerToken.isPresent()) {
      // We want to stick to the old, very limited authentication and session handling when sticky
      // is not set, to preserve backwards compatibility
      if (!sticky) {
        throw new IllegalArgumentException("BEARER auth is only implemented when sticky is set");
      }
      // We could also put the header into the context or connection, but that would have the same
      // effect.
      extraHeaders.put(HttpHeaders.AUTHORIZATION, "Bearer " + bearerToken.get());
    }

    connManager.ifPresent(httpClientBuilder::setConnectionManager);

    this.httpClient = httpClientBuilder.build();
    setSticky(sticky);
  }

  /**
   * Constructor. This constructor will create an object using the old, faulty load balancing
   * logic. When specifying multiple servers in the cluster object, it is highly recommended to
   * call setSticky() on the created client, or to use the preferred constructor instead.
   * @param cluster the cluster definition
   */
  public Client(Cluster cluster) {
    this(cluster, false);
  }

  /**
   * Constructor. This constructor will create an object using the old, faulty load balancing
   * logic. When specifying multiple servers in the cluster object, it is highly recommended to
   * call setSticky() on the created client, or to use the preferred constructor instead.
   * @param cluster    the cluster definition
   * @param sslEnabled enable SSL or not
   */
  public Client(Cluster cluster, boolean sslEnabled) {
    initialize(cluster, HBaseConfiguration.create(), sslEnabled, false, Optional.empty(),
      Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
  }

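  // A minimal usage sketch (illustration only, not part of the original source). The host and
  // port are hypothetical, and error handling is omitted.
  //
  //   Cluster cluster = new Cluster();
  //   cluster.add("rest.example.com", 8080);
  //   Client client = new Client(cluster);
  //   client.setSticky(true); // recommended whenever more than one host is configured
  //   Response response = client.get("/version/cluster", "text/plain");
  //   System.out.println(response.getCode());
  //   client.close();
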
  /**
   * Constructor. This constructor will create an object using the old, faulty load balancing
   * logic. When specifying multiple servers in the cluster object, it is highly recommended to
   * call setSticky() on the created client, or to use the preferred constructor instead.
   * @param cluster    the cluster definition
   * @param conf       Configuration
   * @param sslEnabled enable SSL or not
   */
  public Client(Cluster cluster, Configuration conf, boolean sslEnabled) {
    initialize(cluster, conf, sslEnabled, false, Optional.empty(), Optional.empty(),
      Optional.empty(), Optional.empty(), Optional.empty());
  }

  /**
   * Constructor allowing a custom trust store to be defined (only for SSL connections). This
   * constructor will create an object using the old, faulty load balancing logic. When specifying
   * multiple servers in the cluster object, it is highly recommended to call setSticky() on the
   * created client, or to use the preferred constructor instead.
   * @param cluster            the cluster definition
   * @param trustStorePath     custom trust store to use for SSL connections
   * @param trustStorePassword password to use for custom trust store
   * @param trustStoreType     type of custom trust store
   * @throws ClientTrustStoreInitializationException if the trust store file cannot be loaded
   */
  public Client(Cluster cluster, String trustStorePath, Optional<String> trustStorePassword,
    Optional<String> trustStoreType) {
    this(cluster, HBaseConfiguration.create(), trustStorePath, trustStorePassword, trustStoreType);
  }

  /**
   * Constructor that accepts an optional trustStore and authentication information for either
   * BASIC or BEARER authentication in sticky mode, which does not use the old faulty load
   * balancing logic and enables correct session handling. If neither userName/password nor the
   * bearer token is specified, the client falls back to SPNEGO auth. The loadTruststore() static
   * method can be used to load a local trustStore file. If connManager is specified, it must be
   * fully configured. Even then, the trustStore related parameters must be specified because they
   * are also used for SPNEGO authentication, which uses a separate HTTP client implementation.
   * Specifying the HttpClientConnectionManager is an experimental feature. It exposes the
   * internal HTTP library details, and may be changed/removed when the library is updated or
   * replaced.
   * @param cluster     the cluster definition
   * @param conf        HBase/Hadoop configuration
   * @param sslEnabled  use HTTPS
   * @param trustStore  the optional trustStore object
   * @param userName    for BASIC auth
   * @param password    for BASIC auth
   * @param bearerToken for BEARER auth
   * @param connManager optional, fully configured connection manager (experimental)
   */
  @InterfaceAudience.Private
  public Client(Cluster cluster, Configuration conf, boolean sslEnabled,
    Optional<KeyStore> trustStore, Optional<String> userName, Optional<String> password,
    Optional<String> bearerToken, Optional<HttpClientConnectionManager> connManager) {
    initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken,
      connManager);
  }

  public Client(Cluster cluster, Configuration conf, boolean sslEnabled,
    Optional<KeyStore> trustStore, Optional<String> userName, Optional<String> password,
    Optional<String> bearerToken) {
    initialize(cluster, conf, sslEnabled, true, trustStore, userName, password, bearerToken,
      Optional.empty());
  }

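  // A sketch of the preferred, sticky construction path with a custom trust store (illustration
  // only; the path, password, credentials, and host are hypothetical).
  //
  //   KeyStore trustStore = Client.loadTruststore("/etc/hbase/rest-truststore.jks",
  //     Optional.of("changeit"), Optional.empty());
  //   Cluster cluster = new Cluster();
  //   cluster.add("rest.example.com", 8443);
  //   Client client = new Client(cluster, HBaseConfiguration.create(), true,
  //     Optional.of(trustStore), Optional.of("user"), Optional.of("secret"), Optional.empty());
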
  /**
   * Constructor allowing a custom trust store to be defined (only for SSL connections). This
   * constructor will create an object using the old, faulty load balancing logic. When specifying
   * multiple servers in the cluster object, it is highly recommended to call setSticky() on the
   * created client, or to use the preferred constructor instead.
   * @param cluster            the cluster definition
   * @param conf               HBase/Hadoop Configuration
   * @param trustStorePath     custom trust store to use for SSL connections
   * @param trustStorePassword password to use for custom trust store
   * @param trustStoreType     type of custom trust store
   * @throws ClientTrustStoreInitializationException if the trust store file cannot be loaded
   */
  public Client(Cluster cluster, Configuration conf, String trustStorePath,
    Optional<String> trustStorePassword, Optional<String> trustStoreType) {
    KeyStore trustStore = loadTruststore(trustStorePath, trustStorePassword, trustStoreType);
    initialize(cluster, conf, true, false, Optional.of(trustStore), Optional.empty(),
      Optional.empty(), Optional.empty(), Optional.empty());
  }

  /**
   * Loads a trustStore from the local file system. Can be used to load the trustStore for the
   * preferred constructor.
   */
  public static KeyStore loadTruststore(String trustStorePath, Optional<String> trustStorePassword,
    Optional<String> trustStoreType) {

    char[] truststorePassword = trustStorePassword.map(String::toCharArray).orElse(null);
    String type = trustStoreType.orElse(KeyStore.getDefaultType());

    KeyStore trustStore;
    try {
      trustStore = KeyStore.getInstance(type);
    } catch (KeyStoreException e) {
      throw new ClientTrustStoreInitializationException("Invalid trust store type: " + type, e);
    }
    try (InputStream inputStream =
      new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) {
      trustStore.load(inputStream, truststorePassword);
    } catch (CertificateException | NoSuchAlgorithmException | IOException e) {
      throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath,
        e);
    }
    return trustStore;
  }

  /**
   * Shut down the client. Close any open persistent connections.
   */
  public void shutdown() {
  }

  /** Returns the wrapped HttpClient */
  public HttpClient getHttpClient() {
    return httpClient;
  }

  /**
   * Add an extra header. Extra headers are applied to all HTTP methods until they are removed. If
   * a header is no longer needed, the client must remove it explicitly.
   */
  public void addExtraHeader(final String name, final String value) {
    extraHeaders.put(name, value);
  }

  /**
   * Get an extra header value.
   */
  public String getExtraHeader(final String name) {
    return extraHeaders.get(name);
  }

  /**
   * Get all extra headers (read-only).
   */
  public Map<String, String> getExtraHeaders() {
    return Collections.unmodifiableMap(extraHeaders);
  }

  /**
   * Remove an extra header.
   */
  public void removeExtraHeader(final String name) {
    extraHeaders.remove(name);
  }

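  // Extra-header sketch (illustration only; the header name and value are hypothetical). Extra
  // headers are attached to every request until they are removed.
  //
  //   client.addExtraHeader("X-Request-Source", "batch-loader");
  //   Response r = client.get("/namespaces");       // sent with X-Request-Source
  //   client.removeExtraHeader("X-Request-Source"); // later requests no longer carry it
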
  /**
   * Execute a transaction method given only the path. If sticky is false: selects one of the
   * members of the supplied cluster definition at random and iterates through the list until a
   * transaction can be successfully completed. The definition of success here is a complete HTTP
   * transaction, irrespective of result code. If sticky is true: selects a random member of the
   * supplied cluster definition for the first request. For subsequent requests it will use the
   * same member, and it will not automatically retry if the call fails.
   * @param cluster the cluster definition
   * @param method  the transaction method
   * @param headers HTTP header values to send
   * @param path    the properly urlencoded path
   * @return the HTTP response
   */
  public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers,
    String path) throws IOException {
    IOException lastException;
    if (cluster.nodes.size() < 1) {
      throw new IOException("Cluster is empty");
    }
    if (lastNodeId == null || !sticky) {
      lastNodeId = ThreadLocalRandom.current().nextInt(cluster.nodes.size());
    }
    int start = lastNodeId;
    do {
      cluster.lastHost = cluster.nodes.get(lastNodeId);
      try {
        StringBuilder sb = new StringBuilder();
        if (sslEnabled) {
          sb.append("https://");
        } else {
          sb.append("http://");
        }
        sb.append(cluster.lastHost);
        sb.append(path);
        URI uri = new URI(sb.toString());
        if (method instanceof HttpPut) {
          HttpPut put = new HttpPut(uri);
          put.setEntity(((HttpPut) method).getEntity());
          put.setHeaders(method.getAllHeaders());
          method = put;
        } else if (method instanceof HttpGet) {
          method = new HttpGet(uri);
        } else if (method instanceof HttpHead) {
          method = new HttpHead(uri);
        } else if (method instanceof HttpDelete) {
          method = new HttpDelete(uri);
        } else if (method instanceof HttpPost) {
          HttpPost post = new HttpPost(uri);
          post.setEntity(((HttpPost) method).getEntity());
          post.setHeaders(method.getAllHeaders());
          method = post;
        }
        return executeURI(method, headers, uri.toString());
      } catch (IOException e) {
        lastException = e;
      } catch (URISyntaxException use) {
        lastException = new IOException(use);
      }
      if (!sticky) {
        lastNodeId = (++lastNodeId) % cluster.nodes.size();
      }
      // Do not retry if sticky. Let the caller handle the error.
    } while (!sticky && lastNodeId != start);
    throw lastException;
  }

  /**
   * Execute a transaction method given a complete URI.
   * @param method  the transaction method
   * @param headers HTTP header values to send
   * @param uri     a properly urlencoded URI
   * @return the HTTP response
   */
  public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
    throws IOException {
    // method.setURI(new URI(uri, true));
    for (Map.Entry<String, String> e : extraHeaders.entrySet()) {
      method.addHeader(e.getKey(), e.getValue());
    }
    if (headers != null) {
      for (Header header : headers) {
        method.addHeader(header);
      }
    }
    long startTime = EnvironmentEdgeManager.currentTime();
    if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
    if (stickyContext != null) {
      resp = httpClient.execute(method, stickyContext);
    } else {
      resp = httpClient.execute(method);
    }
    if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
      // Authentication error
      LOG.debug("Performing negotiation with the server.");
      try {
        negotiate(method, uri);
      } catch (GeneralSecurityException e) {
        throw new IOException(e);
      }
      if (stickyContext != null) {
        resp = httpClient.execute(method, stickyContext);
      } else {
        resp = httpClient.execute(method);
      }
    }

    long endTime = EnvironmentEdgeManager.currentTime();
    if (LOG.isTraceEnabled()) {
      LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " "
        + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");
    }
    return resp;
  }

  /**
   * Execute a transaction method. Will call either <tt>executePathOnly</tt> or <tt>executeURI</tt>
   * depending on whether 'path' contains only a path or a complete URI, respectively.
   * @param cluster the cluster definition
   * @param method  the HTTP method
   * @param headers HTTP header values to send
   * @param path    the properly urlencoded path or URI
   * @return the HTTP response
   */
  public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path)
    throws IOException {
    if (path.startsWith("/")) {
      return executePathOnly(cluster, method, headers, path);
    }
    return executeURI(method, headers, path);
  }

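  // A small dispatch sketch (illustration only; the host and port are hypothetical): a path that
  // starts with '/' is resolved against the cluster definition via executePathOnly(), while
  // anything else is treated as a complete URI and sent as-is via executeURI().
  //
  //   HttpGet get = new HttpGet("http://rest.example.com:8080/version/cluster");
  //   HttpResponse raw = client.execute(cluster, get, Client.EMPTY_HEADER_ARRAY,
  //     "http://rest.example.com:8080/version/cluster");
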
  /**
   * Initiate client-side Kerberos negotiation with the server.
   * @param method method to inject the authentication token into.
   * @param uri    the String to parse as a URL.
   * @throws IOException if an unknown protocol is found.
   */
  private void negotiate(HttpUriRequest method, String uri)
    throws IOException, GeneralSecurityException {
    try {
      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
      if (authenticator == null) {
        authenticator = new KerberosAuthenticator();
        if (trustStore.isPresent()) {
          // The authenticator does not use Apache HttpClient, so we need to
          // configure it separately to use the specified trustStore
          Configuration sslConf = setupTrustStoreForHadoop(trustStore.get());
          SSLFactory sslFactory = new SSLFactory(Mode.CLIENT, sslConf);
          sslFactory.init();
          authenticator.setConnectionConfigurator(sslFactory);
        }
      }
      URL url = new URL(uri);
      authenticator.authenticate(url, token);
      if (sticky) {
        BasicClientCookie authCookie = new BasicClientCookie("hadoop.auth", token.toString());
        // Hadoop eats the domain even if set by server
        authCookie.setDomain(url.getHost());
        stickyContext.getCookieStore().addCookie(authCookie);
      } else {
        // session cookie is NOT set for backwards compatibility for non-sticky mode
        // Inject the obtained negotiated token in the method cookie
        // This is only done for this single request, the next one will trigger a new SPNEGO
        // handshake
        injectToken(method, token);
      }
    } catch (AuthenticationException e) {
      LOG.error("Failed to negotiate with the server.", e);
      throw new IOException(e);
    }
  }

  private Configuration setupTrustStoreForHadoop(KeyStore trustStore)
    throws IOException, KeyStoreException, NoSuchAlgorithmException, CertificateException {
    Path tmpDirPath = Files.createTempDirectory("hbase_rest_client_truststore");
    File trustStoreFile = tmpDirPath.resolve("truststore.jks").toFile();
    // Shouldn't be needed with the secure temp dir, but let's generate a password anyway
    String password = Double.toString(Math.random());
    try (FileOutputStream fos = new FileOutputStream(trustStoreFile)) {
      trustStore.store(fos, password.toCharArray());
    }

    Configuration sslConf = new Configuration();
    // Type is the Java default, we use the same JVM to read this back
    sslConf.set("ssl.client.truststore.location", trustStoreFile.getAbsolutePath());
    sslConf.set("ssl.client.truststore.password", password);
    return sslConf;
  }

  /**
   * Helper method that injects an authentication token to send with the method.
   * @param method method to inject the authentication token into.
   * @param token  authentication token to inject.
   */
  private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) {
    String t = token.toString();
    if (t != null) {
      if (!t.startsWith("\"")) {
        t = "\"" + t + "\"";
      }
      method.addHeader(COOKIE, AUTH_COOKIE_EQ + t);
    }
  }

  /** Returns the cluster definition */
  public Cluster getCluster() {
    return cluster;
  }

  /**
   * @param cluster the cluster definition
   */
  public void setCluster(Cluster cluster) {
    this.cluster = cluster;
  }

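  // Sticky-mode sketch (illustration only; the table name and scanner payload variable are
  // hypothetical). Scanners keep state on the REST server, so all scanner calls must reach the
  // same host whenever the cluster lists more than one server.
  //
  //   client.setSticky(true); // required before any scanner calls with multiple hosts
  //   Response created = client.put("/mytable/scanner", "text/xml", scannerSpecXml);
  //   // The new scanner's URI is returned in the "Location" response header; keep using this
  //   // same client instance for subsequent GETs against it so they reach the same REST server.
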
  /**
   * The default behaviour is load balancing by sending each request to a random host. This DOES
   * NOT work with scans, which have state on the REST servers. Make sure sticky is set to true
   * before attempting Scan related operations if more than one host is defined in the cluster.
   * @return whether subsequent requests will use the same host
   */
  public boolean isSticky() {
    return sticky;
  }

  /**
   * The default behaviour is load balancing by sending each request to a random host. This DOES
   * NOT work with scans, which have state on the REST servers. Set sticky to true before
   * attempting Scan related operations if more than one host is defined in the cluster. Nodes must
   * not be added or removed from the Cluster object while sticky is true. Setting the sticky flag
   * also enables session handling, which eliminates the need to re-authenticate each request, and
   * lets the client handle any other cookies (like the sticky cookie set by load balancers)
   * correctly.
   * @param sticky whether subsequent requests will use the same host
   */
  public void setSticky(boolean sticky) {
    lastNodeId = null;
    if (sticky) {
      stickyContext = new HttpClientContext();
      if (provider != null) {
        stickyContext.setCredentialsProvider(provider);
      }
    } else {
      stickyContext = null;
    }
    this.sticky = sticky;
  }

  /**
   * Send a HEAD request
   * @param path the path or URI
   * @return a Response object with response detail
   */
  public Response head(String path) throws IOException {
    return head(cluster, path, null);
  }

  /**
   * Send a HEAD request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @param headers the HTTP headers to include in the request
   * @return a Response object with response detail
   */
  public Response head(Cluster cluster, String path, Header[] headers) throws IOException {
    HttpHead method = new HttpHead(path);
    try {
      HttpResponse resp = execute(cluster, method, null, path);
      return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), null);
    } finally {
      method.releaseConnection();
    }
  }

  /**
   * Send a GET request
   * @param path the path or URI
   * @return a Response object with response detail
   */
  public Response get(String path) throws IOException {
    return get(cluster, path);
  }

  /**
   * Send a GET request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @return a Response object with response detail
   */
  public Response get(Cluster cluster, String path) throws IOException {
    return get(cluster, path, EMPTY_HEADER_ARRAY);
  }

  /**
   * Send a GET request
   * @param path   the path or URI
   * @param accept Accept header value
   * @return a Response object with response detail
   */
  public Response get(String path, String accept) throws IOException {
    return get(cluster, path, accept);
  }

  /**
   * Send a GET request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @param accept  Accept header value
   * @return a Response object with response detail
   */
  public Response get(Cluster cluster, String path, String accept) throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new BasicHeader("Accept", accept);
    return get(cluster, path, headers);
  }

  /**
   * Send a GET request
   * @param path    the path or URI
   * @param headers the HTTP headers to include in the request, <tt>Accept</tt> must be supplied
   * @return a Response object with response detail
   */
  public Response get(String path, Header[] headers) throws IOException {
    return get(cluster, path, headers);
  }
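
  // Sketch of consuming a response (illustration only; the row key and media type are
  // hypothetical). The Response returned by get() wraps the entity stream; asking for the body
  // buffers the whole entity in memory (see getResponseBody below), so prefer the streaming form
  // when the content length is unknown or large.
  //
  //   Response r = client.get("/mytable/row1", "application/json");
  //   byte[] body = r.getBody(); // fully buffered in memory
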

  /**
   * Returns the response body of the HttpResponse, if any, as an array of bytes. If the response
   * body is not available or cannot be read, returns <tt>null</tt>. Note: This will cause the
   * entire response body to be buffered in memory. A malicious server may easily exhaust all the
   * VM memory. It is strongly recommended to use getResponseAsStream if the content length of the
   * response is unknown or reasonably large.
   * @param resp HttpResponse
   * @return The response body, null if body is empty
   * @throws IOException If an I/O (transport) problem occurs while obtaining the response body.
   */
  public static byte[] getResponseBody(HttpResponse resp) throws IOException {
    if (resp.getEntity() == null) {
      return null;
    }
    InputStream instream = resp.getEntity().getContent();
    if (instream == null) {
      return null;
    }
    try {
      long contentLength = resp.getEntity().getContentLength();
      if (contentLength > Integer.MAX_VALUE) {
        // guard integer cast from overflow
        throw new IOException("Content too large to be buffered: " + contentLength + " bytes");
      }
      if (contentLength > 0) {
        byte[] content = new byte[(int) contentLength];
        ByteStreams.readFully(instream, content);
        return content;
      } else {
        return ByteStreams.toByteArray(instream);
      }
    } finally {
      Closeables.closeQuietly(instream);
    }
  }

  /**
   * Send a GET request
   * @param c       the cluster definition
   * @param path    the path or URI
   * @param headers the HTTP headers to include in the request
   * @return a Response object with response detail
   */
  public Response get(Cluster c, String path, Header[] headers) throws IOException {
    if (httpGet != null) {
      httpGet.releaseConnection();
    }
    httpGet = new HttpGet(path);
    HttpResponse resp = execute(c, httpGet, headers, path);
    return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp,
      resp.getEntity() == null ? null : resp.getEntity().getContent());
  }

  /**
   * Send a PUT request
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @return a Response object with response detail
   */
  public Response put(String path, String contentType, byte[] content) throws IOException {
    return put(cluster, path, contentType, content);
  }

  /**
   * Send a PUT request
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @param extraHdr    extra Header to send
   * @return a Response object with response detail
   */
  public Response put(String path, String contentType, byte[] content, Header extraHdr)
    throws IOException {
    return put(cluster, path, contentType, content, extraHdr);
  }

  /**
   * Send a PUT request
   * @param cluster     the cluster definition
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response put(Cluster cluster, String path, String contentType, byte[] content)
    throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new BasicHeader("Content-Type", contentType);
    return put(cluster, path, headers, content);
  }

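  // PUT sketch (illustration only; the table, row, column, and payload are hypothetical).
  //
  //   byte[] cellXml = "<CellSet>...</CellSet>".getBytes(StandardCharsets.UTF_8);
  //   Response r = client.put("/mytable/row1/cf:col1", "text/xml", cellXml);
  //   if (r.getCode() != 200) {
  //     // handle error
  //   }
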
  /**
   * Send a PUT request
   * @param cluster     the cluster definition
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @param extraHdr    additional Header to send
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response put(Cluster cluster, String path, String contentType, byte[] content,
    Header extraHdr) throws IOException {
    int cnt = extraHdr == null ? 1 : 2;
    Header[] headers = new Header[cnt];
    headers[0] = new BasicHeader("Content-Type", contentType);
    if (extraHdr != null) {
      headers[1] = extraHdr;
    }
    return put(cluster, path, headers, content);
  }

  /**
   * Send a PUT request
   * @param path    the path or URI
   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
   * @param content the content bytes
   * @return a Response object with response detail
   */
  public Response put(String path, Header[] headers, byte[] content) throws IOException {
    return put(cluster, path, headers, content);
  }

  /**
   * Send a PUT request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
   * @param content the content bytes
   * @return a Response object with response detail
   */
  public Response put(Cluster cluster, String path, Header[] headers, byte[] content)
    throws IOException {
    HttpPut method = new HttpPut(path);
    try {
      method.setEntity(new ByteArrayEntity(content));
      HttpResponse resp = execute(cluster, method, headers, path);
      headers = resp.getAllHeaders();
      content = getResponseBody(resp);
      return new Response(resp.getStatusLine().getStatusCode(), headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  /**
   * Send a POST request
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @return a Response object with response detail
   */
  public Response post(String path, String contentType, byte[] content) throws IOException {
    return post(cluster, path, contentType, content);
  }

  /**
   * Send a POST request
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @param extraHdr    additional Header to send
   * @return a Response object with response detail
   */
  public Response post(String path, String contentType, byte[] content, Header extraHdr)
    throws IOException {
    return post(cluster, path, contentType, content, extraHdr);
  }

  /**
   * Send a POST request
   * @param cluster     the cluster definition
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response post(Cluster cluster, String path, String contentType, byte[] content)
    throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new BasicHeader("Content-Type", contentType);
    return post(cluster, path, headers, content);
  }

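  // POST sketch with an additional header (illustration only; the endpoint, the header, and the
  // value variable are hypothetical examples of the extraHdr parameter).
  //
  //   Header traceHdr = new BasicHeader("X-Trace-Id", "abc123");
  //   Response r = client.post("/mytable/row1/cf:col1", "application/octet-stream", value, traceHdr);
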
  /**
   * Send a POST request
   * @param cluster     the cluster definition
   * @param path        the path or URI
   * @param contentType the content MIME type
   * @param content     the content bytes
   * @param extraHdr    additional Header to send
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response post(Cluster cluster, String path, String contentType, byte[] content,
    Header extraHdr) throws IOException {
    int cnt = extraHdr == null ? 1 : 2;
    Header[] headers = new Header[cnt];
    headers[0] = new BasicHeader("Content-Type", contentType);
    if (extraHdr != null) {
      headers[1] = extraHdr;
    }
    return post(cluster, path, headers, content);
  }

  /**
   * Send a POST request
   * @param path    the path or URI
   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
   * @param content the content bytes
   * @return a Response object with response detail
   */
  public Response post(String path, Header[] headers, byte[] content) throws IOException {
    return post(cluster, path, headers, content);
  }

  /**
   * Send a POST request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @param headers the HTTP headers to include, <tt>Content-Type</tt> must be supplied
   * @param content the content bytes
   * @return a Response object with response detail
   */
  public Response post(Cluster cluster, String path, Header[] headers, byte[] content)
    throws IOException {
    HttpPost method = new HttpPost(path);
    try {
      method.setEntity(new ByteArrayEntity(content));
      HttpResponse resp = execute(cluster, method, headers, path);
      headers = resp.getAllHeaders();
      content = getResponseBody(resp);
      return new Response(resp.getStatusLine().getStatusCode(), headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  /**
   * Send a DELETE request
   * @param path the path or URI
   * @return a Response object with response detail
   */
  public Response delete(String path) throws IOException {
    return delete(cluster, path);
  }

  /**
   * Send a DELETE request
   * @param path     the path or URI
   * @param extraHdr additional Header to send
   * @return a Response object with response detail
   */
  public Response delete(String path, Header extraHdr) throws IOException {
    return delete(cluster, path, extraHdr);
  }

  /**
   * Send a DELETE request
   * @param cluster the cluster definition
   * @param path    the path or URI
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response delete(Cluster cluster, String path) throws IOException {
    HttpDelete method = new HttpDelete(path);
    try {
      HttpResponse resp = execute(cluster, method, null, path);
      Header[] headers = resp.getAllHeaders();
      byte[] content = getResponseBody(resp);
      return new Response(resp.getStatusLine().getStatusCode(), headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  /**
   * Send a DELETE request
   * @param cluster  the cluster definition
   * @param path     the path or URI
   * @param extraHdr additional Header to send
   * @return a Response object with response detail
   * @throws IOException for error
   */
  public Response delete(Cluster cluster, String path, Header extraHdr) throws IOException {
    HttpDelete method = new HttpDelete(path);
    try {
      Header[] headers = { extraHdr };
      HttpResponse resp = execute(cluster, method, headers, path);
      headers = resp.getAllHeaders();
      byte[] content = getResponseBody(resp);
      return new Response(resp.getStatusLine().getStatusCode(), headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  public static class ClientTrustStoreInitializationException extends RuntimeException {

    public ClientTrustStoreInitializationException(String message, Throwable cause) {
      super(message, cause);
    }
  }

  public void close() {
    try {
      httpClient.close();
    } catch (Exception e) {
      LOG.info("Exception while shutting down connection manager", e);
    }
  }
}