/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.rest;

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.javax.ws.rs.GET;
import org.apache.hbase.thirdparty.javax.ws.rs.Produces;
import org.apache.hbase.thirdparty.javax.ws.rs.core.CacheControl;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Context;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Response;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Response.ResponseBuilder;
import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo;

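/**
 * REST resource that exposes the storage cluster status: aggregate region and request counts,
 * average load, per-region metrics for each live region server, and the names of dead servers.
 * Responses can be produced as plain text, XML, JSON, or protobuf.
 */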
@InterfaceAudience.Private
public class StorageClusterStatusResource extends ResourceBase {
  private static final Logger LOG = LoggerFactory.getLogger(StorageClusterStatusResource.class);

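  // Cluster status is always served fresh: responses carry Cache-Control: no-cache so clients do
  // not reuse stale status; no-transform is left unset, so intermediaries may transform responses.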
  static CacheControl cacheControl;
  static {
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  /**
   * Constructor
   */
  public StorageClusterStatusResource() throws IOException {
    super();
  }

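  /**
   * Returns the storage cluster status as a {@link StorageClusterStatusModel}: total region and
   * request counts, average load, per-region metrics for each live region server, and the names
   * of dead servers.
   * @param uriInfo the request URI, used only for trace logging
   * @return 200 with the populated model, or 503 if the cluster status cannot be obtained
   */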
  @GET
  @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
    MIMETYPE_PROTOBUF_IETF })
  public Response get(final @Context UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
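      // Only the live and dead server lists are needed to build the model; skip the other
      // ClusterMetrics options.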
      ClusterMetrics status =
        servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
      StorageClusterStatusModel model = new StorageClusterStatusModel();
      model.setRegions(status.getRegionCount());
      model.setRequests(status.getRequestCount());
      model.setAverageLoad(status.getAverageLoad());
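      // One live node entry per region server, identified by host:port and start code, with heap
      // usage reported in megabytes.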
      for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
        ServerName sn = entry.getKey();
        ServerMetrics load = entry.getValue();
        StorageClusterStatusModel.Node node =
          model.addLiveNode(sn.getHostname() + ":" + Integer.toString(sn.getPort()),
            sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
            (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
        node.setRequests(load.getRequestCount());
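        // Per-region metrics for this server: store and store file counts, sizes (MB for data,
        // KB for indexes and bloom filters), request counts, and compaction progress.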
        for (RegionMetrics region : load.getRegionMetrics().values()) {
          node.addRegion(region.getRegionName(), region.getStoreCount(), region.getStoreFileCount(),
            (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
            (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
            (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
            region.getReadRequestCount(), region.getCpRequestCount(), region.getWriteRequestCount(),
            (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
            (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
            (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
            region.getCompactingCellCount(), region.getCompactedCellCount());
        }
      }
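      // Dead servers are reported by name only.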
      for (ServerName name : status.getDeadServerNames()) {
        model.addDeadNode(name.toString());
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      servlet.getMetrics().incrementSucessfulGetRequests(1);
      return response.build();
    } catch (IOException e) {
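      // The cluster status could not be obtained (e.g. the master is unreachable); report the
      // REST service as unavailable with a 503.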
      servlet.getMetrics().incrementFailedGetRequests(1);
      return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
        .entity("Unavailable" + CRLF).build();
    }
  }
}