/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.rest;

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * REST resource that exposes the storage cluster status (live and dead
 * region servers plus per-region metrics) as a {@link StorageClusterStatusModel}.
 */
@InterfaceAudience.Private
public class StorageClusterStatusResource extends ResourceBase {
  private static final Logger LOG =
    LoggerFactory.getLogger(StorageClusterStatusResource.class);

  // Cluster status changes constantly, so responses must never be cached.
  static CacheControl cacheControl;
  static {
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  /**
   * Constructor
   * @throws IOException if the REST servlet instance cannot be obtained
   */
  public StorageClusterStatusResource() throws IOException {
    super();
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
    MIMETYPE_PROTOBUF_IETF})
  public Response get(final @Context UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      // Fetch only the metrics this resource needs: live and dead servers.
      ClusterMetrics status = servlet.getAdmin().getClusterMetrics(
        EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
      StorageClusterStatusModel model = new StorageClusterStatusModel();
      model.setRegions(status.getRegionCount());
      model.setRequests(status.getRequestCount());
      model.setAverageLoad(status.getAverageLoad());
      // Translate each live server's metrics into a node entry in the model.
      for (Map.Entry<ServerName, ServerMetrics> entry: status.getLiveServerMetrics().entrySet()) {
        ServerName sn = entry.getKey();
        ServerMetrics load = entry.getValue();
        StorageClusterStatusModel.Node node =
          model.addLiveNode(
            sn.getHostname() + ":" +
            Integer.toString(sn.getPort()),
            sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
            (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
        node.setRequests(load.getRequestCount());
        for (RegionMetrics region: load.getRegionMetrics().values()) {
          node.addRegion(region.getRegionName(), region.getStoreCount(),
            region.getStoreFileCount(),
            (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
            (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
            (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
            region.getReadRequestCount(),
            region.getWriteRequestCount(),
            (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
            (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
            (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
            region.getCompactingCellCount(),
            region.getCompactedCellCount());
        }
      }
      for (ServerName name: status.getDeadServerNames()) {
        model.addDeadNode(name.toString());
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      servlet.getMetrics().incrementSucessfulGetRequests(1);
      return response.build();
    } catch (IOException e) {
      servlet.getMetrics().incrementFailedGetRequests(1);
      return Response.status(Response.Status.SERVICE_UNAVAILABLE)
        .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
        .build();
    }
  }
}