/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase;

import static org.junit.Assert.*;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.UniformReservoir;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Random;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.PerformanceEvaluation.RandomReadTest;
import org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({MiscTests.class, SmallTests.class})
public class TestPerformanceEvaluation {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestPerformanceEvaluation.class);

  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();

  @Test
  public void testDefaultInMemoryCompaction() {
    PerformanceEvaluation.TestOptions defaultOpts = new PerformanceEvaluation.TestOptions();
    assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT.toString(),
        defaultOpts.getInMemoryCompaction().toString());
    HTableDescriptor htd = PerformanceEvaluation.getTableDescriptor(defaultOpts);
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT.toString(),
          hcd.getInMemoryCompaction().toString());
    }
  }

  @Test
  public void testSerialization() throws IOException {
    PerformanceEvaluation.TestOptions options = new PerformanceEvaluation.TestOptions();
    assertFalse(options.isAutoFlush());
    options.setAutoFlush(true);
    ObjectMapper mapper = new ObjectMapper();
    String optionsString = mapper.writeValueAsString(options);
    PerformanceEvaluation.TestOptions optionsDeserialized =
        mapper.readValue(optionsString, PerformanceEvaluation.TestOptions.class);
    assertTrue(optionsDeserialized.isAutoFlush());
  }

  /**
   * Exercise the mr spec writing. Simple assertions to make sure it is basically working.
   */
  @Ignore @Test
  public void testWriteInputFile() throws IOException {
    TestOptions opts = new PerformanceEvaluation.TestOptions();
    final int clients = 10;
    opts.setNumClientThreads(clients);
    opts.setPerClientRunRows(10);
    Path dir =
        PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir());
    FileSystem fs = FileSystem.get(HTU.getConfiguration());
    Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME);
    long len = fs.getFileStatus(p).getLen();
    assertTrue(len > 0);
    byte[] content = new byte[(int) len];
    FSDataInputStream dis = fs.open(p);
    try {
      dis.readFully(content);
      BufferedReader br =
          new BufferedReader(new InputStreamReader(new ByteArrayInputStream(content)));
      int count = 0;
      while (br.readLine() != null) {
        count++;
      }
      // The input file should contain one spec line per client.
      assertEquals(clients, count);
    } finally {
      dis.close();
    }
  }

  @Test
  public void testSizeCalculation() {
    TestOptions opts = new PerformanceEvaluation.TestOptions();
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    int rows = opts.getPerClientRunRows();
    // Default row count.
    final int defaultPerClientRunRows = 1024 * 1024;
    assertEquals(defaultPerClientRunRows, rows);
    // If size is 2G, then twice the row count.
    opts.setSize(2.0f);
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
    // If two clients, then they get half the rows each.
    opts.setNumClientThreads(2);
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    assertEquals(defaultPerClientRunRows, opts.getPerClientRunRows());
    // If valueSize is 'random', the average value is half the configured size,
    // so twice the rows fit.
    opts.valueRandom = true;
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    assertEquals(defaultPerClientRunRows * 2, opts.getPerClientRunRows());
  }

  @Test
  public void testRandomReadCalculation() {
    TestOptions opts = new PerformanceEvaluation.TestOptions();
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    int rows = opts.getPerClientRunRows();
    // Default row count.
    final int defaultPerClientRunRows = 1024 * 1024;
    assertEquals(defaultPerClientRunRows, rows);
    // For random read, an explicitly set row count takes precedence over size
    // (a 2G size would otherwise double the row count).
    opts.setSize(2.0f);
    opts.setPerClientRunRows(1000);
    opts.setCmdName(PerformanceEvaluation.RANDOM_READ);
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    assertEquals(1000, opts.getPerClientRunRows());
    // Nor is the explicit row count split between two clients.
    opts.setNumClientThreads(2);
    opts = PerformanceEvaluation.calculateRowsAndSize(opts);
    assertEquals(1000, opts.getPerClientRunRows());
    Random random = new Random();
    // Assuming we draw at least one row index above 1000 before this loop expires.
    boolean foundValue = false;
    for (int i = 0; i < 10000000; i++) {
      int randomRow = PerformanceEvaluation.generateRandomRow(random, opts.totalRows);
      if (randomRow > 1000) {
        foundValue = true;
        break;
      }
    }
    assertTrue("Expected a row index greater than 1000", foundValue);
  }

  @Test
  public void testZipfian() throws NoSuchMethodException, SecurityException,
      InstantiationException, IllegalAccessException, IllegalArgumentException,
      InvocationTargetException {
    TestOptions opts = new PerformanceEvaluation.TestOptions();
    opts.setValueZipf(true);
    final int valueSize = 1024;
    opts.setValueSize(valueSize);
    RandomReadTest rrt = new RandomReadTest(null, opts, null);
    // Build a Histogram over a UniformReservoir via the reflected constructor.
    Constructor<?> ctor =
        Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class);
    ctor.setAccessible(true);
    Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500));
    for (int i = 0; i < 100; i++) {
      histogram.update(rrt.getValueLength(null));
    }
    // A zipfian value distribution should show a spread of lengths: a non-trivial
    // stddev and a median that is neither degenerate nor pinned at valueSize.
    Snapshot snapshot = histogram.getSnapshot();
    double stddev = snapshot.getStdDev();
    assertTrue(stddev != 0 && stddev != 1.0);
    double median = snapshot.getMedian();
    assertTrue(median != 0 && median != 1 && median != valueSize);
  }

  @Test
  public void testSetBufferSizeOption() {
    TestOptions opts = new PerformanceEvaluation.TestOptions();
    long bufferSize = opts.getBufferSize();
    assertEquals(2L * 1024L * 1024L, bufferSize);
    opts.setBufferSize(64L * 1024L);
    bufferSize = opts.getBufferSize();
    assertEquals(64L * 1024L, bufferSize);
  }

  @Test
  public void testParseOptsWithThreads() {
    Queue<String> opts = new LinkedList<>();
    String cmdName = "sequentialWrite";
    int threads = 1;
    opts.offer(cmdName);
    opts.offer(String.valueOf(threads));
    PerformanceEvaluation.TestOptions options = PerformanceEvaluation.parseOpts(opts);
    assertNotNull(options);
    assertNotNull(options.getCmdName());
    assertEquals(cmdName, options.getCmdName());
    assertEquals(threads, options.getNumClientThreads());
  }

  @Test
  public void testParseOptsWrongThreads() {
    Queue<String> opts = new LinkedList<>();
    String cmdName = "sequentialWrite";
    opts.offer(cmdName);
    // A non-numeric thread count should be rejected.
    opts.offer("qq");
    try {
      PerformanceEvaluation.parseOpts(opts);
      fail("Expected an IllegalArgumentException for a non-numeric thread count");
    } catch (IllegalArgumentException e) {
      assertEquals("Command " + cmdName + " does not have threads number", e.getMessage());
      assertTrue(e.getCause() instanceof NumberFormatException);
    }
  }

  @Test
  public void testParseOptsNoThreads() {
    Queue<String> opts = new LinkedList<>();
    String cmdName = "sequentialWrite";
    // Offer the command name without a thread count; parsing should fail.
    opts.offer(cmdName);
    try {
      PerformanceEvaluation.parseOpts(opts);
      fail("Expected an IllegalArgumentException for a missing thread count");
    } catch (IllegalArgumentException e) {
      assertEquals("Command " + cmdName + " does not have threads number", e.getMessage());
      assertTrue(e.getCause() instanceof NoSuchElementException);
    }
  }
}