/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.security.Key;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Integration test for transparent-encryption key rotation against a minicluster.
 * <p>
 * Covers two rotation scenarios:
 * <ul>
 *   <li>Column-family (data) key rotation: the CF key is swapped via an online schema
 *       change and a major compaction rewrites store files under the new key.</li>
 *   <li>Cluster master key rotation: the master key name is changed while the old key
 *       remains available as the alternate, and existing store files must still be
 *       readable after a restart.</li>
 * </ul>
 */
@Category({RegionServerTests.class, MediumTests.class})
public class TestEncryptionKeyRotation {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestEncryptionKeyRotation.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestEncryptionKeyRotation.class);
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final Configuration conf = TEST_UTIL.getConfiguration();
  // Two distinct CF keys so rotation is observable in the HFile trailers.
  private static final Key initialCFKey;
  private static final Key secondCFKey;

  @Rule
  public TestName name = new TestName();

  static {
    // Create the test encryption keys
    SecureRandom rng = new SecureRandom();
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    rng.nextBytes(keyBytes);
    String algorithm =
        conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    initialCFKey = new SecretKeySpec(keyBytes, algorithm);
    rng.nextBytes(keyBytes);
    secondCFKey = new SecretKeySpec(keyBytes, algorithm);
  }

  /**
   * Configures encryption (HFile v3, test key provider, master key "hbase") and starts
   * a single-node minicluster shared by both tests.
   */
  @BeforeClass
  public static void setUp() throws Exception {
    conf.setInt("hfile.format.version", 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");

    // Start the minicluster
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  /**
   * Rotates the column-family key: creates an encrypted table, verifies store files carry
   * the initial key, swaps in a new CF key via an online schema change, major compacts,
   * and verifies the rewritten files carry the new key while the superseded (compacted)
   * files still carry the old one.
   */
  @Test
  public void testCFKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default", name.getMethodName()));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    String algorithm =
        conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    hcd.setEncryptionType(algorithm);
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    final List<Path> initialPaths = findStorefilePaths(htd.getTableName());
    assertTrue(initialPaths.size() > 0);
    for (Path path: initialPaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Update the schema with a new encryption key
    hcd = htd.getFamily(Bytes.toBytes("cf"));
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf,
      conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
      secondCFKey));
    TEST_UTIL.getAdmin().modifyColumnFamily(htd.getTableName(), hcd);
    Thread.sleep(5000); // Need a predicate for online schema change

    // And major compact
    TEST_UTIL.getAdmin().majorCompact(htd.getTableName());
    // Snapshot the already-compacted files; compaction is done once all of them are gone
    // from the filesystem.
    final List<Path> updatePaths = findCompactedStorefilePaths(htd.getTableName());
    TEST_UTIL.waitFor(30000, 1000, true, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        // When compaction has finished, all of the original files will be
        // gone
        boolean found = false;
        for (Path path: updatePaths) {
          found = TEST_UTIL.getTestFileSystem().exists(path);
          if (found) {
            LOG.info("Found " + path);
            break;
          }
        }
        return !found;
      }
    });

    // Verify we have store file(s) with only the new key
    Thread.sleep(1000);
    waitForCompaction(htd.getTableName());
    List<Path> pathsAfterCompaction = findStorefilePaths(htd.getTableName());
    assertTrue(pathsAfterCompaction.size() > 0);
    for (Path path: pathsAfterCompaction) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path)));
    }
    List<Path> compactedPaths = findCompactedStorefilePaths(htd.getTableName());
    assertTrue(compactedPaths.size() > 0);
    for (Path path: compactedPaths) {
      assertTrue("Store file " + path + " retains initial key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

  /**
   * Rotates the cluster master key: creates an encrypted table, restarts the HBase
   * cluster with a new master key name ("other") and the old name ("hbase") configured
   * as the alternate, then verifies the existing store files can still be opened and
   * their CF key unwrapped via the alternate master key.
   */
  @Test
  public void testMasterKeyRotation() throws Exception {
    // Create the table schema
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default", name.getMethodName()));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    String algorithm =
        conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    hcd.setEncryptionType(algorithm);
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
    htd.addFamily(hcd);

    // Create the table and some on disk files
    createTableAndFlush(htd);

    // Verify we have store file(s) with the initial key
    List<Path> storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }

    // Now shut down the HBase cluster
    TEST_UTIL.shutdownMiniHBaseCluster();

    // "Rotate" the master key
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "other");
    conf.set(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY, "hbase");

    // Start the cluster back up
    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Verify the table can still be loaded
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
    // Double check that the store file keys can be unwrapped
    storeFilePaths = findStorefilePaths(htd.getTableName());
    assertTrue(storeFilePaths.size() > 0);
    for (Path path: storeFilePaths) {
      assertTrue("Store file " + path + " has incorrect key",
        Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
    }
  }

  /**
   * Blocks until every store of the table has exactly one live store file and at least
   * one file that has not been compacted away, polling every 100 ms.
   * NOTE(review): there is no upper bound on the wait here; the enclosing test relies on
   * the test-runner timeout if compaction never converges.
   */
  private static void waitForCompaction(TableName tableName)
      throws IOException, InterruptedException {
    boolean compacted = false;
    for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
        .getRegions(tableName)) {
      for (HStore store : ((HRegion) region).getStores()) {
        compacted = false;
        while (!compacted) {
          if (store.getStorefiles() != null) {
            while (store.getStorefilesCount() != 1) {
              Thread.sleep(100);
            }
            for (HStoreFile storefile : store.getStorefiles()) {
              if (!storefile.isCompactedAway()) {
                compacted = true;
                break;
              }
              Thread.sleep(100);
            }
          } else {
            break;
          }
        }
      }
    }
  }

  /**
   * Returns the paths of all live store files of the table, collected from the region
   * server hosting its first region.
   */
  private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
    List<Path> paths = new ArrayList<>();
    for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
        .getRegions(tableName)) {
      for (HStore store : ((HRegion) region).getStores()) {
        for (HStoreFile storefile : store.getStorefiles()) {
          paths.add(storefile.getPath());
        }
      }
    }
    return paths;
  }

  /**
   * Returns the paths of store files that have been superseded by compaction but not
   * yet archived/removed; empty if none are tracked.
   */
  private static List<Path> findCompactedStorefilePaths(TableName tableName) throws Exception {
    List<Path> paths = new ArrayList<>();
    for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName)
        .getRegions(tableName)) {
      for (HStore store : ((HRegion) region).getStores()) {
        Collection<HStoreFile> compactedfiles =
            store.getStoreEngine().getStoreFileManager().getCompactedfiles();
        if (compactedfiles != null) {
          for (HStoreFile storefile : compactedfiles) {
            paths.add(storefile.getPath());
          }
        }
      }
    }
    return paths;
  }

  /**
   * Creates the table described by {@code htd}, writes one row to its first column
   * family, and flushes so at least one store file exists on disk.
   */
  private void createTableAndFlush(HTableDescriptor htd) throws Exception {
    HColumnDescriptor hcd = htd.getFamilies().iterator().next();
    // Create the test table
    TEST_UTIL.getAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getTableName(), 5000);
    // Create a store file
    Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
    try {
      table.put(new Put(Bytes.toBytes("testrow"))
        .addColumn(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
    } finally {
      table.close();
    }
    TEST_UTIL.getAdmin().flush(htd.getTableName());
  }

  /**
   * Opens the HFile at {@code path} and returns the encoded bytes of the encryption key
   * in its crypto context; fails the test if the file has no crypto context or no key.
   */
  private static byte[] extractHFileKey(Path path) throws Exception {
    HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
      new CacheConfig(conf), true, conf);
    try {
      reader.loadFileInfo();
      Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
      assertNotNull("Reader has a null crypto context", cryptoContext);
      Key key = cryptoContext.getKey();
      assertNotNull("Crypto context has no key", key);
      return key.getEncoded();
    } finally {
      reader.close();
    }
  }

}