HADOOP-11110. JavaKeystoreProvider should not report a key as created if it was not flushed to the backing file. (Arun Suresh via wang)

Andrew Wang 2014-09-29 13:10:26 -07:00
parent f0293f11a8
commit a78953c974
6 changed files with 201 additions and 13 deletions
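The fix has two parts. In KeyShell, provider.flush() is now called before the success message is printed, so a flush failure surfaces as an exception instead of the CLI reporting a key as created, rolled, or deleted when it never reached the backing keystore. In JavaKeyStoreProvider, a failed flush() now resets the in-memory state from the last good on-disk copy. A minimal sketch of the reordered CLI flow (illustrative only, not the literal KeyShell code):

import java.io.PrintStream;
import org.apache.hadoop.crypto.key.KeyProvider;

/** Illustrative sketch: report success only after the key has been flushed. */
class FlushBeforeReport {
  static void createAndReport(KeyProvider provider, KeyProvider.Options options,
      String keyName, PrintStream out) throws Exception {
    provider.createKey(keyName, options);
    // Flush first: if persisting to the backing file fails, the exception
    // propagates here and no success message is printed for an unsaved key.
    provider.flush();
    out.println(keyName + " has been successfully created.");
  }
}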

View File

@ -761,6 +761,9 @@ Release 2.6.0 - UNRELEASED
HDFS-7157. Using Time.now() for recording start/end time of reconfiguration
tasks (Lei Xu via cmccabe)
HADOOP-11110. JavaKeystoreProvider should not report a key as created if it
was not flushed to the backing file.
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HADOOP-10734. Implement high-performance secure random number sources.

View File

@ -20,6 +20,7 @@
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@ -30,6 +31,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
@ -107,6 +110,20 @@ public class JavaKeyStoreProvider extends KeyProvider {
private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
@VisibleForTesting
JavaKeyStoreProvider(JavaKeyStoreProvider other) {
super(new Configuration());
uri = other.uri;
path = other.path;
fs = other.fs;
permissions = other.permissions;
keyStore = other.keyStore;
password = other.password;
changed = other.changed;
readLock = other.readLock;
writeLock = other.writeLock;
}
private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
super(conf);
this.uri = uri;
@ -501,6 +518,7 @@ public KeyVersion rollNewVersion(String name,
public void flush() throws IOException {
Path newPath = constructNewPath(path);
Path oldPath = constructOldPath(path);
Path resetPath = path;
writeLock.lock();
try {
if (!changed) {
@ -527,6 +545,9 @@ public void flush() throws IOException {
// Save old File first
boolean fileExisted = backupToOld(oldPath);
if (fileExisted) {
resetPath = oldPath;
}
// write out the keystore
// Write to _NEW path first :
try {
@ -534,16 +555,34 @@ public void flush() throws IOException {
} catch (IOException ioe) {
// rename _OLD back to current and throw Exception
revertFromOld(oldPath, fileExisted);
resetPath = path;
throw ioe;
}
// Rename _NEW to CURRENT and delete _OLD
cleanupNewAndOld(newPath, oldPath);
changed = false;
} catch (IOException ioe) {
resetKeyStoreState(resetPath);
throw ioe;
} finally {
writeLock.unlock();
}
}
private void resetKeyStoreState(Path path) {
LOG.debug("Could not flush Keystore.."
+ "attempting to reset to previous state !!");
// 1) flush cache
cache.clear();
// 2) load keyStore from previous path
try {
loadFromPath(path, password);
LOG.debug("KeyStore resetting to previously flushed state !!");
} catch (Exception e) {
LOG.debug("Could not reset Keystore to previous state", e);
}
}
private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
// Rename _NEW to CURRENT
renameOrFail(newPath, path);
@ -553,7 +592,7 @@ private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
}
}
private void writeToNew(Path newPath) throws IOException {
protected void writeToNew(Path newPath) throws IOException {
FSDataOutputStream out =
FileSystem.create(fs, newPath, permissions);
try {
@ -570,14 +609,7 @@ private void writeToNew(Path newPath) throws IOException {
out.close();
}
private void revertFromOld(Path oldPath, boolean fileExisted)
throws IOException {
if (fileExisted) {
renameOrFail(oldPath, path);
}
}
private boolean backupToOld(Path oldPath)
protected boolean backupToOld(Path oldPath)
throws IOException {
boolean fileExisted = false;
if (fs.exists(path)) {
@ -587,6 +619,14 @@ private boolean backupToOld(Path oldPath)
return fileExisted;
}
private void revertFromOld(Path oldPath, boolean fileExisted)
throws IOException {
if (fileExisted) {
renameOrFail(oldPath, path);
}
}
private void renameOrFail(Path src, Path dest)
throws IOException {
if (!fs.rename(src, dest)) {

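Taken together, the hunks above give flush() a backup/write/commit sequence with an explicit reset on failure. A consolidated sketch of that flow (same field and helper names as in JavaKeyStoreProvider; this is a summary of the code above, not a verbatim excerpt):

public void flush() throws IOException {
  Path newPath = constructNewPath(path);
  Path oldPath = constructOldPath(path);
  Path resetPath = path;                        // last known-good on-disk copy
  writeLock.lock();
  try {
    if (!changed) {
      return;                                   // nothing to persist
    }
    boolean fileExisted = backupToOld(oldPath); // CURRENT -> _OLD
    if (fileExisted) {
      resetPath = oldPath;                      // a valid copy now lives at _OLD
    }
    try {
      writeToNew(newPath);                      // serialize keystore to _NEW
    } catch (IOException ioe) {
      revertFromOld(oldPath, fileExisted);      // _OLD -> CURRENT
      resetPath = path;
      throw ioe;
    }
    cleanupNewAndOld(newPath, oldPath);         // _NEW -> CURRENT, delete _OLD
    changed = false;
  } catch (IOException ioe) {
    // Clear cached metadata and reload from the last good copy so keys that
    // were never persisted no longer appear to exist.
    resetKeyStoreState(resetPath);
    throw ioe;
  } finally {
    writeLock.unlock();
  }
}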
View File

@ -345,8 +345,8 @@ public void execute() throws NoSuchAlgorithmException, IOException {
+ provider + "\n for key name: " + keyName);
try {
provider.rollNewVersion(keyName);
out.println(keyName + " has been successfully rolled.");
provider.flush();
out.println(keyName + " has been successfully rolled.");
printProviderWritten();
} catch (NoSuchAlgorithmException e) {
out.println("Cannot roll key: " + keyName + " within KeyProvider: "
@ -418,8 +418,8 @@ public void execute() throws IOException {
if (cont) {
try {
provider.deleteKey(keyName);
out.println(keyName + " has been successfully deleted.");
provider.flush();
out.println(keyName + " has been successfully deleted.");
printProviderWritten();
} catch (IOException e) {
out.println(keyName + " has not been deleted.");
@ -479,9 +479,9 @@ public void execute() throws IOException, NoSuchAlgorithmException {
warnIfTransientProvider();
try {
provider.createKey(keyName, options);
provider.flush();
out.println(keyName + " has been successfully created with options "
+ options.toString() + ".");
provider.flush();
printProviderWritten();
} catch (InvalidParameterException e) {
out.println(keyName + " has not been created. " + e.getMessage());

View File

@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
public class FailureInjectingJavaKeyStoreProvider extends JavaKeyStoreProvider {
public static final String SCHEME_NAME = "failjceks";
private boolean backupFail = false;
private boolean writeFail = false;
FailureInjectingJavaKeyStoreProvider(JavaKeyStoreProvider prov) {
super(prov);
}
public void setBackupFail(boolean b) {
backupFail = b;
}
public void setWriteFail(boolean b) {
writeFail = b;
}
// Failure injection methods.
@Override
public void writeToNew(Path newPath) throws IOException {
if (writeFail) {
throw new IOException("Injecting failure on write");
}
super.writeToNew(newPath);
}
@Override
public boolean backupToOld(Path oldPath) throws IOException {
if (backupFail) {
throw new IOException("Inejection Failure on backup");
}
return super.backupToOld(oldPath);
}
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
try {
return new FailureInjectingJavaKeyStoreProvider(
(JavaKeyStoreProvider) new JavaKeyStoreProvider.Factory()
.createProvider(
new URI(providerName.toString().replace(SCHEME_NAME,
JavaKeyStoreProvider.SCHEME_NAME)), conf));
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
return null;
}
}
}
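Only backupToOld and writeToNew are overridden here, which is why those two methods were widened from private to protected in JavaKeyStoreProvider; the reset logic under test runs unchanged in the parent class. In the test the wrapper is selected simply by using its scheme in the provider path, along the lines of (illustrative URI, the real test derives it from a temp directory):

// Illustrative only: the actual test builds this URI from a temp directory.
Configuration conf = new Configuration();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
    "failjceks://file/tmp/test.jks");
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);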

View File

@ -40,6 +40,7 @@
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
public class TestKeyProviderFactory {
@ -171,6 +172,7 @@ static void checkSpecificProvider(Configuration conf,
assertEquals("Key no-such-key not found", e.getMessage());
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = KeyProviderFactory.getProviders(conf).get(0);
assertArrayEquals(new byte[]{2},
@ -215,6 +217,50 @@ public void testJksProvider() throws Exception {
file.delete();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
// START : Test flush error by failure injection
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(
JavaKeyStoreProvider.SCHEME_NAME,
FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
// get an instance of the failure-injecting provider
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// inject failure during keystore write
FailureInjectingJavaKeyStoreProvider fProvider =
(FailureInjectingJavaKeyStoreProvider) provider;
fProvider.setWriteFail(true);
provider.createKey("key5", new byte[]{1},
KeyProvider.options(conf).setBitLength(8));
assertNotNull(provider.getCurrentKey("key5"));
try {
provider.flush();
Assert.fail("Should not succeed");
} catch (Exception e) {
// Ignore
}
// Should be reset to pre-flush state
Assert.assertNull(provider.getCurrentKey("key5"));
// Un-inject last failure and
// inject failure during keystore backup
fProvider.setWriteFail(false);
fProvider.setBackupFail(true);
provider.createKey("key6", new byte[]{1},
KeyProvider.options(conf).setBitLength(8));
assertNotNull(provider.getCurrentKey("key6"));
try {
provider.flush();
Assert.fail("Should not succeed");
} catch (Exception e) {
// Ignore
}
// Should be reset to pre-flush state
Assert.assertNull(provider.getCurrentKey("key6"));
// END : Test flush error by failure injection
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(
FailureInjectingJavaKeyStoreProvider.SCHEME_NAME,
JavaKeyStoreProvider.SCHEME_NAME));
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
@ -227,7 +273,7 @@ public void testJksProvider() throws Exception {
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
provider = KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + " should be deleted", !oldFile.exists());
verifyAfterReload(file, provider);

View File

@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
org.apache.hadoop.crypto.key.JavaKeyStoreProvider$Factory
org.apache.hadoop.crypto.key.UserProvider$Factory
org.apache.hadoop.crypto.key.kms.KMSClientProvider$Factory
org.apache.hadoop.crypto.key.FailureInjectingJavaKeyStoreProvider$Factory
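
Adding FailureInjectingJavaKeyStoreProvider$Factory to this test-side services file is what lets KeyProviderFactory.getProviders() hand the test a failure-injecting provider for the failjceks scheme. A rough sketch of that discovery step (assuming ServiceLoader-based lookup, which is how KeyProviderFactory locates factories; names other than the factory API are illustrative):

import java.io.IOException;
import java.net.URI;
import java.util.ServiceLoader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

class ProviderLookupSketch {
  static KeyProvider lookup(URI uri, Configuration conf) throws IOException {
    // Each factory listed in META-INF/services is asked in turn; a factory
    // returns null when it does not handle the URI scheme (e.g. failjceks).
    for (KeyProviderFactory factory
        : ServiceLoader.load(KeyProviderFactory.class)) {
      KeyProvider provider = factory.createProvider(uri, conf);
      if (provider != null) {
        return provider;
      }
    }
    return null;
  }
}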