HDFS-12300. Audit-log delegation token related operations.

This commit is contained in:
Xiao Chen 2017-08-31 23:17:16 -07:00
parent 36f33a1efb
commit 1b3b9938cf
3 changed files with 103 additions and 6 deletions

View File

@ -36,6 +36,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetAddress;
@ -70,6 +72,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.http.HttpConfig;
@ -80,6 +83,7 @@
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.annotations.VisibleForTesting;
@ -1570,4 +1574,22 @@ public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
.createKeyProviderCryptoExtension(keyProvider);
return cryptoProvider;
}
/**
 * Deserializes the identifier embedded in an HDFS delegation token.
 *
 * @param token the delegation token whose identifier bytes are decoded
 * @return the decoded {@link DelegationTokenIdentifier}
 * @throws IOException if the identifier bytes cannot be deserialized
 */
public static DelegationTokenIdentifier decodeDelegationToken(
    final Token<DelegationTokenIdentifier> token) throws IOException {
  final byte[] rawIdentifier = token.getIdentifier();
  final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  // Wrap the raw bytes in a DataInputStream so Writable.readFields can
  // consume them; try-with-resources guarantees the stream is closed.
  try (DataInputStream stream =
      new DataInputStream(new ByteArrayInputStream(rawIdentifier))) {
    identifier.readFields(stream);
  }
  return identifier;
}
}

View File

@ -101,9 +101,7 @@
import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
@ -5399,6 +5397,9 @@ DelegationTokenSecretManager getDelegationTokenSecretManager() {
*/
Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException {
final String operationName = "getDelegationToken";
final boolean success;
final String tokenId;
Token<DelegationTokenIdentifier> token;
checkOperation(OperationCategory.WRITE);
writeLock();
@ -5427,10 +5428,13 @@ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
dtId, dtSecretManager);
long expiryTime = dtSecretManager.getTokenExpiryTime(dtId);
getEditLog().logGetDelegationToken(dtId, expiryTime);
tokenId = dtId.toStringStable();
success = true;
} finally {
writeUnlock("getDelegationToken");
}
getEditLog().logSync();
logAuditEvent(success, operationName, tokenId);
return token;
}
@ -5443,6 +5447,9 @@ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
*/
long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws InvalidToken, IOException {
final String operationName = "renewDelegationToken";
boolean success = false;
String tokenId;
long expiryTime;
checkOperation(OperationCategory.WRITE);
writeLock();
@ -5456,15 +5463,20 @@ long renewDelegationToken(Token<DelegationTokenIdentifier> token)
}
String renewer = getRemoteUser().getShortUserName();
expiryTime = dtSecretManager.renewToken(token, renewer);
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
id.readFields(in);
final DelegationTokenIdentifier id = DFSUtil.decodeDelegationToken(token);
getEditLog().logRenewDelegationToken(id, expiryTime);
tokenId = id.toStringStable();
success = true;
} catch (AccessControlException ace) {
final DelegationTokenIdentifier id = DFSUtil.decodeDelegationToken(token);
tokenId = id.toStringStable();
logAuditEvent(success, operationName, tokenId);
throw ace;
} finally {
writeUnlock("renewDelegationToken");
}
getEditLog().logSync();
logAuditEvent(success, operationName, tokenId);
return expiryTime;
}
@ -5475,6 +5487,9 @@ long renewDelegationToken(Token<DelegationTokenIdentifier> token)
*/
void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
final String operationName = "cancelDelegationToken";
boolean success = false;
String tokenId;
checkOperation(OperationCategory.WRITE);
writeLock();
try {
@ -5485,10 +5500,18 @@ void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
DelegationTokenIdentifier id = dtSecretManager
.cancelToken(token, canceller);
getEditLog().logCancelDelegationToken(id);
tokenId = id.toStringStable();
success = true;
} catch (AccessControlException ace) {
final DelegationTokenIdentifier id = DFSUtil.decodeDelegationToken(token);
tokenId = id.toStringStable();
logAuditEvent(success, operationName, tokenId);
throw ace;
} finally {
writeUnlock("cancelDelegationToken");
}
getEditLog().logSync();
logAuditEvent(success, operationName, tokenId);
}
/**

View File

@ -34,13 +34,16 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Ignore;
import org.junit.Test;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.mockito.Mock;
@ -68,6 +71,7 @@ public static void initialize() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
conf.setBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
@ -566,6 +570,54 @@ public void testGetAclStatus() throws Exception {
cluster.getNamesystem().setFSDirectory(dir);
}
/**
 * Verifies that getDelegationToken, renewDelegationToken and
 * cancelDelegationToken each emit an audit log entry, for both allowed
 * and denied attempts.
 */
@Test
public void testDelegationTokens() throws Exception {
  // Fix: use a bounded wildcard instead of the raw Token type so the
  // compiler does not emit raw-type/unchecked warnings.
  final Token<?> dt = fs.getDelegationToken("foo");
  final String getDT =
      ".*src=HDFS_DELEGATION_TOKEN token 1.*with renewer foo.*";
  verifyAuditLogs(true, ".*cmd=getDelegationToken" + getDT);

  // Renewal by the designated renewer ("foo") must succeed and be
  // audited as allowed.
  final UserGroupInformation foo =
      UserGroupInformation.createUserForTesting("foo", new String[] {});
  foo.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      dt.renew(conf);
      return null;
    }
  });
  verifyAuditLogs(true, ".*cmd=renewDelegationToken" + getDT);

  // Renewal by a user other than the renewer (the test's login user)
  // must fail and be audited as denied.
  try {
    dt.renew(conf);
    fail("Renewing a token with non-renewer should fail");
  } catch (AccessControlException expected) {
  }
  verifyAuditLogs(false, ".*cmd=renewDelegationToken" + getDT);

  // Cancellation by an unrelated user ("bar") must fail and be audited
  // as denied.
  final UserGroupInformation bar =
      UserGroupInformation.createUserForTesting("bar", new String[] {});
  try {
    bar.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        dt.cancel(conf);
        return null;
      }
    });
    fail("Canceling a token with non-renewer should fail");
  } catch (AccessControlException expected) {
  }
  verifyAuditLogs(false, ".*cmd=cancelDelegationToken" + getDT);

  // Cancellation by the token owner succeeds and is audited as allowed.
  dt.cancel(conf);
  verifyAuditLogs(true, ".*cmd=cancelDelegationToken" + getDT);
}
/**
 * Checks the latest audit log entry against {@code pattern}, prefixed
 * with the expected value of the {@code allowed} field.
 *
 * @param allowed expected value of the allowed field in the audit entry
 * @param pattern regex fragment matched after the allowed field
 * @return the value returned by the single-argument
 *         {@code verifyAuditLogs} overload
 */
private int verifyAuditLogs(final boolean allowed, final String pattern) {
  final String fullPattern = ".*allowed=" + allowed + pattern;
  return verifyAuditLogs(fullPattern);
}
private int verifyAuditLogs(String pattern) {
int length = auditlog.getOutput().split("\n").length;
String lastAudit = auditlog.getOutput().split("\n")[length - 1];