HDFS-17439. Support -nonSuperUser for NNThroughputBenchmark: useful for testing auth frameworks such as Ranger (#6677)

Author: Fateh Singh, 2024-06-18 05:52:24 -07:00 (committed by GitHub)
Parent: 2fbbfe3cc9
Commit: 90024d8cb1
3 changed files with 134 additions and 52 deletions
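For context, the flag lets the benchmark be driven by an ordinary (non-super) HDFS user, so authorization checks such as Ranger policy evaluation are exercised and included in the measured NameNode operation times. Below is a minimal usage sketch mirroring the runBenchmark call added in the test further down; the NameNode URI and the wrapper class name are placeholders, and any login/principal setup for the authorizer is assumed to be done separately.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

public class NonSuperUserBenchmarkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Point the benchmark at the target NameNode (placeholder URI).
    FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));
    // Run the benchmark operations without requiring super-user privileges:
    // with -nonSuperUser the tool skips safe-mode enter/leave and saveNamespace,
    // so every namespace call is subject to the configured authorizer.
    NNThroughputBenchmark.runBenchmark(conf,
        new String[]{"-op", "all", "-nonSuperUser"});
  }
}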


@@ -46,6 +46,7 @@ The following are all supported command options:
 |`-logLevel` | Specify the logging level when the benchmark runs. The default logging level is ERROR. |
 |`-UGCacheRefreshCount` | After every specified number of operations, the benchmark purges the name-node's user group cache. By default the refresh is never called. |
 |`-keepResults` | If specified, do not clean up the name-space after execution. By default the name-space will be removed after test. |
+|`-nonSuperUser` | If specified, a non-super user can run the tool, which can be helpful for bringing authorization time into benchmarking calculations. |
 
 ##### Operations Supported


@@ -107,7 +107,7 @@ public class NNThroughputBenchmark implements Tool {
       LoggerFactory.getLogger(NNThroughputBenchmark.class);
   private static final int BLOCK_SIZE = 16;
   private static final String GENERAL_OPTIONS_USAGE =
-      "[-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G]";
+      "[-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G] [-nonSuperUser]";
   static Configuration config;
   static NameNode nameNode;
@@ -175,6 +175,7 @@ abstract class OperationStatsBase {
     protected long cumulativeTime = 0; // sum of times for each op
     protected long elapsedTime = 0; // time from start to finish
     protected boolean keepResults = false; // don't clean base directory on exit
+    protected boolean nonSuperUser = false; // skip super-user-only calls (e.g. enter/exit safe mode)
     protected Level logLevel; // logging level, ERROR by default
     protected int ugcRefreshCount = 0; // user group cache refresh count
@@ -285,14 +286,30 @@ private boolean isInProgress() {
     }

     void cleanUp() throws IOException {
-      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
-          false);
+      if (!nonSuperUser) {
+        try {
+          clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+              false);
+        } catch (Exception e) {
+          LOG.error("Potentially insufficient permission: try running the tool" +
+              " with -nonSuperUser argument or login as super user");
+          throw e;
+        }
+      }
       if(!keepResults)
         clientProto.delete(getBaseDir(), true);
       else {
-        clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,
-            true);
-        clientProto.saveNamespace(0, 0);
+        if (!nonSuperUser) {
+          try {
+            clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,
+                true);
+            clientProto.saveNamespace(0, 0);
+          } catch (Exception e) {
+            LOG.error("Potentially insufficient permission: try running the tool" +
+                " with -nonSuperUser argument or login as super user");
+            throw e;
+          }
+        }
       }
     }
     public String getBaseDirName() {
@@ -358,6 +375,12 @@ protected boolean verifyOpArgument(List<String> args) {
         args.remove(krIndex);
       }

+      int nonSuperUserIndex = args.indexOf("-nonSuperUser");
+      nonSuperUser = (nonSuperUserIndex >= 0);
+      if(nonSuperUser) {
+        args.remove(nonSuperUserIndex);
+      }
+
       int llIndex = args.indexOf("-logLevel");
       if(llIndex >= 0) {
         if(args.size() <= llIndex + 1)
@@ -501,8 +524,16 @@ String getExecutionArgument(int daemonId) {
     @Override
     long executeOp(int daemonId, int inputIdx, String ignore)
         throws IOException {
-      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
-          false);
+      if (!nonSuperUser) {
+        try {
+          clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+              false);
+        } catch (Exception e) {
+          LOG.error("Potentially insufficient permission: try running the tool" +
+              " with -nonSuperUser argument or login as super user");
+          throw e;
+        }
+      }
       long start = Time.now();
       clientProto.delete(getBaseDirName(), true);
       long end = Time.now();
@@ -579,8 +610,16 @@ void parseArguments(List<String> args) {
     @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       assert opsPerThread.length == numThreads : "Error opsPerThread.length";
-      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
-          false);
+      if (!nonSuperUser) {
+        try {
+          clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+              false);
+        } catch (Exception e) {
+          LOG.error("Potentially insufficient permission: try running the tool" +
+              " with -nonSuperUser argument or login as super user");
+          throw e;
+        }
+      }
       // int generatedFileIdx = 0;
       LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName());
       LOG.info("basedir: " + getBaseDir());
@@ -695,8 +734,16 @@ void parseArguments(List<String> args) {
     @Override
     void generateInputs(int[] opsPerThread) throws IOException {
       assert opsPerThread.length == numThreads : "Error opsPerThread.length";
-      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
-          false);
+      if (!nonSuperUser) {
+        try {
+          clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+              false);
+        } catch (Exception e) {
+          LOG.error("Potentially insufficient permission: try running the tool" +
+              " with -nonSuperUser argument or login as super user");
+          throw e;
+        }
+      }
       LOG.info("Generate " + numOpsRequired + " inputs for " + getOpName());
       dirPaths = new String[numThreads][];
       try {
@@ -796,8 +843,11 @@ void generateInputs(int[] opsPerThread) throws IOException {
           String.valueOf(nameGenerator.getFilesPerDirectory()),
           "-baseDirName", getBaseDirName(),
           "-close"};
-      CreateFileStats opCreate = new CreateFileStats(Arrays.asList(createArgs));
+      List<String> createArgsList = new ArrayList<String>(Arrays.asList(createArgs));
+      if (this.nonSuperUser) {
+        createArgsList.add("-nonSuperUser");
+      }
+      CreateFileStats opCreate = new CreateFileStats(createArgsList);
       if(!useExisting) { // create files if they were not created before
         opCreate.benchmark();
         LOG.info("Created " + numOpsRequired + " files.");
@@ -1240,8 +1290,16 @@ void generateInputs(int[] ignore) throws IOException {
       FileNameGenerator nameGenerator;
       nameGenerator = new FileNameGenerator(getBaseDir(), 100);
       String clientName = getClientName(007);
-      clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
-          false);
+      if (!nonSuperUser) {
+        try {
+          clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
+              false);
+        } catch (Exception e) {
+          LOG.error("Potentially insufficient permission: try running the tool" +
+              " with -nonSuperUser argument or login as super user");
+          throw e;
+        }
+      }
       for(int idx=0; idx < nrFiles; idx++) {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         clientProto.create(fileName, FsPermission.getDefault(), clientName,


@@ -100,7 +100,30 @@ public void testNNThroughputAgainstRemoteNN() throws Exception {
       }
     }
   }

+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster with
+   * the -nonSuperUser option (useful when testing an authorization framework such
+   * as Ranger, since only a super user such as hdfs can enter/exit safe mode,
+   * but requests from the super user are not sent for authorization).
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputAgainstRemoteNNNonSuperUser() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf, new String[]{"-op", "all", "-nonSuperUser"});
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
    * with explicit -fs option.