MAPREDUCE-4034. Unable to view task logs on history server with mapreduce.job.acl-view-job=* (Jason Lowe and Siddarth Seth via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1302980 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Joseph Evans 2012-03-20 16:29:42 +00:00
parent 12fca4cf33
commit 40a8293d36
4 changed files with 28 additions and 4 deletions

View File

@ -197,9 +197,12 @@ Release 0.23.2 - UNRELEASED
MAPREDUCE-3954. Added new envs to separate heap size for different daemons
started via bin scripts. (Robert Joseph Evans via vinodkv)
MAPREDUCE-4025. AM can crash if task attempt reports bogus progress value
(Jason Lowe via bobby)
MAPREDUCE-4034. Unable to view task logs on history server with
mapreduce.job.acl-view-job=* (Jason Lowe and Siddarth Seth via bobby)
OPTIMIZATIONS
MAPREDUCE-3901. Modified JobHistory records in YARN to lazily load job and

View File

@ -183,7 +183,8 @@ public RegisterApplicationMasterResponse registerApplicationMaster(
new RMAppAttemptRegistrationEvent(applicationAttemptId, request new RMAppAttemptRegistrationEvent(applicationAttemptId, request
.getHost(), request.getRpcPort(), request.getTrackingUrl())); .getHost(), request.getRpcPort(), request.getTrackingUrl()));
RMAuditLogger.logSuccess(this.rmContext.getRMApps().get(appID).getUser(), RMApp app = this.rmContext.getRMApps().get(appID);
RMAuditLogger.logSuccess(app.getUser(),
AuditConstants.REGISTER_AM, "ApplicationMasterService", appID, AuditConstants.REGISTER_AM, "ApplicationMasterService", appID,
applicationAttemptId); applicationAttemptId);
@ -194,6 +195,8 @@ public RegisterApplicationMasterResponse registerApplicationMaster(
.getMinimumResourceCapability()); .getMinimumResourceCapability());
response.setMaximumResourceCapability(rScheduler response.setMaximumResourceCapability(rScheduler
.getMaximumResourceCapability()); .getMaximumResourceCapability());
response.setApplicationACLs(app.getRMAppAttempt(applicationAttemptId)
.getSubmissionContext().getAMContainerSpec().getApplicationACLs());
return response; return response;
} }
} }

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager; package org.apache.hadoop.yarn.server.resourcemanager;
import java.util.Map;
import junit.framework.Assert; import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -26,6 +28,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@ -93,6 +96,11 @@ public RMApp submitApp(int masterMemory) throws Exception {
// client // client
public RMApp submitApp(int masterMemory, String name, String user) throws Exception { public RMApp submitApp(int masterMemory, String name, String user) throws Exception {
return submitApp(masterMemory, name, user, null);
}
public RMApp submitApp(int masterMemory, String name, String user,
Map<ApplicationAccessType, String> acls) throws Exception {
ClientRMProtocol client = getClientRMService(); ClientRMProtocol client = getClientRMService();
GetNewApplicationResponse resp = client.getNewApplication(Records GetNewApplicationResponse resp = client.getNewApplication(Records
.newRecord(GetNewApplicationRequest.class)); .newRecord(GetNewApplicationRequest.class));
@ -110,6 +118,7 @@ public RMApp submitApp(int masterMemory, String name, String user) throws Except
Resource capability = Records.newRecord(Resource.class); Resource capability = Records.newRecord(Resource.class);
capability.setMemory(masterMemory); capability.setMemory(masterMemory);
clc.setResource(capability); clc.setResource(capability);
clc.setApplicationACLs(acls);
sub.setAMContainerSpec(clc); sub.setAMContainerSpec(clc);
req.setApplicationSubmissionContext(sub); req.setApplicationSubmissionContext(sub);

View File

@ -20,6 +20,7 @@
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedAction; import java.security.PrivilegedAction;
import java.util.HashMap;
import java.util.Map; import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -36,10 +37,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@ -121,7 +124,10 @@ public void testAuthorizedAccess() throws Exception {
MockNM nm1 = rm.registerNode("localhost:1234", 5120); MockNM nm1 = rm.registerNode("localhost:1234", 5120);
RMApp app = rm.submitApp(1024); Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>(2);
acls.put(ApplicationAccessType.VIEW_APP, "*");
RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
nm1.nodeHeartbeat(true); nm1.nodeHeartbeat(true);
@ -164,7 +170,10 @@ public AMRMProtocol run() {
RegisterApplicationMasterRequest request = Records RegisterApplicationMasterRequest request = Records
.newRecord(RegisterApplicationMasterRequest.class); .newRecord(RegisterApplicationMasterRequest.class);
request.setApplicationAttemptId(applicationAttemptId); request.setApplicationAttemptId(applicationAttemptId);
client.registerApplicationMaster(request); RegisterApplicationMasterResponse response =
client.registerApplicationMaster(request);
Assert.assertEquals("Register response has bad ACLs", "*",
response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
rm.stop(); rm.stop();
} }