diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 4f358bb9ab..2642abe05d 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -443,6 +443,9 @@ Release 2.0.5-beta - UNRELEASED MAPREDUCE-5261. Fix issues in TestRMContainerAllocator after YARN-617. (Omkar Vinit Joshi via vinodkv) + MAPREDUCE-5282. Updating MR App to use immutable ApplicationID after + YARN-716. (Siddharth Seth via vinodkv) + BREAKDOWN OF HADOOP-8562 SUBTASKS MAPREDUCE-4739. Some MapReduce tests fail to find winutils. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 37667c6391..2eb5344ee9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -128,9 +128,7 @@ public class MRApp extends MRAppMaster { static ApplicationId applicationId; static { - applicationId = recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); + applicationId = ApplicationId.newInstance(0, 0); } public MRApp(int maps, int reduces, boolean autoComplete, String testName, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 0e20d6f384..3539728425 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -789,9 +789,7 @@ class MyAppContext implements AppContext { private final Map allJobs; MyAppContext(int numberMaps, int numberReduces) { - myApplicationID = recordFactory.newRecordInstance(ApplicationId.class); - myApplicationID.setClusterTimestamp(clock.getTime()); - myApplicationID.setId(1); + myApplicationID = ApplicationId.newInstance(clock.getTime(), 1); myAppAttemptID = recordFactory .newRecordInstance(ApplicationAttemptId.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index 73d1686e29..3dca3e5879 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -84,9 +84,8 @@ public void testDeletionofStaging() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId 
appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), + 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); @@ -113,9 +112,8 @@ public void testNoDeletionofStagingOnReboot() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), + 0); attemptId.setApplicationId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); @@ -141,9 +139,8 @@ public void testDeletionofStagingOnReboot() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(1); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), + 0); attemptId.setApplicationId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, @@ -169,9 +166,8 @@ public void testDeletionofStagingOnKill() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), + 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); @@ -197,9 +193,8 @@ public void testDeletionofStagingOnKillLastTry() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(1); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), + 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java index 9fd0fb8b1a..4f701b072c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java @@ -224,9 +224,7 @@ public void setup() { metrics = mock(MRAppMetrics.class); dataLocations = new String[1]; - appId = Records.newRecord(ApplicationId.class); - 
appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(1); + appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); jobId = Records.newRecord(JobId.class); jobId.setId(1); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java index 4fcb475573..da2e6b2dab 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java @@ -27,7 +27,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.webapp.Controller.RequestContext; import org.junit.Before; import org.junit.Test; @@ -41,7 +40,7 @@ public class TestAppController { public void setUp() { AppContext context = mock(AppContext.class); when(context.getApplicationID()).thenReturn( - Records.newRecord(ApplicationId.class)); + ApplicationId.newInstance(0, 0)); App app = new App(context); Configuration conf = new Configuration(); ctx = mock(RequestContext.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index ff38ff3dd2..892eb87df5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -76,9 +76,8 @@ public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) { JobId jobId = recordFactory.newRecordInstance(JobId.class); jobId.setId(id.getId()); //currently there is 1-1 mapping between appid and jobid - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setId(id.getId()); - appId.setClusterTimestamp(toClusterTimeStamp(id.getJtIdentifier())); + ApplicationId appId = ApplicationId.newInstance( + toClusterTimeStamp(id.getJtIdentifier()), id.getId()); jobId.setAppId(appId); return jobId; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java index 960b168fcd..16a72a87af 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.mapreduce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.ArrayList; 
import java.util.List; @@ -28,18 +31,13 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; -import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl; -import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.QueueState; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.util.Records; import org.junit.Test; import org.mockito.Mockito; @@ -74,14 +72,16 @@ public void testEnums() throws Exception { public void testFromYarn() throws Exception { int appStartTime = 612354; YarnApplicationState state = YarnApplicationState.RUNNING; - ApplicationId applicationId = new ApplicationIdPBImpl(); - ApplicationReportPBImpl applicationReport = new ApplicationReportPBImpl(); + ApplicationId applicationId = ApplicationId.newInstance(0, 0); + ApplicationReport applicationReport = Records + .newRecord(ApplicationReport.class); applicationReport.setApplicationId(applicationId); applicationReport.setYarnApplicationState(state); applicationReport.setStartTime(appStartTime); applicationReport.setUser("TestTypeConverter-user"); - ApplicationResourceUsageReportPBImpl appUsageRpt = new ApplicationResourceUsageReportPBImpl(); - ResourcePBImpl r = new ResourcePBImpl(); + ApplicationResourceUsageReport appUsageRpt = Records + .newRecord(ApplicationResourceUsageReport.class); + Resource r = Records.newRecord(Resource.class); r.setMemory(2048); appUsageRpt.setNeededResources(r); appUsageRpt.setNumReservedContainers(1); @@ -107,8 +107,9 @@ public void testFromYarnApplicationReport() { when(mockReport.getUser()).thenReturn("dummy-user"); when(mockReport.getQueue()).thenReturn("dummy-queue"); String jobFile = "dummy-path/job.xml"; - ApplicationResourceUsageReportPBImpl appUsageRpt = new ApplicationResourceUsageReportPBImpl(); - ResourcePBImpl r = new ResourcePBImpl(); + ApplicationResourceUsageReport appUsageRpt = Records + .newRecord(ApplicationResourceUsageReport.class); + Resource r = Records.newRecord(Resource.class); r.setMemory(2048); appUsageRpt.setNeededResources(r); appUsageRpt.setNumReservedContainers(1); @@ -134,7 +135,8 @@ public void testFromYarnApplicationReport() { @Test public void testFromYarnQueueInfo() { - org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = new QueueInfoPBImpl(); + org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Records + .newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class); queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED); org.apache.hadoop.mapreduce.QueueInfo returned = TypeConverter.fromYarn(queueInfo, new Configuration()); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java index 160da13385..4128f10450 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java @@ -81,7 +81,7 @@ private static void delete(File dir) throws IOException { @Test (timeout = 120000) public void testJobIDtoString() { JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class); - jid.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); + jid.setAppId(ApplicationId.newInstance(0, 0)); assertEquals("job_0_0000", MRApps.toString(jid)); } @@ -103,7 +103,7 @@ public void testJobIDShort() { public void testTaskIDtoString() { TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class); tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class)); - tid.getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); + tid.getJobId().setAppId(ApplicationId.newInstance(0, 0)); tid.setTaskType(TaskType.MAP); TaskType type = tid.getTaskType(); System.err.println(type); @@ -145,7 +145,7 @@ public void testTaskAttemptIDtoString() { taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class)); taid.getTaskId().setTaskType(TaskType.MAP); taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class)); - taid.getTaskId().getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); + taid.getTaskId().getJobId().setAppId(ApplicationId.newInstance(0, 0)); assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid)); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java index e172be54e8..ecf5b8f3af 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java @@ -21,7 +21,6 @@ import org.junit.Test; import static org.junit.Assert.*; import static org.mockito.Mockito.*; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.mapred.Task.CombineOutputCollector; @@ -30,7 +29,6 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.mapreduce.task.reduce.Shuffle; -import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.Counters.Counter; import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapred.JobConf; @@ -40,7 +38,6 @@ import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import 
org.apache.hadoop.mapred.ShuffleConsumerPlugin; import org.apache.hadoop.mapred.RawKeyValueIterator; -import org.apache.hadoop.mapred.Reducer; /** * A JUnit for testing availability and accessibility of shuffle related API. @@ -181,10 +178,6 @@ public void testConsumerApi() { * AuxiliaryService(s) which are "Shuffle-Providers" (ShuffleHandler and 3rd party plugins) */ public void testProviderApi() { - - ApplicationId mockApplicationId = mock(ApplicationId.class); - mockApplicationId.setClusterTimestamp(new Long(10)); - mockApplicationId.setId(mock(JobID.class).getId()); LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class); JobConf mockJobConf = mock(JobConf.class); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index 2a8affb924..0f9cf267b5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -74,8 +74,7 @@ public class JobHistory extends AbstractService implements HistoryContext { public void init(Configuration conf) throws YarnException { LOG.info("JobHistory Init"); this.conf = conf; - this.appID = RecordFactoryProvider.getRecordFactory(conf) - .newRecordInstance(ApplicationId.class); + this.appID = ApplicationId.newInstance(0, 0); this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf) .newRecordInstance(ApplicationAttemptId.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java index ddf167ceb1..c2c17aeb3c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java @@ -129,7 +129,7 @@ private ApplicationReport getApplicationReport( ApplicationResourceUsageReport appResources = Mockito .mock(ApplicationResourceUsageReport.class); Mockito.when(appReport.getApplicationId()).thenReturn( - Records.newRecord(ApplicationId.class)); + ApplicationId.newInstance(0, 0)); Mockito.when(appResources.getNeededResources()).thenReturn( Records.newRecord(Resource.class)); Mockito.when(appResources.getReservedResources()).thenReturn( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 601268a7e7..4063f38e81 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -140,9 
+140,7 @@ public ApplicationSubmissionContext answer(InvocationOnMock invocation) ).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class), any(String.class), any(Credentials.class)); - appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(1); + appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); jobId = TypeConverter.fromYarn(appId); if (testWorkDir.exists()) { FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index 5715bd146d..af358e463f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -79,7 +79,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.util.Records; import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.Channel; @@ -549,9 +548,8 @@ protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch, // $x/$user/appcache/$appId/output/$mapId // TODO: Once Shuffle is out of NM, this can use MR APIs to convert between App and Job JobID jobID = JobID.forName(jobId); - ApplicationId appID = Records.newRecord(ApplicationId.class); - appID.setClusterTimestamp(Long.parseLong(jobID.getJtIdentifier())); - appID.setId(jobID.getId()); + ApplicationId appID = ApplicationId.newInstance( + Long.parseLong(jobID.getJtIdentifier()), jobID.getId()); final String base = ContainerLocalizer.USERCACHE + "/" + user + "/" + ContainerLocalizer.APPCACHE + "/" diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index b5560349e5..36f109cc37 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -72,6 +72,8 @@ Release 2.0.5-beta - UNRELEASED YARN-571. Remove user from ContainerLaunchContext. (Omkar Vinit Joshi via vinodkv) + YARN-716. Making ApplicationID immutable. (Siddharth Seth via vinodkv) + NEW FEATURES YARN-482. FS: Extend SchedulingMode to intermediate queues. 
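The TypeConverter and ShuffleHandler hunks above both rebuild a YARN ApplicationId from a MapReduce JobID, relying on the 1-1 mapping between the two. A minimal sketch of that conversion using the factory method this patch introduces; the job-ID string below is invented purely for illustration:

import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class JobIdToAppIdExample {
  public static void main(String[] args) {
    // The jtIdentifier part of a YARN-era job ID carries the RM cluster timestamp.
    JobID jobID = JobID.forName("job_1369430400000_0001");   // hypothetical job ID

    // One call replaces the old newRecordInstance + setClusterTimestamp + setId sequence.
    ApplicationId appID = ApplicationId.newInstance(
        Long.parseLong(jobID.getJtIdentifier()), jobID.getId());

    System.out.println(appID);   // expected: application_1369430400000_0001
  }
}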
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java index 097a5334c5..243d06045b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; -import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; /** *
ApplicationId represents the globally unique @@ -40,6 +40,14 @@ public abstract class ApplicationId implements Comparable { public static final String appIdStrPrefix = "application_"; + public static ApplicationId newInstance(long clusterTimestamp, int id) { + ApplicationId appId = Records.newRecord(ApplicationId.class); + appId.setClusterTimestamp(clusterTimestamp); + appId.setId(id); + appId.build(); + return appId; + } + /** * Get the short integer identifier of the ApplicationId * which is unique for all applications started by a particular instance @@ -51,8 +59,7 @@ public abstract class ApplicationId implements Comparable { public abstract int getId(); @Private - @Unstable - public abstract void setId(int id); + protected abstract void setId(int id); /** * Get the start time of the ResourceManager which is @@ -62,10 +69,9 @@ public abstract class ApplicationId implements Comparable { public abstract long getClusterTimestamp(); @Private - @Unstable - public abstract void setClusterTimestamp(long clusterTimestamp); + protected abstract void setClusterTimestamp(long clusterTimestamp); - + protected abstract void build(); static final ThreadLocal appIdFormat = new ThreadLocal() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java index ad5c778b4e..031c194d6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java @@ -21,58 +21,49 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProtoOrBuilder; + +import com.google.common.base.Preconditions; - public class ApplicationIdPBImpl extends ApplicationId { - ApplicationIdProto proto = ApplicationIdProto.getDefaultInstance(); + ApplicationIdProto proto = null; ApplicationIdProto.Builder builder = null; - boolean viaProto = false; - + public ApplicationIdPBImpl() { builder = ApplicationIdProto.newBuilder(); } public ApplicationIdPBImpl(ApplicationIdProto proto) { this.proto = proto; - viaProto = true; } - public synchronized ApplicationIdProto getProto() { - proto = viaProto ? proto : builder.build(); - viaProto = true; + public ApplicationIdProto getProto() { return proto; } - private synchronized void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = ApplicationIdProto.newBuilder(proto); - } - viaProto = false; - } - - @Override - public synchronized int getId() { - ApplicationIdProtoOrBuilder p = viaProto ? proto : builder; - return (p.getId()); + public int getId() { + Preconditions.checkNotNull(proto); + return proto.getId(); } @Override - public synchronized void setId(int id) { - maybeInitBuilder(); - builder.setId((id)); + protected void setId(int id) { + builder.setId(id); } @Override - public synchronized long getClusterTimestamp() { - ApplicationIdProtoOrBuilder p = viaProto ? 
proto : builder; - return (p.getClusterTimestamp()); + public long getClusterTimestamp() { + Preconditions.checkNotNull(proto); + return proto.getClusterTimestamp(); } @Override - public synchronized void setClusterTimestamp(long clusterTimestamp) { - maybeInitBuilder(); + protected void setClusterTimestamp(long clusterTimestamp) { builder.setClusterTimestamp((clusterTimestamp)); } + + @Override + protected void build() { + proto = builder.build(); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java index c3232d02c7..2700039648 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java @@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; -import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -90,9 +89,8 @@ public void testSubmitApplication() { for (int i = 0; i < exitStates.length; ++i) { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); - ApplicationId applicationId = Records.newRecord(ApplicationId.class); - applicationId.setClusterTimestamp(System.currentTimeMillis()); - applicationId.setId(i); + ApplicationId applicationId = ApplicationId.newInstance( + System.currentTimeMillis(), i); when(context.getApplicationId()).thenReturn(applicationId); ((MockYarnClient) client).setYarnApplicationState(exitStates[i]); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java index b3baff7773..01fc38cbb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java @@ -24,7 +24,6 @@ import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import static org.apache.hadoop.yarn.util.StringHelper.*; @@ -45,10 +44,8 @@ public static ApplicationId toAppID(String prefix, String s, Iterator it throwParseException(sjoin(prefix, ID), s); } shouldHaveNext(prefix, s, it); - ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - shouldHaveNext(prefix, s, it); - appId.setId(Integer.parseInt(it.next())); + ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()), + Integer.parseInt(it.next())); return appId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java index e7d9c5b10d..1e65767709 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java @@ -132,28 +132,17 @@ public static LocalResource newLocalResource(URI uri, public static ApplicationId newApplicationId(RecordFactory recordFactory, long clustertimestamp, CharSequence id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(Integer.valueOf(id.toString())); - applicationId.setClusterTimestamp(clustertimestamp); - return applicationId; + return ApplicationId.newInstance(clustertimestamp, + Integer.valueOf(id.toString())); } public static ApplicationId newApplicationId(RecordFactory recordFactory, long clusterTimeStamp, int id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(id); - applicationId.setClusterTimestamp(clusterTimeStamp); - return applicationId; + return ApplicationId.newInstance(clusterTimeStamp, id); } public static ApplicationId newApplicationId(long clusterTimeStamp, int id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(id); - applicationId.setClusterTimestamp(clusterTimeStamp); - return applicationId; + return ApplicationId.newInstance(clusterTimeStamp, id); } public static ApplicationAttemptId newApplicationAttemptId( @@ -166,11 +155,8 @@ public static ApplicationAttemptId newApplicationAttemptId( } public static ApplicationId convert(long clustertimestamp, CharSequence id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(Integer.valueOf(id.toString())); - applicationId.setClusterTimestamp(clustertimestamp); - return applicationId; + return ApplicationId.newInstance(clustertimestamp, + Integer.valueOf(id.toString())); } public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java index 21fe2d9874..9a367087dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java @@ -114,18 +114,15 @@ public static ApplicationId toApplicationId(RecordFactory recordFactory, private static ApplicationId toApplicationId(RecordFactory recordFactory, Iterator it) { - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); + ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()), + Integer.parseInt(it.next())); return appId; } private static ApplicationAttemptId toApplicationAttemptId( Iterator it) throws NumberFormatException { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); + ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()), + Integer.parseInt(it.next())); ApplicationAttemptId appAttemptId = Records .newRecord(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); @@ -135,9 
+132,8 @@ private static ApplicationAttemptId toApplicationAttemptId( private static ApplicationId toApplicationId( Iterator it) throws NumberFormatException { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); + ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()), + Integer.parseInt(it.next())); return appId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java index cc67ff7778..ecfac4d3f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java @@ -62,10 +62,7 @@ public static String newQueue() { } public static ApplicationId newAppID(int i) { - ApplicationId id = Records.newRecord(ApplicationId.class); - id.setClusterTimestamp(TS); - id.setId(i); - return id; + return ApplicationId.newInstance(TS, i); } public static ApplicationAttemptId newAppAttemptID(ApplicationId appId, int i) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index d3e9bce1d5..5f1dafb31c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -91,12 +91,9 @@ private void testRPCTimeout(String rpcClass) throws Exception { .newRecordInstance(ContainerLaunchContext.class); ContainerId containerId = recordFactory .newRecordInstance(ContainerId.class); - ApplicationId applicationId = recordFactory - .newRecordInstance(ApplicationId.class); + ApplicationId applicationId = ApplicationId.newInstance(0, 0); ApplicationAttemptId applicationAttemptId = recordFactory .newRecordInstance(ApplicationAttemptId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); applicationAttemptId.setApplicationId(applicationId); applicationAttemptId.setAttemptId(0); containerId.setApplicationAttemptId(applicationAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index 61ca6ea03b..6308658b66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -113,12 +113,9 @@ private void test(String rpcClass) throws Exception { recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerId containerId = recordFactory.newRecordInstance(ContainerId.class); - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); + ApplicationId applicationId = ApplicationId.newInstance(0, 0); ApplicationAttemptId applicationAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); applicationAttemptId.setApplicationId(applicationId); 
applicationAttemptId.setAttemptId(0); containerId.setApplicationAttemptId(applicationAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index 80e3c215e8..928aec7bf8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -126,10 +126,7 @@ public long getRMIdentifier() { ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerId cID = recordFactory.newRecordInstance(ContainerId.class); - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); + ApplicationId applicationId = ApplicationId.newInstance(0, 0); ApplicationAttemptId applicationAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); applicationAttemptId.setApplicationId(applicationId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java index ad2793e93a..ae1909766c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java @@ -249,9 +249,7 @@ private void createFiles(String dir, String subDir, int numOfFiles) { } private ContainerId createContainerId() { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = Records.newRecord(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index d5fe2f22f1..7864b364bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -220,9 +220,8 @@ public ContainerManager run() { } public static ContainerId createContainerId() { - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 
0); + ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 6c2f8c9731..7e1d2b685b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -151,8 +151,6 @@ public RegisterNodeManagerResponse registerNodeManager( return response; } - ApplicationId applicationID = recordFactory - .newRecordInstance(ApplicationId.class); ApplicationAttemptId appAttemptID = recordFactory .newRecordInstance(ApplicationAttemptId.class); ContainerId firstContainerID = recordFactory @@ -191,12 +189,15 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) getAppToContainerStatusMap(nodeStatus.getContainersStatuses()); org.apache.hadoop.yarn.api.records.Container mockContainer = mock(org.apache.hadoop.yarn.api.records.Container.class); + + ApplicationId appId1 = ApplicationId.newInstance(0, 1); + ApplicationId appId2 = ApplicationId.newInstance(0, 2); + if (heartBeatID == 1) { Assert.assertEquals(0, nodeStatus.getContainersStatuses().size()); // Give a container to the NM. - applicationID.setId(heartBeatID); - appAttemptID.setApplicationId(applicationID); + appAttemptID.setApplicationId(appId1); firstContainerID.setApplicationAttemptId(appAttemptID); firstContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory @@ -213,7 +214,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals("Number of applications should only be one!", 1, nodeStatus.getContainersStatuses().size()); Assert.assertEquals("Number of container for the app should be one!", - 1, appToContainers.get(applicationID).size()); + 1, appToContainers.get(appId1).size()); // Checks on the NM end ConcurrentMap activeContainers = @@ -221,8 +222,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals(1, activeContainers.size()); // Give another container to the NM. 
- applicationID.setId(heartBeatID); - appAttemptID.setApplicationId(applicationID); + appAttemptID.setApplicationId(appId2); secondContainerID.setApplicationAttemptId(appAttemptID); secondContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory @@ -239,7 +239,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals("Number of applications should only be one!", 1, appToContainers.size()); Assert.assertEquals("Number of container for the app should be two!", - 2, appToContainers.get(applicationID).size()); + 2, appToContainers.get(appId2).size()); // Checks on the NM end ConcurrentMap activeContainers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 46c9faa24b..38ced35a26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -18,8 +18,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.apache.hadoop.yarn.service.Service.STATE.INITED; +import static org.apache.hadoop.yarn.service.Service.STATE.STARTED; +import static org.apache.hadoop.yarn.service.Service.STATE.STOPPED; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -30,17 +34,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.service.Service; - - -import static org.apache.hadoop.yarn.service.Service.STATE.*; +import org.junit.Test; public class TestAuxServices { private static final Log LOG = LogFactory.getLog(TestAuxServices.class); @@ -123,18 +120,17 @@ public void testAuxEventDispatch() { aux.init(conf); aux.start(); - ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - appId.setId(65); + ApplicationId appId1 = ApplicationId.newInstance(0, 65); ByteBuffer buf = ByteBuffer.allocate(6); buf.putChar('A'); buf.putInt(65); buf.flip(); AuxServicesEvent event = new AuxServicesEvent( - AuxServicesEventType.APPLICATION_INIT, "user0", appId, "Asrv", buf); + AuxServicesEventType.APPLICATION_INIT, "user0", appId1, "Asrv", buf); aux.handle(event); - appId.setId(66); + ApplicationId appId2 = ApplicationId.newInstance(0, 66); event = new 
AuxServicesEvent( - AuxServicesEventType.APPLICATION_STOP, "user0", appId, "Bsrv", null); + AuxServicesEventType.APPLICATION_STOP, "user0", appId2, "Bsrv", null); // verify all services got the stop event aux.handle(event); Collection servs = aux.getServices(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index 858c44b4e4..982ba6fcc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -78,9 +78,7 @@ public TestContainerManager() throws UnsupportedFileSystemException { } private ContainerId createContainerId() { - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index a5656a5265..b0a08cfee7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -165,9 +165,7 @@ public void testContainerEnvVariables() throws Exception { Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); @@ -339,9 +337,7 @@ public void testDelayedKill() throws Exception { Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(1); - appId.setId(1); + ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index 32e2e33be6..2a09bb64e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -709,10 +709,7 @@ public void testLogAggregationForRealContainerLaunch() throws IOException, recordFactory.newRecordInstance(ContainerLaunchContext.class); Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1); ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index 06d45aa4b1..ba421b7648 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -204,10 +204,7 @@ public void testContainerKillOnMemoryOverflow() throws IOException, recordFactory.newRecordInstance(ContainerLaunchContext.class); Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.newInstance(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 93e1125ec6..5588290eb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -157,8 +157,7 @@ public void testGetApplicationReport() throws YarnRemoteException { RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); GetApplicationReportRequest 
request = recordFactory .newRecordInstance(GetApplicationReportRequest.class); - request.setApplicationId(recordFactory - .newRecordInstance(ApplicationId.class)); + request.setApplicationId(ApplicationId.newInstance(0, 0)); GetApplicationReportResponse applicationReport = rmService .getApplicationReport(request); Assert.assertNull("It should return null as application report for absent application.", @@ -436,11 +435,7 @@ private ConcurrentHashMap getRMApps( } private ApplicationId getApplicationId(int id) { - ApplicationId applicationId = recordFactory - .newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(123456); - applicationId.setId(id); - return applicationId; + return ApplicationId.newInstance(123456, id); } private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 1bb6a0e6d0..6e37df49b3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; import java.io.IOException; import java.util.Comparator; @@ -30,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; @@ -51,13 +53,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import static org.mockito.Mockito.*; public class TestCapacityScheduler { @@ -468,15 +467,9 @@ public void testApplicationComparator() { CapacityScheduler cs = new CapacityScheduler(); Comparator appComparator= cs.getApplicationComparator(); - ApplicationId id1 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id1.setClusterTimestamp(1); - id1.setId(1); - ApplicationId id2 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id2.setClusterTimestamp(1); - id2.setId(2); - ApplicationId id3 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id3.setClusterTimestamp(2); - id3.setId(1); + ApplicationId id1 = ApplicationId.newInstance(1, 1); + ApplicationId id2 = 
ApplicationId.newInstance(1, 2); + ApplicationId id3 = ApplicationId.newInstance(2, 1); //same clusterId FiCaSchedulerApp app1 = Mockito.mock(FiCaSchedulerApp.class); when(app1.getApplicationId()).thenReturn(id1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java index 62a1b9b978..3b545193a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java @@ -36,8 +36,7 @@ public class TestFSSchedulerApp { private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 7eaba4183b..b5074f24a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -143,8 +143,7 @@ private Configuration createConfiguration() { private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 4ae3858b17..0420780ade 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -97,9 +97,7 @@ public void tearDown() throws Exception { private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory .newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory - .newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId;
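The ApplicationId and ApplicationIdPBImpl hunks in this patch share one shape: the setters become protected, a static newInstance factory is the only way to populate a record, and build() freezes the backing protobuf builder before the instance is handed out, with getters guarded by Preconditions.checkNotNull. The stand-alone sketch below restates that shape without protobuf so the control flow is easy to follow; it is not Hadoop code, and the class and field names are invented:

public abstract class ImmutableRecordSketch {

  public static ImmutableRecordSketch newInstance(long clusterTimestamp, int id) {
    ImmutableRecordSketch record = new PojoImpl();   // Hadoop obtains the impl via Records.newRecord(...)
    record.setClusterTimestamp(clusterTimestamp);
    record.setId(id);
    record.build();                                  // freeze before the instance escapes
    return record;
  }

  public abstract long getClusterTimestamp();
  public abstract int getId();

  // Setters are no longer part of the public surface.
  protected abstract void setClusterTimestamp(long clusterTimestamp);
  protected abstract void setId(int id);
  protected abstract void build();

  private static final class PojoImpl extends ImmutableRecordSketch {
    private Long clusterTimestamp;   // stays null until set by the factory
    private Integer id;
    private boolean built;

    @Override public long getClusterTimestamp() {
      checkBuilt();                  // ApplicationIdPBImpl uses Preconditions.checkNotNull(proto)
      return clusterTimestamp;
    }
    @Override public int getId() {
      checkBuilt();
      return id;
    }
    @Override protected void setClusterTimestamp(long ts) { this.clusterTimestamp = ts; }
    @Override protected void setId(int newId) { this.id = newId; }
    @Override protected void build() { this.built = true; }

    private void checkBuilt() {
      if (!built) {
        throw new IllegalStateException("record has not been built yet");
      }
    }
  }
}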
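A behavioural consequence visible in the TestNodeStatusUpdater and TestAuxServices hunks: code that used to reuse one mutable record and call setId() between uses must now create a separate immutable instance per application. A small illustration, assuming nothing beyond the new factory; the numeric values are arbitrary:

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class DistinctAppIdsExample {
  public static void main(String[] args) {
    // Before: one record, mutated in place between uses.
    //   appId.setId(65); ... appId.setId(66);   // no longer possible; setters are protected
    // After: one immutable instance per logical application.
    ApplicationId appId1 = ApplicationId.newInstance(0, 65);
    ApplicationId appId2 = ApplicationId.newInstance(0, 66);

    // Safe to use as map keys or hand to other components; neither can change underneath.
    System.out.println(appId1.compareTo(appId2) < 0);   // true: same timestamp, smaller id
  }
}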
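The Apps.toAppID and ConverterUtils hunks parse application_<clusterTimestamp>_<id> strings and now feed both fields straight into the factory. A hedged sketch of that parsing path, using only standard Java plus ApplicationId.newInstance; the splitting logic here is illustrative rather than the ConverterUtils implementation:

import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AppIdParsingExample {
  // Illustrative re-implementation; ConverterUtils walks an iterator over the
  // '_'-separated pieces in much the same way.
  static ApplicationId parse(String appIdStr) {
    Iterator<String> it = Arrays.asList(appIdStr.split("_")).iterator();
    String prefix = it.next();                      // "application"
    if (!"application".equals(prefix)) {
      throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr);
    }
    return ApplicationId.newInstance(Long.parseLong(it.next()),
        Integer.parseInt(it.next()));
  }

  public static void main(String[] args) {
    ApplicationId appId = parse("application_1369430400000_0002");
    System.out.println(appId.getClusterTimestamp() + " " + appId.getId()); // 1369430400000 2
  }
}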