From 405920d5797d742d47c2a90313a1879fc2b4d7fb Mon Sep 17 00:00:00 2001 From: fanshilun Date: Wed, 5 Feb 2025 13:46:06 +0800 Subject: [PATCH] MAPREDUCE-7418. Fix CheckStyle & Junit Test. --- .../mapred/TestLocalContainerLauncher.java | 41 +- .../TestTaskAttemptFinishingMonitor.java | 4 +- .../mapred/TestTaskAttemptListenerImpl.java | 26 +- .../apache/hadoop/mapred/TestYarnChild.java | 7 +- .../TestJobHistoryEventHandler.java | 133 ++-- .../mapreduce/jobhistory/TestJobSummary.java | 9 +- .../apache/hadoop/mapreduce/v2/app/MRApp.java | 50 +- .../mapreduce/v2/app/MRAppBenchmark.java | 63 +- .../hadoop/mapreduce/v2/app/TestAMInfos.java | 12 +- .../app/TestCheckpointPreemptionPolicy.java | 30 +- .../hadoop/mapreduce/v2/app/TestFail.java | 35 +- .../mapreduce/v2/app/TestFetchFailure.java | 167 +++-- .../mapreduce/v2/app/TestJobEndNotifier.java | 87 ++- .../hadoop/mapreduce/v2/app/TestKill.java | 77 +-- .../v2/app/TestKillAMPreemptionPolicy.java | 4 +- .../hadoop/mapreduce/v2/app/TestMRApp.java | 42 +- .../app/TestMRAppComponentDependencies.java | 8 +- .../mapreduce/v2/app/TestMRAppMaster.java | 42 +- .../mapreduce/v2/app/TestMRClientService.java | 81 ++- .../hadoop/mapreduce/v2/app/TestRecovery.java | 175 +++-- .../mapreduce/v2/app/TestStagingCleanup.java | 27 +- .../v2/app/TestTaskHeartbeatHandler.java | 12 +- .../app/commit/TestCommitterEventHandler.java | 26 +- .../v2/app/job/impl/TestJobImpl.java | 149 +++-- .../app/job/impl/TestMapReduceChildJVM.java | 74 +-- .../v2/app/job/impl/TestShuffleProvider.java | 14 +- .../v2/app/job/impl/TestTaskAttempt.java | 312 +++++---- .../impl/TestTaskAttemptContainerRequest.java | 17 +- .../v2/app/job/impl/TestTaskImpl.java | 32 +- .../app/launcher/TestContainerLauncher.java | 62 +- .../launcher/TestContainerLauncherImpl.java | 23 +- .../local/TestLocalContainerAllocator.java | 60 +- .../v2/app/metrics/TestMRAppMetrics.java | 3 +- .../v2/app/rm/TestRMContainerAllocator.java | 605 +++++++++--------- 
.../app/rm/TestResourceCalculatorUtils.java | 23 +- .../v2/app/speculate/TestDataStatistics.java | 45 +- .../TestSimpleExponentialForecast.java | 18 +- .../webapp/EnvironmentVariablesExtension.java | 36 ++ .../mapreduce/v2/app/webapp/TestAMWebApp.java | 48 +- 39 files changed, 1369 insertions(+), 1310 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/EnvironmentVariablesExtension.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java index 40b3fb0edd496..42d6642d22485 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java @@ -54,14 +54,16 @@ import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + public class TestLocalContainerLauncher { private static final Logger LOG = LoggerFactory.getLogger(TestLocalContainerLauncher.class); @@ -104,11 +106,15 @@ public void testKillJob() throws Exception { AppContext context = mock(AppContext.class); // a simple event handler solely to detect the container cleaned event 
final CountDownLatch isDone = new CountDownLatch(1); - EventHandler handler = event -> { - LOG.info("handling event {} with type {}.", event.getClass(), event.getType()); - if (event instanceof TaskAttemptEvent) { - if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) { - isDone.countDown(); + EventHandler handler = new EventHandler() { + @Override + public void handle(Event event) { + LOG.info("handling event " + event.getClass() + + " with type " + event.getType()); + if (event instanceof TaskAttemptEvent) { + if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) { + isDone.countDown(); + } } } }; @@ -129,7 +135,7 @@ public void testKillJob() throws Exception { Job job = mock(Job.class); when(job.getTotalMaps()).thenReturn(1); when(job.getTotalReduces()).thenReturn(0); - Map jobs = new HashMap<>(); + Map jobs = new HashMap(); jobs.put(jobId, job); // app context returns the one and only job when(context.getAllJobs()).thenReturn(jobs); @@ -146,11 +152,14 @@ public void testKillJob() throws Exception { TaskAttemptID taskID = TypeConverter.fromYarn(taId); when(mapTask.getTaskID()).thenReturn(taskID); when(mapTask.getJobID()).thenReturn(taskID.getJobID()); - doAnswer((Answer) invocation -> { - // sleep for a long time - LOG.info("sleeping for 5 minutes..."); - Thread.sleep(5 * 60 * 1000); - return null; + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + // sleep for a long time + LOG.info("sleeping for 5 minutes..."); + Thread.sleep(5*60*1000); + return null; + } }).when(mapTask).run(isA(JobConf.class), isA(TaskUmbilicalProtocol.class)); // pump in a task attempt launch event @@ -162,7 +171,7 @@ public void testKillJob() throws Exception { // now pump in a container clean-up event ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null, - ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP); + ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP); 
launcher.handle(cleanupEvent); // wait for the event to fire: this should be received promptly @@ -188,11 +197,11 @@ public void testRenameMapOutputForReduce() throws Exception { // make sure both dirs are distinct // - conf.set(MRConfig.LOCAL_DIR, localDirs[0]); + conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString()); final Path mapOut = mrOutputFiles.getOutputFileForWrite(1); - conf.set(MRConfig.LOCAL_DIR, localDirs[1]); + conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString()); final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1); - Assertions.assertNotEquals( + assertNotEquals( mapOut.getParent(), mapOutIdx.getParent(), "Paths must be different!"); // make both dirs part of LOCAL_DIR diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java index cffe396f6c524..7389aebbd30ec 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java @@ -45,7 +45,7 @@ public class TestTaskAttemptFinishingMonitor { @Test - public void testFinishingAttemptTimeout() + public void testFinshingAttemptTimeout() throws IOException, InterruptedException { SystemClock clock = SystemClock.getInstance(); Configuration conf = new Configuration(); @@ -103,6 +103,6 @@ public void handle(Event event) { } } } - } + }; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java index 500f9c5577ba5..2b7b9c88b85dd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -187,7 +188,7 @@ public void testGetTask() throws IOException { startListener(false); // Verify ask before registration. - //The JVM ID has not been registered yet, so we should kill it. + //The JVM ID has not been registered yet so we should kill it. JvmContext context = new JvmContext(); context.jvmId = id; @@ -221,7 +222,7 @@ public void testGetTask() throws IOException { listener.unregister(attemptId, wid); - // Verify after un-registration. + // Verify after unregistration. 
result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); @@ -245,7 +246,7 @@ public void testJVMId() { JVMId jvmid = new JVMId("test", 1, true, 2); JVMId jvmid1 = JVMId.forName("jvm_test_0001_m_000002"); - // test compare method should be the same + // test compare methot should be the same assertEquals(0, jvmid.compareTo(jvmid1)); } @@ -377,7 +378,7 @@ protected void registerHeartbeatHandler(Configuration conf) { TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0); - List partialOut = new ArrayList<>(); + List partialOut = new ArrayList(); partialOut.add(new Path("/prev1")); partialOut.add(new Path("/prev2")); @@ -555,13 +556,16 @@ protected void stopRpcServer() { long unregisterTimeout = conf.getLong(MRJobConfig.TASK_EXIT_TIMEOUT, MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT); clock.setTime(unregisterTimeout + 1); - GenericTestUtils.waitFor(() -> { - try { - AMFeedback response = - tal.statusUpdate(attemptID, firstReduceStatus); - return !response.getTaskFound(); - } catch (Exception e) { - throw new RuntimeException("status update failed", e); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + try { + AMFeedback response = + tal.statusUpdate(attemptID, firstReduceStatus); + return !response.getTaskFound(); + } catch (Exception e) { + throw new RuntimeException("status update failed", e); + } } }, 10, 10000); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestYarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestYarnChild.java index daaabf3e863cc..7730dc6c0615b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestYarnChild.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestYarnChild.java @@ -24,7 +24,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Tests the behavior of YarnChild. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index 55588fc38a121..dacd17572ecba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -83,7 +83,6 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -242,12 +241,12 @@ public void testUnflushedTimer() throws Exception { } handleNextNEvents(jheh, 9); - Assertions.assertTrue(jheh.getFlushTimerStatus()); + assertTrue(jheh.getFlushTimerStatus()); verify(mockWriter, times(0)).flush(); Thread.sleep(2 * 4 * 1000l); // 4 seconds should be enough. Just be safe. 
verify(mockWriter).flush(); - Assertions.assertFalse(jheh.getFlushTimerStatus()); + assertFalse(jheh.getFlushTimerStatus()); } finally { jheh.stop(); verify(mockWriter).close(); @@ -428,13 +427,13 @@ public void testPropertyRedactionForJHS() throws Exception { // load the job_conf.xml in JHS directory and verify property redaction. Path jhsJobConfFile = getJobConfInIntermediateDoneDir(conf, params.jobId); - Assertions.assertTrue( + assertTrue( FileContext.getFileContext(conf).util().exists(jhsJobConfFile), "The job_conf.xml file is not in the JHS directory"); Configuration jhsJobConf = new Configuration(); try (InputStream input = FileSystem.get(conf).open(jhsJobConfFile)) { jhsJobConf.addResource(input); - Assertions.assertEquals( + assertEquals( MRJobConfUtil.REDACTION_REPLACEMENT_VAL , jhsJobConf.get(sensitivePropertyName), sensitivePropertyName + " is not redacted in HDFS."); @@ -517,7 +516,7 @@ public void testGetHistoryIntermediateDoneDirForUser() throws IOException { "/mapred/history/done_intermediate"); conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name")); String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf); - Assertions.assertEquals("/mapred/history/done_intermediate/" + + assertEquals("/mapred/history/done_intermediate/" + System.getProperty("user.name"), pathStr); // Test fully qualified path @@ -531,7 +530,7 @@ public void testGetHistoryIntermediateDoneDirForUser() throws IOException { conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///"); pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf); - Assertions.assertEquals(dfsCluster.getURI().toString() + + assertEquals(dfsCluster.getURI().toString() + "/mapred/history/done_intermediate/" + System.getProperty("user.name"), pathStr); } @@ -608,13 +607,13 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, 
null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); TimelineEntity tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.jobId.toString(), tEntity.getEntityId()); - Assertions.assertEquals(1, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.AM_STARTED.toString(), + assertEquals(t.jobId.toString(), tEntity.getEntityId()); + assertEquals(1, tEntity.getEvents().size()); + assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(0).getEventType()); - Assertions.assertEquals(currentTime - 10, + assertEquals(currentTime - 10, tEntity.getEvents().get(0).getTimestamp()); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -625,17 +624,17 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.jobId.toString(), tEntity.getEntityId()); - Assertions.assertEquals(2, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.JOB_SUBMITTED.toString(), + assertEquals(t.jobId.toString(), tEntity.getEntityId()); + assertEquals(2, tEntity.getEvents().size()); + assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType()); - Assertions.assertEquals(EventType.AM_STARTED.toString(), + assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType()); - Assertions.assertEquals(currentTime + 10, + assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp()); - Assertions.assertEquals(currentTime - 10, + assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp()); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -644,21 +643,21 @@ public void testTimelineEventHandling() throws 
Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.jobId.toString(), tEntity.getEntityId()); - Assertions.assertEquals(3, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.JOB_SUBMITTED.toString(), + assertEquals(t.jobId.toString(), tEntity.getEntityId()); + assertEquals(3, tEntity.getEvents().size()); + assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType()); - Assertions.assertEquals(EventType.AM_STARTED.toString(), + assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType()); - Assertions.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), + assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(2).getEventType()); - Assertions.assertEquals(currentTime + 10, + assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp()); - Assertions.assertEquals(currentTime - 10, + assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp()); - Assertions.assertEquals(currentTime - 20, + assertEquals(currentTime - 20, tEntity.getEvents().get(2).getTimestamp()); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -667,25 +666,25 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.jobId.toString(), tEntity.getEntityId()); - Assertions.assertEquals(4, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.JOB_SUBMITTED.toString(), + assertEquals(t.jobId.toString(), 
tEntity.getEntityId()); + assertEquals(4, tEntity.getEvents().size()); + assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType()); - Assertions.assertEquals(EventType.JOB_FINISHED.toString(), + assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(1).getEventType()); - Assertions.assertEquals(EventType.AM_STARTED.toString(), + assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(2).getEventType()); - Assertions.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), + assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(3).getEventType()); - Assertions.assertEquals(currentTime + 10, + assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp()); - Assertions.assertEquals(currentTime, + assertEquals(currentTime, tEntity.getEvents().get(1).getTimestamp()); - Assertions.assertEquals(currentTime - 10, + assertEquals(currentTime - 10, tEntity.getEvents().get(2).getTimestamp()); - Assertions.assertEquals(currentTime - 20, + assertEquals(currentTime - 20, tEntity.getEvents().get(3).getTimestamp()); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -695,29 +694,29 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.jobId.toString(), tEntity.getEntityId()); - Assertions.assertEquals(5, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.JOB_KILLED.toString(), + assertEquals(t.jobId.toString(), tEntity.getEntityId()); + assertEquals(5, tEntity.getEvents().size()); + assertEquals(EventType.JOB_KILLED.toString(), tEntity.getEvents().get(0).getEventType()); - Assertions.assertEquals(EventType.JOB_SUBMITTED.toString(), + 
assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(1).getEventType()); - Assertions.assertEquals(EventType.JOB_FINISHED.toString(), + assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(2).getEventType()); - Assertions.assertEquals(EventType.AM_STARTED.toString(), + assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(3).getEventType()); - Assertions.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), + assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(4).getEventType()); - Assertions.assertEquals(currentTime + 20, + assertEquals(currentTime + 20, tEntity.getEvents().get(0).getTimestamp()); - Assertions.assertEquals(currentTime + 10, + assertEquals(currentTime + 10, tEntity.getEvents().get(1).getTimestamp()); - Assertions.assertEquals(currentTime, + assertEquals(currentTime, tEntity.getEvents().get(2).getTimestamp()); - Assertions.assertEquals(currentTime - 10, + assertEquals(currentTime - 10, tEntity.getEvents().get(3).getTimestamp()); - Assertions.assertEquals(currentTime - 20, + assertEquals(currentTime - 20, tEntity.getEvents().get(4).getTimestamp()); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -725,13 +724,13 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.taskID.toString(), tEntity.getEntityId()); - Assertions.assertEquals(1, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.TASK_STARTED.toString(), + assertEquals(t.taskID.toString(), tEntity.getEntityId()); + assertEquals(1, tEntity.getEvents().size()); + assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(0).getEventType()); - 
Assertions.assertEquals(TaskType.MAP.toString(), + assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE")); handleEvent(jheh, new JobHistoryEvent(t.jobId, @@ -739,15 +738,15 @@ public void testTimelineEventHandling() throws Exception { jheh.getDispatcher().await(); entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null); - Assertions.assertEquals(1, entities.getEntities().size()); + assertEquals(1, entities.getEntities().size()); tEntity = entities.getEntities().get(0); - Assertions.assertEquals(t.taskID.toString(), tEntity.getEntityId()); - Assertions.assertEquals(2, tEntity.getEvents().size()); - Assertions.assertEquals(EventType.TASK_STARTED.toString(), + assertEquals(t.taskID.toString(), tEntity.getEntityId()); + assertEquals(2, tEntity.getEvents().size()); + assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(1).getEventType()); - Assertions.assertEquals(TaskType.REDUCE.toString(), + assertEquals(TaskType.REDUCE.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE")); - Assertions.assertEquals(TaskType.MAP.toString(), + assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE")); } } @@ -786,7 +785,7 @@ public void testCountersToJSON() throws Exception { + "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":" + "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\"," + "\"VALUE\":12}]}]"; - Assertions.assertEquals(expected, jsonStr); + assertEquals(expected, jsonStr); } @Test @@ -797,20 +796,20 @@ public void testCountersToJSONEmpty() throws Exception { JsonNode jsonNode = JobHistoryEventUtils.countersToJSON(counters); String jsonStr = new ObjectMapper().writeValueAsString(jsonNode); String expected = "[]"; - Assertions.assertEquals(expected, jsonStr); + assertEquals(expected, jsonStr); counters = new Counters(); jsonNode = JobHistoryEventUtils.countersToJSON(counters); 
jsonStr = new ObjectMapper().writeValueAsString(jsonNode); expected = "[]"; - Assertions.assertEquals(expected, jsonStr); + assertEquals(expected, jsonStr); counters.addGroup("DOCTORS", "Incarnations of the Doctor"); jsonNode = JobHistoryEventUtils.countersToJSON(counters); jsonStr = new ObjectMapper().writeValueAsString(jsonNode); expected = "[{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\":\"Incarnations of the " + "Doctor\",\"COUNTERS\":[]}]"; - Assertions.assertEquals(expected, jsonStr); + assertEquals(expected, jsonStr); } private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java index 41835d4f3b71b..9ee402b5db7a2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java @@ -19,12 +19,13 @@ package org.apache.hadoop.mapreduce.jobhistory; import org.apache.hadoop.mapreduce.v2.api.records.JobId; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -64,8 +65,8 @@ public void testEscapeJobSummary() { summary.setJobName("aa\rbb\ncc\r\ndd"); String out = summary.getJobSummaryString(); LOG.info("summary: " + out); - Assertions.assertFalse(out.contains("\r")); - 
Assertions.assertFalse(out.contains("\n")); - Assertions.assertTrue(out.contains("aa\\rbb\\ncc\\r\\ndd")); + assertFalse(out.contains("\r")); + assertFalse(out.contains("\n")); + assertTrue(out.contains("aa\\rbb\\ncc\\r\\ndd")); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 56eecb9b76724..c1a5ef8edb939 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -98,10 +98,11 @@ import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; -import org.junit.jupiter.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Mock MRAppMaster. Doesn't start RPC servers. 
@@ -326,8 +327,8 @@ public void waitForInternalState(JobImpl job, iState = job.getInternalState(); } LOG.info("Job {} Internal State is : {}", job.getID(), iState); - Assertions.assertEquals( - finalState, iState, "Task Internal state is not correct (timeout)"); + assertEquals( + finalState, iState, "Task Internal state is not correct (timeout)"); } public void waitForInternalState(TaskImpl task, @@ -339,8 +340,8 @@ public void waitForInternalState(TaskImpl task, iState = task.getInternalState(); } LOG.info("Task {} Internal State is : {}", task.getID(), iState); - Assertions.assertEquals( - finalState, iState, "Task Internal state is not correct (timeout)"); + assertEquals( + finalState, iState, "Task Internal state is not correct (timeout)"); } public void waitForInternalState(TaskAttemptImpl attempt, @@ -352,8 +353,8 @@ public void waitForInternalState(TaskAttemptImpl attempt, iState = attempt.getInternalState(); } LOG.info("TaskAttempt {} Internal State is : {}", attempt.getID(), iState); - Assertions.assertEquals( - finalState, iState, "TaskAttempt Internal state is not correct (timeout)"); + assertEquals(finalState, iState, + "TaskAttempt Internal state is not correct (timeout)"); } public void waitForState(TaskAttempt attempt, @@ -367,9 +368,8 @@ public void waitForState(TaskAttempt attempt, } LOG.info("TaskAttempt {} State is : {}", attempt.getID(), report.getTaskAttemptState()); - Assertions.assertEquals( - finalState -, report.getTaskAttemptState(), "TaskAttempt state is not correct (timeout)"); + assertEquals(finalState, + report.getTaskAttemptState(), "TaskAttempt state is not correct (timeout)"); } public void waitForState(Task task, TaskState finalState) throws Exception { @@ -381,8 +381,8 @@ public void waitForState(Task task, TaskState finalState) throws Exception { report = task.getReport(); } LOG.info("Task {} State is : {}", task.getID(), report.getTaskState()); - Assertions.assertEquals(finalState -, report.getTaskState(), "Task state is not 
correct (timeout)"); + assertEquals(finalState, + report.getTaskState(), "Task state is not correct (timeout)"); } public void waitForState(Job job, JobState finalState) throws Exception { @@ -394,14 +394,14 @@ public void waitForState(Job job, JobState finalState) throws Exception { Thread.sleep(WAIT_FOR_STATE_INTERVAL); } LOG.info("Job {} State is : {}", job.getID(), report.getJobState()); - Assertions.assertEquals(finalState, + assertEquals(finalState, job.getState(), "Job state is not correct (timeout)"); } public void waitForState(Service.STATE finalState) throws Exception { if (finalState == Service.STATE.STOPPED) { - Assertions.assertTrue( - waitForServiceToStop(20 * 1000), "Timeout while waiting for MRApp to stop"); + assertTrue(waitForServiceToStop(20 * 1000), + "Timeout while waiting for MRApp to stop"); } else { int timeoutSecs = 0; while (!finalState.equals(getServiceState()) @@ -409,8 +409,8 @@ public void waitForState(Service.STATE finalState) throws Exception { Thread.sleep(WAIT_FOR_STATE_INTERVAL); } LOG.info("MRApp State is : {}", getServiceState()); - Assertions.assertEquals(finalState, - getServiceState(), "MRApp state is not correct (timeout)"); + assertEquals(finalState, getServiceState(), + "MRApp state is not correct (timeout)"); } } @@ -419,22 +419,22 @@ public void verifyCompleted() { JobReport jobReport = job.getReport(); LOG.info("Job start time :{}", jobReport.getStartTime()); LOG.info("Job finish time :{}", jobReport.getFinishTime()); - Assertions.assertTrue( - jobReport.getStartTime() <= jobReport.getFinishTime(), "Job start time is not less than finish time"); - Assertions.assertTrue( - jobReport.getFinishTime() <= System.currentTimeMillis(), "Job finish time is in future"); + assertTrue(jobReport.getStartTime() <= jobReport.getFinishTime(), + "Job start time is not less than finish time"); + assertTrue(jobReport.getFinishTime() <= System.currentTimeMillis(), + "Job finish time is in future"); for (Task task : job.getTasks().values()) 
{ TaskReport taskReport = task.getReport(); LOG.info("Task {} start time : {}", task.getID(), taskReport.getStartTime()); LOG.info("Task {} finish time : {}", task.getID(), taskReport.getFinishTime()); - Assertions.assertTrue( - taskReport.getStartTime() <= taskReport.getFinishTime(), "Task start time is not less than finish time"); + assertTrue(taskReport.getStartTime() <= taskReport.getFinishTime(), + "Task start time is not less than finish time"); for (TaskAttempt attempt : task.getAttempts().values()) { TaskAttemptReport attemptReport = attempt.getReport(); - Assertions.assertTrue( - attemptReport.getStartTime() <= attemptReport.getFinishTime(), "Attempt start time is not less than finish time"); + assertTrue(attemptReport.getStartTime() <= attemptReport.getFinishTime(), + "Attempt start time is not less than finish time"); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index cb2e6dca2f964..34f4c8c7164cf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -122,7 +122,8 @@ class ThrottledContainerAllocator extends AbstractService implements ContainerAllocator, RMHeartbeatHandler { private int containerCount; private Thread thread; - private BlockingQueue eventQueue = new LinkedBlockingQueue<>(); + private BlockingQueue eventQueue = + new LinkedBlockingQueue(); public ThrottledContainerAllocator() { super("ThrottledContainerAllocator"); } @@ -136,36 +137,40 @@ public void handle(ContainerAllocatorEvent event) { } @Override protected void 
serviceStart() throws Exception { - thread = new Thread(() -> { - ContainerAllocatorEvent event; - while (!Thread.currentThread().isInterrupted()) { - try { - if (concurrentRunningTasks < maxConcurrentRunningTasks) { - event = eventQueue.take(); - ContainerId cId = - ContainerId.newContainerId(getContext() - .getApplicationAttemptId(), containerCount++); + thread = new Thread(new Runnable() { + @Override + @SuppressWarnings("unchecked") + public void run() { + ContainerAllocatorEvent event = null; + while (!Thread.currentThread().isInterrupted()) { + try { + if (concurrentRunningTasks < maxConcurrentRunningTasks) { + event = eventQueue.take(); + ContainerId cId = + ContainerId.newContainerId(getContext() + .getApplicationAttemptId(), containerCount++); - //System.out.println("Allocating " + containerCount); + //System.out.println("Allocating " + containerCount); - Container container = - recordFactory.newRecordInstance(Container.class); - container.setId(cId); - NodeId nodeId = NodeId.newInstance("dummy", 1234); - container.setNodeId(nodeId); - container.setContainerToken(null); - container.setNodeHttpAddress("localhost:8042"); - getContext().getEventHandler() - .handle( - new TaskAttemptContainerAssignedEvent(event - .getAttemptID(), container, null)); - concurrentRunningTasks++; - } else { - Thread.sleep(1000); + Container container = + recordFactory.newRecordInstance(Container.class); + container.setId(cId); + NodeId nodeId = NodeId.newInstance("dummy", 1234); + container.setNodeId(nodeId); + container.setContainerToken(null); + container.setNodeHttpAddress("localhost:8042"); + getContext().getEventHandler() + .handle( + new TaskAttemptContainerAssignedEvent(event + .getAttemptID(), container, null)); + concurrentRunningTasks++; + } else { + Thread.sleep(1000); + } + } catch (InterruptedException e) { + System.out.println("Returning, interrupted"); + return; } - } catch (InterruptedException e) { - System.out.println("Returning, interrupted"); - return; } } }); 
@@ -240,7 +245,7 @@ public AllocateResponse allocate(AllocateRequest request) AllocateResponse response = Records.newRecord(AllocateResponse.class); List askList = request.getAskList(); - List containers = new ArrayList<>(); + List containers = new ArrayList(); for (ResourceRequest req : askList) { if (!ResourceRequest.isAnyLocation(req.getResourceName())) { continue; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestAMInfos.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestAMInfos.java index 085013b774ae4..c86ea72690459 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestAMInfos.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestAMInfos.java @@ -21,8 +21,6 @@ import java.util.Iterator; import java.util.List; -import org.junit.jupiter.api.Assertions; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; @@ -35,6 +33,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestAMInfos { @Test @@ -50,7 +50,7 @@ public void testAMInfosWithoutRecoveryEnabled() throws Exception { long am1StartTime = app.getAllAMInfos().get(0).getStartTime(); - Assertions.assertEquals(1, job.getTasks().size(), "No of tasks not correct"); + assertEquals(1, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); app.waitForState(mapTask, TaskState.RUNNING); @@ -71,14 +71,14 @@ public void testAMInfosWithoutRecoveryEnabled() throws Exception { 
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "No of tasks not correct"); + assertEquals(1, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask = it.next(); // There should be two AMInfos List amInfos = app.getAllAMInfos(); - Assertions.assertEquals(2, amInfos.size()); + assertEquals(2, amInfos.size()); AMInfo amInfoOne = amInfos.get(0); - Assertions.assertEquals(am1StartTime, amInfoOne.getStartTime()); + assertEquals(am1StartTime, amInfoOne.getStartTime()); app.stop(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java index 88e6469f93260..6598bd0c05902 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java @@ -22,8 +22,9 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.util.resource.Resources; -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.Collections; @@ -67,11 +68,13 @@ public class TestCheckpointPreemptionPolicy { RMContainerAllocator r; JobId jid; RunningAppContext mActxt; - Set preemptedContainers = new HashSet<>(); - Map assignedContainers = new 
HashMap<>(); + Set preemptedContainers = new HashSet(); + Map assignedContainers = + new HashMap(); private final RecordFactory recordFactory = - RecordFactoryProvider.getRecordFactory(null); - HashMap contToResourceMap = new HashMap<>(); + RecordFactoryProvider.getRecordFactory(null); + HashMap contToResourceMap = + new HashMap(); private int minAlloc = 1024; @@ -118,7 +121,7 @@ public TaskAttemptId getTaskAttempt(ContainerId cId) { } @Override public List getContainers(TaskType t) { - List p = new ArrayList<>(); + List p = new ArrayList(); for (Map.Entry ent : assignedContainers.entrySet()) { if (ent.getValue().getTaskId().getTaskType().equals(t)) { @@ -161,7 +164,7 @@ public TaskAttemptId getTaskAttempt(ContainerId cId) { @Override public List getContainers(TaskType t) { - List p = new ArrayList<>(); + List p = new ArrayList(); for (Map.Entry ent : assignedContainers.entrySet()){ if(ent.getValue().getTaskId().getTaskType().equals(t)){ @@ -225,7 +228,7 @@ private List validatePreemption(PreemptionMessage pM, CheckpointAMPreemptionPolicy policy, int supposedMemPreemption) { Resource effectivelyPreempted = Resource.newInstance(0, 0); - List preempting = new ArrayList<>(); + List preempting = new ArrayList(); for (Map.Entry ent : assignedContainers.entrySet()) { @@ -254,7 +257,7 @@ private PreemptionMessage generatePreemptionMessage( Resource minimumAllocation, boolean strict) { Set currentContPreemption = Collections.unmodifiableSet( - new HashSet<>(containerToPreempt)); + new HashSet(containerToPreempt)); containerToPreempt.clear(); Resource tot = Resource.newInstance(0, 0); for(ContainerId c : currentContPreemption){ @@ -283,7 +286,7 @@ private PreemptionMessage generatePreemptionMessage(Allocation allocation) { pMsg = recordFactory.newRecordInstance(PreemptionMessage.class); StrictPreemptionContract pStrict = recordFactory.newRecordInstance(StrictPreemptionContract.class); - Set pCont = new HashSet<>(); + Set pCont = new HashSet(); for (ContainerId cId : 
allocation.getStrictContainerPreemptions()) { PreemptionContainer pc = recordFactory.newRecordInstance(PreemptionContainer.class); @@ -304,14 +307,15 @@ private PreemptionMessage generatePreemptionMessage(Allocation allocation) { } PreemptionContract contract = recordFactory.newRecordInstance(PreemptionContract.class); - Set pCont = new HashSet<>(); + Set pCont = new HashSet(); for (ContainerId cId : allocation.getContainerPreemptions()) { PreemptionContainer pc = recordFactory.newRecordInstance(PreemptionContainer.class); pc.setId(cId); pCont.add(pc); } - List pRes = new ArrayList<>(); + List pRes = + new ArrayList(); for (ResourceRequest crr : allocation.getResourcePreemptions()) { PreemptionResourceRequest prr = recordFactory.newRecordInstance(PreemptionResourceRequest.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java index 2f0eaafbb68a8..716f25af2a75f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java @@ -24,7 +24,6 @@ import java.util.Map; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.TaskAttemptListenerImpl; @@ -50,6 +49,8 @@ import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Tests the state machine with respect to Job/Task/TaskAttempt failure * scenarios. 
@@ -68,20 +69,19 @@ public void testFailTask() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.SUCCEEDED); Map tasks = job.getTasks(); - Assertions.assertEquals(1, tasks.size(), "Num tasks is not correct"); + assertEquals(1, tasks.size(), "Num tasks is not correct"); Task task = tasks.values().iterator().next(); - Assertions.assertEquals(TaskState.SUCCEEDED -, task.getReport().getTaskState(), "Task state not correct"); + assertEquals(TaskState.SUCCEEDED, task.getReport().getTaskState(), "Task state not correct"); Map attempts = tasks.values().iterator().next().getAttempts(); - Assertions.assertEquals(2, attempts.size(), "Num attempts is not correct"); + assertEquals(2, attempts.size(), "Num attempts is not correct"); //one attempt must be failed //and another must have succeeded Iterator it = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.FAILED -, it.next().getReport().getTaskAttemptState(), "Attempt state not correct"); - Assertions.assertEquals(TaskAttemptState.SUCCEEDED -, it.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.FAILED, it.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); + assertEquals(TaskAttemptState.SUCCEEDED, it.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); } @Test @@ -159,17 +159,15 @@ public void testTimedOutTask() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.FAILED); Map tasks = job.getTasks(); - Assertions.assertEquals(1, tasks.size(), "Num tasks is not correct"); + assertEquals(1, tasks.size(), "Num tasks is not correct"); Task task = tasks.values().iterator().next(); - Assertions.assertEquals(TaskState.FAILED -, task.getReport().getTaskState(), "Task state not correct"); + assertEquals(TaskState.FAILED, task.getReport().getTaskState(), "Task state not correct"); Map attempts = tasks.values().iterator().next().getAttempts(); - 
Assertions.assertEquals(maxAttempts -, attempts.size(), "Num attempts is not correct"); + assertEquals(maxAttempts, attempts.size(), "Num attempts is not correct"); for (TaskAttempt attempt : attempts.values()) { - Assertions.assertEquals(TaskAttemptState.FAILED -, attempt.getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.FAILED, attempt.getReport().getTaskAttemptState(), + "Attempt state not correct"); } } @@ -185,13 +183,12 @@ public void testTaskFailWithUnusedContainer() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); Map tasks = job.getTasks(); - Assertions.assertEquals(1, tasks.size(), "Num tasks is not correct"); + assertEquals(1, tasks.size(), "Num tasks is not correct"); Task task = tasks.values().iterator().next(); app.waitForState(task, TaskState.SCHEDULED); Map attempts = tasks.values().iterator() .next().getAttempts(); - Assertions.assertEquals(maxAttempts, attempts - .size(), "Num attempts is not correct"); + assertEquals(maxAttempts, attempts.size(), "Num attempts is not correct"); TaskAttempt attempt = attempts.values().iterator().next(); app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java index aaf85c6485331..c6fd9bcd98e77 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java @@ -19,14 +19,15 @@ package org.apache.hadoop.mapreduce.v2.app; import static 
org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; @@ -38,6 +39,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.Phase; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.app.job.Job; @@ -49,7 +51,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.event.EventHandler; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; public class TestFetchFailure { @@ -64,8 +65,7 @@ public void testFetchFailure() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -85,18 +85,20 @@ public void testFetchFailure() throws Exception { final int checkIntervalMillis = 10; final int waitForMillis = 800; - GenericTestUtils.waitFor(() -> { - TaskAttemptCompletionEvent[] events = job - .getTaskAttemptCompletionEvents(0, 100); - return 
events.length >= 1; + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + TaskAttemptCompletionEvent[] events = job + .getTaskAttemptCompletionEvents(0, 100); + return events.length >= 1; + } }, checkIntervalMillis, waitForMillis); TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( - 1, events.length, "Num completion events not correct"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(), "Event status not correct"); + assertEquals(1, events.length, "Num completion events not correct"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(), + "Event status not correct"); // wait for reduce to start running app.waitForState(reduceTask, TaskState.RUNNING); @@ -113,11 +115,11 @@ public void testFetchFailure() throws Exception { app.waitForState(mapTask, TaskState.RUNNING); //map attempt must have become FAILED - Assertions.assertEquals( - TaskAttemptState.FAILED, mapAttempt1.getState(), "Map TaskAttempt state not correct"); + assertEquals(TaskAttemptState.FAILED, mapAttempt1.getState(), + "Map TaskAttempt state not correct"); - Assertions.assertEquals( - 2, mapTask.getAttempts().size(), "Num attempts in Map Task not correct"); + assertEquals(2, mapTask.getAttempts().size(), + "Num attempts in Map Task not correct"); Iterator atIt = mapTask.getAttempts().values().iterator(); atIt.next(); @@ -140,46 +142,44 @@ public void testFetchFailure() throws Exception { app.waitForState(job, JobState.SUCCEEDED); //previous completion event now becomes obsolete - Assertions.assertEquals( + assertEquals( TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), "Event status not correct"); events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( - 4, events.length, "Num completion events not correct"); - Assertions.assertEquals( - mapAttempt1.getID(), events[0].getAttemptId(), "Event map attempt 
id not correct"); - Assertions.assertEquals( - mapAttempt1.getID(), events[1].getAttemptId(), "Event map attempt id not correct"); - Assertions.assertEquals( - mapAttempt2.getID(), events[2].getAttemptId(), "Event map attempt id not correct"); - Assertions.assertEquals( - reduceAttempt.getID(), events[3].getAttemptId(), "Event reduce attempt id not correct"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), "Event status not correct for map attempt1"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(), "Event status not correct for map attempt1"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(), "Event status not correct for map attempt2"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(), "Event status not correct for reduce attempt1"); + assertEquals(4, events.length, "Num completion events not correct"); + assertEquals(mapAttempt1.getID(), events[0].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(mapAttempt1.getID(), events[1].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(mapAttempt2.getID(), events[2].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(reduceAttempt.getID(), events[3].getAttemptId(), + "Event reduce attempt id not correct"); + assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), + "Event status not correct for map attempt1"); + assertEquals(TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(), + "Event status not correct for map attempt1"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(), + "Event status not correct for map attempt2"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(), + "Event status not correct for reduce attempt1"); TaskCompletionEvent mapEvents[] = job.getMapAttemptCompletionEvents(0, 
2); TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events); - Assertions.assertEquals(2, mapEvents.length, "Incorrect number of map events"); - Assertions.assertArrayEquals( + assertEquals(2, mapEvents.length, "Incorrect number of map events"); + assertArrayEquals( Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents, "Unexpected map events"); mapEvents = job.getMapAttemptCompletionEvents(2, 200); - Assertions.assertEquals(1, mapEvents.length, "Incorrect number of map events"); - Assertions.assertEquals(convertedEvents[2] -, mapEvents[0], "Unexpected map event"); + assertEquals(1, mapEvents.length, "Incorrect number of map events"); + assertEquals(convertedEvents[2], mapEvents[0], "Unexpected map event"); } /** * This tests that if a map attempt was failed (say due to fetch failures), * then it gets re-run. When the next map attempt is running, if the AM dies, * then, on AM re-run, the AM does not incorrectly remember the first failed - * attempt. Currently, recovery does not recover running tasks. Effectively, + * attempt. Currently, recovery does not recover running tasks. Effectively, * the AM re-runs the maps from scratch.
*/ @Test @@ -193,8 +193,7 @@ public void testFetchFailureWithRecovery() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -214,9 +213,8 @@ public void testFetchFailureWithRecovery() throws Exception { TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( - 1, events.length, "Num completion events not correct"); - Assertions.assertEquals( + assertEquals(1, events.length, "Num completion events not correct"); + assertEquals( TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(), "Event status not correct"); // wait for reduce to start running @@ -246,8 +244,7 @@ public void testFetchFailureWithRecovery() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); it = job.getTasks().values().iterator(); mapTask = it.next(); reduceTask = it.next(); @@ -273,7 +270,7 @@ public void testFetchFailureWithRecovery() throws Exception { app.waitForState(job, JobState.SUCCEEDED); events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals(2, events.length, "Num completion events not correct"); + assertEquals(2, events.length, "Num completion events not correct"); } @Test @@ -286,8 +283,7 @@ public void testFetchFailureMultipleReduces() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 4, job.getTasks().size(), "Num tasks not correct"); + assertEquals(4, job.getTasks().size(), "Num tasks 
not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -309,10 +305,10 @@ public void testFetchFailureMultipleReduces() throws Exception { TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( - 1, events.length, "Num completion events not correct"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(), "Event status not correct"); + assertEquals(1, events.length, + "Num completion events not correct"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus(), + "Event status not correct"); // wait for reduce to start running app.waitForState(reduceTask, TaskState.RUNNING); @@ -350,16 +346,16 @@ public void testFetchFailureMultipleReduces() throws Exception { app.waitForState(mapTask, TaskState.RUNNING); //map attempt must have become FAILED - Assertions.assertEquals( - TaskAttemptState.FAILED, mapAttempt1.getState(), "Map TaskAttempt state not correct"); + assertEquals(TaskAttemptState.FAILED, mapAttempt1.getState(), + "Map TaskAttempt state not correct"); assertThat(mapAttempt1.getDiagnostics().get(0)) .isEqualTo("Too many fetch failures. Failing the attempt. 
" + "Last failure reported by " + reduceAttempt3.getID().toString() + " from host host3"); - Assertions.assertEquals( - 2, mapTask.getAttempts().size(), "Num attempts in Map Task not correct"); + assertEquals(2, mapTask.getAttempts().size(), + "Num attempts in Map Task not correct"); Iterator atIt = mapTask.getAttempts().values().iterator(); atIt.next(); @@ -392,45 +388,43 @@ public void testFetchFailureMultipleReduces() throws Exception { app.waitForState(job, JobState.SUCCEEDED); //previous completion event now becomes obsolete - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), "Event status not correct"); + assertEquals( + TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), "Event status not correct"); events = job.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( - 6, events.length, "Num completion events not correct"); - Assertions.assertEquals( - mapAttempt1.getID(), events[0].getAttemptId(), "Event map attempt id not correct"); - Assertions.assertEquals( - mapAttempt1.getID(), events[1].getAttemptId(), "Event map attempt id not correct"); - Assertions.assertEquals( - mapAttempt2.getID(), events[2].getAttemptId(), "Event map attempt id not correct"); - Assertions.assertEquals( - reduceAttempt.getID(), events[3].getAttemptId(), "Event reduce attempt id not correct"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), "Event status not correct for map attempt1"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(), "Event status not correct for map attempt1"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(), "Event status not correct for map attempt2"); - Assertions.assertEquals( - TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(), "Event status not correct for reduce attempt1"); + assertEquals(6, events.length, "Num completion events not 
correct"); + assertEquals(mapAttempt1.getID(), events[0].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(mapAttempt1.getID(), events[1].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(mapAttempt2.getID(), events[2].getAttemptId(), + "Event map attempt id not correct"); + assertEquals(reduceAttempt.getID(), events[3].getAttemptId(), + "Event reduce attempt id not correct"); + assertEquals(TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus(), + "Event status not correct for map attempt1"); + assertEquals(TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus(), + "Event status not correct for map attempt1"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus(), + "Event status not correct for map attempt2"); + assertEquals(TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus(), + "Event status not correct for reduce attempt1"); TaskCompletionEvent mapEvents[] = job.getMapAttemptCompletionEvents(0, 2); TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events); - Assertions.assertEquals(2, mapEvents.length, "Incorrect number of map events"); - Assertions.assertArrayEquals( + assertEquals(2, mapEvents.length, "Incorrect number of map events"); + assertArrayEquals( Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents, "Unexpected map events"); mapEvents = job.getMapAttemptCompletionEvents(2, 200); - Assertions.assertEquals(1, mapEvents.length, "Incorrect number of map events"); - Assertions.assertEquals(convertedEvents[2] -, mapEvents[0], "Unexpected map event"); + assertEquals(1, mapEvents.length, "Incorrect number of map events"); + assertEquals(convertedEvents[2], mapEvents[0], "Unexpected map event"); } private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) { TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus(); status.counters = new Counters(); - status.fetchFailedMaps = new 
ArrayList<>(); + status.fetchFailedMaps = new ArrayList(); status.id = attempt.getID(); status.mapFinishTime = 0; status.phase = phase; @@ -449,7 +443,8 @@ private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, app.getContext().getEventHandler().handle( new JobTaskAttemptFetchFailureEvent( reduceAttempt.getID(), - Collections.singletonList(mapAttempt.getID()), hostname)); + Arrays.asList(new TaskAttemptId[] {mapAttempt.getID()}), + hostname)); } static class MRAppWithHistory extends MRApp { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java index 158e494ff86df..460432d7868fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java @@ -18,6 +18,10 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -59,7 +63,6 @@ import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; /** @@ -74,15 +77,15 @@ private void testNumRetries(Configuration conf) { 
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "0"); conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "10"); setConf(conf); - Assertions.assertEquals(0, numTries, "Expected numTries to be 0, but was " + numTries); + assertEquals(0, numTries, "Expected numTries to be 0, but was " + numTries); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1"); setConf(conf); - Assertions.assertEquals(1, numTries, "Expected numTries to be 1, but was " + numTries); + assertEquals(1, numTries, "Expected numTries to be 1, but was " + numTries); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "20"); setConf(conf); - Assertions.assertEquals(11, numTries, "Expected numTries to be 11, but was " + numTries); //11 because number of _retries_ is 10 + assertEquals(11, numTries, "Expected numTries to be 11, but was " + numTries); //11 because number of _retries_ is 10 } //Test maximum retry interval is capped by @@ -91,52 +94,49 @@ private void testWaitInterval(Configuration conf) { conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000"); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "1000"); setConf(conf); - Assertions.assertEquals(1000, waitInterval, "Expected waitInterval to be 1000, but was " - + waitInterval); + assertEquals(1000, waitInterval, "Expected waitInterval to be 1000, but was " + waitInterval); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "10000"); setConf(conf); - Assertions.assertEquals(5000, waitInterval, "Expected waitInterval to be 5000, but was " - + waitInterval); + assertEquals(5000, waitInterval, "Expected waitInterval to be 5000, but was " + waitInterval); //Test negative numbers are set to default conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "-10"); setConf(conf); - Assertions.assertEquals(5000, waitInterval, "Expected waitInterval to be 5000, but was " - + waitInterval); + assertEquals(5000, waitInterval, "Expected waitInterval to be 5000, but was " + waitInterval); } private void testTimeout(Configuration 
conf) { conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_TIMEOUT, "1000"); setConf(conf); - Assertions.assertEquals(1000, timeout, "Expected timeout to be 1000, but was " - + timeout); + assertEquals(1000, timeout, "Expected timeout to be 1000, but was " + timeout); } private void testProxyConfiguration(Configuration conf) { conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost"); setConf(conf); - Assertions.assertSame(proxyToUse.type(), Proxy.Type.DIRECT, "Proxy shouldn't be set because port wasn't specified"); + assertTrue(proxyToUse.type() == Proxy.Type.DIRECT, + "Proxy shouldn't be set because port wasn't specified"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:someport"); setConf(conf); - Assertions.assertSame(proxyToUse.type(), Proxy.Type.DIRECT, "Proxy shouldn't be set because port wasn't numeric"); + assertTrue(proxyToUse.type() == Proxy.Type.DIRECT, + "Proxy shouldn't be set because port wasn't numeric"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:1000"); setConf(conf); - Assertions.assertEquals("Proxy should have been set but wasn't ", - "HTTP @ somehost:1000", proxyToUse.toString()); + assertEquals("HTTP @ somehost:1000", proxyToUse.toString(), + "Proxy should have been set but wasn't "); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "socks@somehost:1000"); setConf(conf); - Assertions.assertEquals("Proxy should have been socks but wasn't ", - "SOCKS @ somehost:1000", proxyToUse.toString()); + assertEquals("SOCKS @ somehost:1000", proxyToUse.toString(), + "Proxy should have been socks but wasn't "); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "SOCKS@somehost:1000"); setConf(conf); - Assertions.assertEquals("Proxy should have been socks but wasn't ", - "SOCKS @ somehost:1000", proxyToUse.toString()); + assertEquals("SOCKS @ somehost:1000", proxyToUse.toString(), + "Proxy should have been socks but wasn't "); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "sfafn@somehost:1000"); setConf(conf); - 
Assertions.assertEquals("Proxy should have been http but wasn't ", - "HTTP @ somehost:1000", proxyToUse.toString()); - + assertEquals("HTTP @ somehost:1000", proxyToUse.toString(), + "Proxy should have been http but wasn't "); } /** @@ -176,9 +176,9 @@ public void testNotifyRetries() throws InterruptedException { this.setConf(conf); this.notify(jobReport); long endTime = System.currentTimeMillis(); - Assertions.assertEquals(1, this.notificationCount, "Only 1 try was expected but was : " + assertEquals(1, this.notificationCount, "Only 1 try was expected but was : " + this.notificationCount); - Assertions.assertTrue(endTime - startTime > 5000, "Should have taken more than 5 seconds it took " + assertTrue(endTime - startTime > 5000, "Should have taken more than 5 seconds it took " + (endTime - startTime)); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "3"); @@ -191,10 +191,10 @@ public void testNotifyRetries() throws InterruptedException { this.setConf(conf); this.notify(jobReport); endTime = System.currentTimeMillis(); - Assertions.assertEquals(3, this.notificationCount, "Only 3 retries were expected but was : " - + this.notificationCount); - Assertions.assertTrue(endTime - startTime > 9000, "Should have taken more than 9 seconds it took " - + (endTime - startTime)); + assertEquals(3, this.notificationCount, "Only 3 retries were expected but was : " + + this.notificationCount); + assertTrue(endTime - startTime > 9000, "Should have taken more than 9 seconds it took " + + (endTime - startTime)); } @@ -217,12 +217,11 @@ private void testNotificationOnLastRetry(boolean withRuntimeException) doThrow(runtimeException).when(app).stop(); } app.shutDownJob(); - Assertions.assertTrue(app.isLastAMRetry()); - Assertions.assertEquals(1, JobEndServlet.calledTimes); - Assertions.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED", + assertTrue(app.isLastAMRetry()); + assertEquals(1, JobEndServlet.calledTimes); + assertEquals("jobid=" + job.getID() + 
"&status=SUCCEEDED", JobEndServlet.requestUri.getQuery()); - Assertions.assertEquals(JobState.SUCCEEDED.toString(), - JobEndServlet.foundJobState); + assertEquals(JobState.SUCCEEDED.toString(), JobEndServlet.foundJobState); server.stop(); } @@ -257,10 +256,10 @@ public void testAbsentNotificationOnNotLastRetryUnregistrationFailure() app.shutDownJob(); // Not the last AM attempt. So user should that the job is still running. app.waitForState(job, JobState.RUNNING); - Assertions.assertFalse(app.isLastAMRetry()); - Assertions.assertEquals(0, JobEndServlet.calledTimes); - Assertions.assertNull(JobEndServlet.requestUri); - Assertions.assertNull(JobEndServlet.foundJobState); + assertFalse(app.isLastAMRetry()); + assertEquals(0, JobEndServlet.calledTimes); + assertNull(JobEndServlet.requestUri); + assertNull(JobEndServlet.foundJobState); server.stop(); } @@ -289,11 +288,11 @@ public void testNotificationOnLastRetryUnregistrationFailure() // Unregistration fails: isLastAMRetry is recalculated, this is ///reboot will stop service internally, we don't need to shutdown twice app.waitForServiceToStop(10000); - Assertions.assertFalse(app.isLastAMRetry()); + assertFalse(app.isLastAMRetry()); // Since it's not last retry, JobEndServlet didn't called - Assertions.assertEquals(0, JobEndServlet.calledTimes); - Assertions.assertNull(JobEndServlet.requestUri); - Assertions.assertNull(JobEndServlet.foundJobState); + assertEquals(0, JobEndServlet.calledTimes); + assertNull(JobEndServlet.requestUri); + assertNull(JobEndServlet.foundJobState); server.stop(); } @@ -316,8 +315,8 @@ public void testCustomNotifierClass() throws InterruptedException { this.notify(jobReport); final URL urlToNotify = CustomNotifier.urlToNotify; - Assertions.assertEquals("http://example.com?jobId=mock-Id&jobStatus=SUCCEEDED", - urlToNotify.toString()); + assertEquals("http://example.com?jobId=mock-Id&jobStatus=SUCCEEDED", + urlToNotify.toString()); } public static final class CustomNotifier implements 
CustomJobEndNotifier { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java index 28b56891586f0..9a817622b1674 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java @@ -23,7 +23,6 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.service.Service; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -50,6 +49,8 @@ import org.apache.hadoop.yarn.event.Event; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Tests the state machine with respect to Job/Task/TaskAttempt kill scenarios. 
* @@ -83,18 +84,15 @@ public void testKillJob() throws Exception { app.waitForState(Service.STATE.STOPPED); Map tasks = job.getTasks(); - Assertions.assertEquals(1, - tasks.size(), "No of tasks is not correct"); + assertEquals(1, tasks.size(), "No of tasks is not correct"); Task task = tasks.values().iterator().next(); - Assertions.assertEquals(TaskState.KILLED, - task.getReport().getTaskState(), "Task state not correct"); + assertEquals(TaskState.KILLED, task.getReport().getTaskState(), "Task state not correct"); Map attempts = tasks.values().iterator().next().getAttempts(); - Assertions.assertEquals(1, - attempts.size(), "No of attempts is not correct"); + assertEquals(1, attempts.size(), "No of attempts is not correct"); Iterator it = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.KILLED, - it.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.KILLED, it.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); } @Test @@ -107,8 +105,7 @@ public void testKillTask() throws Exception { //wait and validate for Job to become RUNNING app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING); Map tasks = job.getTasks(); - Assertions.assertEquals(2, - tasks.size(), "No of tasks is not correct"); + assertEquals(2, tasks.size(), "No of tasks is not correct"); Iterator it = tasks.values().iterator(); Task task1 = it.next(); Task task2 = it.next(); @@ -126,23 +123,22 @@ public void testKillTask() throws Exception { //first Task is killed and second is Succeeded //Job is succeeded - Assertions.assertEquals(TaskState.KILLED, - task1.getReport().getTaskState(), "Task state not correct"); - Assertions.assertEquals(TaskState.SUCCEEDED, - task2.getReport().getTaskState(), "Task state not correct"); + assertEquals(TaskState.KILLED, task1.getReport().getTaskState(), + "Task state not correct"); + assertEquals(TaskState.SUCCEEDED, task2.getReport().getTaskState(), + "Task state 
not correct"); Map attempts = task1.getAttempts(); - Assertions.assertEquals(1, - attempts.size(), "No of attempts is not correct"); + assertEquals(1, attempts.size(), + "No of attempts is not correct"); Iterator iter = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.KILLED, - iter.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.KILLED, iter.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); attempts = task2.getAttempts(); - Assertions.assertEquals(1, - attempts.size(), "No of attempts is not correct"); + assertEquals(1, attempts.size(), "No of attempts is not correct"); iter = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.SUCCEEDED, - iter.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.SUCCEEDED, iter.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); } @Test @@ -194,7 +190,7 @@ public Dispatcher createDispatcher() { Job job = app.submit(new Configuration()); JobId jobId = app.getJobId(); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -232,7 +228,7 @@ public Dispatcher createDispatcher() { Job job = app.submit(new Configuration()); JobId jobId = app.getJobId(); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -280,7 +276,7 @@ public Dispatcher createDispatcher() { Job job = app.submit(new Configuration()); JobId jobId = app.getJobId(); app.waitForState(job, 
JobState.RUNNING); - Assertions.assertEquals(2, job.getTasks().size(), "Num tasks not correct"); + assertEquals(2, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask = it.next(); Task reduceTask = it.next(); @@ -370,8 +366,7 @@ public void testKillTaskAttempt() throws Exception { //wait and validate for Job to become RUNNING app.waitForState(job, JobState.RUNNING); Map tasks = job.getTasks(); - Assertions.assertEquals(2, - tasks.size(), "No of tasks is not correct"); + assertEquals(2, tasks.size(), "No of tasks is not correct"); Iterator it = tasks.values().iterator(); Task task1 = it.next(); Task task2 = it.next(); @@ -394,26 +389,24 @@ public void testKillTaskAttempt() throws Exception { //first Task will have two attempts 1st is killed, 2nd Succeeds //both Tasks and Job succeeds - Assertions.assertEquals(TaskState.SUCCEEDED, - task1.getReport().getTaskState(), "Task state not correct"); - Assertions.assertEquals(TaskState.SUCCEEDED, - task2.getReport().getTaskState(), "Task state not correct"); + assertEquals(TaskState.SUCCEEDED, task1.getReport().getTaskState(), + "Task state not correct"); + assertEquals(TaskState.SUCCEEDED, task2.getReport().getTaskState(), + "Task state not correct"); Map attempts = task1.getAttempts(); - Assertions.assertEquals(2, - attempts.size(), "No of attempts is not correct"); + assertEquals(2, attempts.size(), "No of attempts is not correct"); Iterator iter = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.KILLED, - iter.next().getReport().getTaskAttemptState(), "Attempt state not correct"); - Assertions.assertEquals(TaskAttemptState.SUCCEEDED, - iter.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.KILLED, iter.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); + assertEquals(TaskAttemptState.SUCCEEDED, iter.next().getReport().getTaskAttemptState(), + "Attempt state not 
correct"); attempts = task2.getAttempts(); - Assertions.assertEquals(1, - attempts.size(), "No of attempts is not correct"); + assertEquals(1, attempts.size(), "No of attempts is not correct"); iter = attempts.values().iterator(); - Assertions.assertEquals(TaskAttemptState.SUCCEEDED, - iter.next().getReport().getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.SUCCEEDED, iter.next().getReport().getTaskAttemptState(), + "Attempt state not correct"); } static class BlockingMRApp extends MRApp { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java index afc632d47f382..62e016a734b5d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java @@ -64,7 +64,7 @@ public void testKillAMPreemptPolicy() { when(mPctxt.getTaskAttempt(any(ContainerId.class))).thenReturn( MRBuilderUtils.newTaskAttemptId(MRBuilderUtils.newTaskId( MRBuilderUtils.newJobId(appId, 1), 1, TaskType.MAP), 0)); - List p = new ArrayList<>(); + List p = new ArrayList(); p.add(Container.newInstance(container, null, null, null, null, null)); when(mPctxt.getContainers(any(TaskType.class))).thenReturn(p); @@ -123,7 +123,7 @@ private PreemptionMessage getPreemptionMessage(boolean strictContract, boolean contract, final ContainerId container) { PreemptionMessage preemptionMessage = recordFactory .newRecordInstance(PreemptionMessage.class); - Set cntrs = new HashSet<>(); + Set cntrs = new HashSet(); PreemptionContainer preemptContainer = 
recordFactory .newRecordInstance(PreemptionContainer.class); preemptContainer.setId(container); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java index ab19eb3603762..1ccad031b4cd4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java @@ -18,6 +18,8 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -30,7 +32,6 @@ import java.util.function.Supplier; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -69,7 +70,6 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; /** * Tests the state machine of MR App. 
@@ -83,7 +83,7 @@ public void testMapReduce() throws Exception { Job job = app.submit(new Configuration()); app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertEquals(System.getProperty("user.name"),job.getUserName()); + assertEquals(System.getProperty("user.name"),job.getUserName()); } @Test @@ -106,7 +106,7 @@ public void testCommitPending() throws Exception { MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true); Job job = app.submit(new Configuration()); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -151,7 +151,7 @@ public void testCompletedMapsForReduceSlowstart() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals(3, job.getTasks().size(), "Num tasks not correct"); + assertEquals(3, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -170,7 +170,7 @@ public void testCompletedMapsForReduceSlowstart() throws Exception { app.waitForState(task2Attempt, TaskAttemptState.RUNNING); // reduces must be in NEW state - Assertions.assertEquals( + assertEquals( TaskState.NEW, reduceTask.getReport().getTaskState(), "Reduce Task state not correct"); //send the done signal to the 1st map task @@ -210,7 +210,7 @@ public void testUpdatedNodes() throws Exception { int runCount = 0; AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(new Configuration()); - Dispatcher disp = Mockito.spy(dispatcher); + Dispatcher disp = spy(dispatcher); MRApp app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), true, ++runCount, disp); Configuration conf = new Configuration(); 
@@ -224,7 +224,7 @@ public void testUpdatedNodes() throws Exception { final Job job1 = app.submit(conf); app.waitForState(job1, JobState.RUNNING); - Assertions.assertEquals(4, job1.getTasks().size(), "Num tasks not correct"); + assertEquals(4, job1.getTasks().size(), "Num tasks not correct"); Iterator it = job1.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -239,7 +239,7 @@ public void testUpdatedNodes() throws Exception { .next(); NodeId node1 = task1Attempt.getNodeId(); NodeId node2 = task2Attempt.getNodeId(); - Assertions.assertEquals(node1, node2); + assertEquals(node1, node2); // send the done signal to the task app.getContext() @@ -268,7 +268,7 @@ public void testUpdatedNodes() throws Exception { TaskAttemptCompletionEvent[] events = job1.getTaskAttemptCompletionEvents (0, 100); - Assertions.assertEquals(2 + assertEquals(2 , events.length, "Expecting 2 completion events for success"); // send updated nodes info @@ -291,7 +291,7 @@ public void testUpdatedNodes() throws Exception { }, checkIntervalMillis, waitForMillis); events = job1.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals(4 + assertEquals(4 , events.length, "Expecting 2 more completion events for killed"); // 2 map task attempts which were killed above should be requested from // container allocator with the previous map task marked as failed. If @@ -329,7 +329,7 @@ public Boolean get() { }, checkIntervalMillis, waitForMillis); events = job1.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals(5 + assertEquals(5 , events.length, "Expecting 1 more completion events for success"); // Crash the app again. 
@@ -345,7 +345,7 @@ public Boolean get() { final Job job2 = app.submit(conf); app.waitForState(job2, JobState.RUNNING); - Assertions.assertEquals(4, job2.getTasks().size(), "No of tasks not correct"); + assertEquals(4, job2.getTasks().size(), "No of tasks not correct"); it = job2.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -363,7 +363,7 @@ public Boolean get() { }, checkIntervalMillis, waitForMillis); events = job2.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( + assertEquals( 2 , events.length, "Expecting 2 completion events for killed & success of map1"); @@ -382,7 +382,7 @@ public Boolean get() { }, checkIntervalMillis, waitForMillis); events = job2.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals(3 + assertEquals(3 , events.length, "Expecting 1 more completion events for success"); app.waitForState(reduceTask1, TaskState.RUNNING); @@ -418,7 +418,7 @@ public Boolean get() { return events14.length == 5; }, checkIntervalMillis, waitForMillis); events = job2.getTaskAttemptCompletionEvents(0, 100); - Assertions.assertEquals( + assertEquals( 5, events.length, "Expecting 2 more completion events for reduce success"); // job succeeds @@ -457,7 +457,7 @@ public void testJobError() throws Exception { MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true); Job job = app.submit(new Configuration()); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -478,7 +478,7 @@ public void testJobSuccess() throws Exception { JobImpl job = (JobImpl) app.submit(new Configuration()); app.waitForInternalState(job, JobStateInternal.SUCCEEDED); // AM is not unregistered - Assertions.assertEquals(JobState.RUNNING, job.getState()); + 
assertEquals(JobState.RUNNING, job.getState()); // imitate that AM is unregistered app.successfullyUnregistered.set(true); app.waitForState(job, JobState.SUCCEEDED); @@ -490,7 +490,7 @@ public void testJobRebootNotLastRetryOnUnregistrationFailure() MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true); Job job = app.submit(new Configuration()); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -515,7 +515,7 @@ public void testJobRebootOnLastRetryOnUnregistrationFailure() Configuration conf = new Configuration(); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -609,7 +609,7 @@ public void handle(ContainerLauncherEvent event) { (TaskAttemptImpl) taskAttempts.iterator().next(); // Container from RM should pass through to the launcher. Container object // should be the same. 
- Assertions.assertSame(taskAttempt.container, containerObtainedByContainerLauncher); + assertSame(taskAttempt.container, containerObtainedByContainerLauncher); } private final class MRAppWithHistory extends MRApp { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java index afa8cd565db49..f39503ecef486 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java @@ -20,8 +20,6 @@ import java.io.IOException; -import org.junit.jupiter.api.Assertions; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; @@ -38,6 +36,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestMRAppComponentDependencies { @Test @@ -56,8 +56,8 @@ public void testComponentStopOrder() throws Exception { } // assert JobHistoryEventHandlerStopped and then clientServiceStopped - Assertions.assertEquals(1, app.JobHistoryEventHandlerStopped); - Assertions.assertEquals(2, app.clientServiceStopped); + assertEquals(1, app.JobHistoryEventHandlerStopped); + assertEquals(2, app.clientServiceStopped); } private final class TestMRApp extends MRApp { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java index 13436fa86ad32..e1c48d842a0d1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java @@ -19,6 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -40,7 +41,6 @@ import java.util.HashMap; import java.util.Map; -import org.junit.jupiter.api.Assertions; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; @@ -467,38 +467,36 @@ public void testMRAppMasterCredentials() throws Exception { // Now validate the task credentials Credentials appMasterCreds = appMaster.getCredentials(); - Assertions.assertNotNull(appMasterCreds); - Assertions.assertEquals(1, appMasterCreds.numberOfSecretKeys()); - Assertions.assertEquals(1, appMasterCreds.numberOfTokens()); + assertNotNull(appMasterCreds); + assertEquals(1, appMasterCreds.numberOfSecretKeys()); + assertEquals(1, appMasterCreds.numberOfTokens()); // Validate the tokens - app token should not be present Token usedToken = appMasterCreds.getToken(tokenAlias); - Assertions.assertNotNull(usedToken); - Assertions.assertEquals(storedToken, usedToken); + assertNotNull(usedToken); + assertEquals(storedToken, usedToken); // Validate the keys byte[] usedKey = appMasterCreds.getSecretKey(keyAlias); - Assertions.assertNotNull(usedKey); - 
Assertions.assertEquals("mySecretKey", new String(usedKey)); + assertNotNull(usedKey); + assertEquals("mySecretKey", new String(usedKey)); // The credentials should also be added to conf so that OuputCommitter can // access it - app token should not be present Credentials confCredentials = conf.getCredentials(); - Assertions.assertEquals(1, confCredentials.numberOfSecretKeys()); - Assertions.assertEquals(1, confCredentials.numberOfTokens()); - Assertions.assertEquals(storedToken, confCredentials.getToken(tokenAlias)); - Assertions.assertEquals("mySecretKey", - new String(confCredentials.getSecretKey(keyAlias))); + assertEquals(1, confCredentials.numberOfSecretKeys()); + assertEquals(1, confCredentials.numberOfTokens()); + assertEquals(storedToken, confCredentials.getToken(tokenAlias)); + assertEquals("mySecretKey", new String(confCredentials.getSecretKey(keyAlias))); // Verify the AM's ugi - app token should be present Credentials ugiCredentials = appMaster.getUgi().getCredentials(); - Assertions.assertEquals(1, ugiCredentials.numberOfSecretKeys()); - Assertions.assertEquals(2, ugiCredentials.numberOfTokens()); - Assertions.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias)); - Assertions.assertEquals(appToken, ugiCredentials.getToken(appTokenService)); - Assertions.assertEquals("mySecretKey", - new String(ugiCredentials.getSecretKey(keyAlias))); + assertEquals(1, ugiCredentials.numberOfSecretKeys()); + assertEquals(2, ugiCredentials.numberOfTokens()); + assertEquals(storedToken, ugiCredentials.getToken(tokenAlias)); + assertEquals(appToken, ugiCredentials.getToken(appTokenService)); + assertEquals("mySecretKey", new String(ugiCredentials.getSecretKey(keyAlias))); } @@ -527,9 +525,9 @@ public void testMRAppMasterShutDownJob() throws Exception, doNothing().when(appMaster).serviceStop(); // Test normal shutdown. 
appMaster.shutDownJob(); - Assertions.assertTrue( + assertTrue( ExitUtil.terminateCalled(), "Expected shutDownJob to terminate."); - Assertions.assertEquals( + assertEquals( 0, ExitUtil.getFirstExitException().status, "Expected shutDownJob to exit with status code of 0."); // Test shutdown with exception. @@ -540,7 +538,7 @@ public void testMRAppMasterShutDownJob() throws Exception, appMaster.shutDownJob(); assertTrue( ExitUtil.getFirstExitException().getMessage().contains(msg), "Expected message from ExitUtil.ExitException to be " + msg); - Assertions.assertEquals( + assertEquals( 1, ExitUtil.getFirstExitException().status, "Expected shutDownJob to exit with status code of 1."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index 6fcc17aacf371..b4f271f607f09 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,6 +18,10 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; @@ -26,8 +30,6 @@ import java.util.List; import java.util.concurrent.atomic.AtomicReference; -import org.junit.jupiter.api.Assertions; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.JobACL; import 
org.apache.hadoop.mapreduce.JobID; @@ -82,7 +84,7 @@ public void test() throws Exception { Configuration conf = new Configuration(); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -116,8 +118,7 @@ public void test() throws Exception { GetCountersRequest gcRequest = recordFactory.newRecordInstance(GetCountersRequest.class); gcRequest.setJobId(job.getID()); - Assertions.assertNotNull( - proxy.getCounters(gcRequest).getCounters(), "Counters is null"); + assertNotNull(proxy.getCounters(gcRequest).getCounters(), "Counters is null"); GetJobReportRequest gjrRequest = recordFactory.newRecordInstance(GetJobReportRequest.class); @@ -131,14 +132,14 @@ public void test() throws Exception { gtaceRequest.setJobId(job.getID()); gtaceRequest.setFromEventId(0); gtaceRequest.setMaxEvents(10); - Assertions.assertNotNull( - proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList(), "TaskCompletionEvents is null"); + assertNotNull(proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList(), + "TaskCompletionEvents is null"); GetDiagnosticsRequest gdRequest = recordFactory.newRecordInstance(GetDiagnosticsRequest.class); gdRequest.setTaskAttemptId(attempt.getID()); - Assertions.assertNotNull( - proxy.getDiagnostics(gdRequest).getDiagnosticsList(), "Diagnostics is null"); + assertNotNull(proxy.getDiagnostics(gdRequest).getDiagnosticsList(), + "Diagnostics is null"); GetTaskAttemptReportRequest gtarRequest = recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class); @@ -151,31 +152,29 @@ public void test() throws Exception { GetTaskReportRequest gtrRequest = recordFactory.newRecordInstance(GetTaskReportRequest.class); gtrRequest.setTaskId(task.getID()); - 
Assertions.assertNotNull( - proxy.getTaskReport(gtrRequest).getTaskReport(), "TaskReport is null"); + assertNotNull(proxy.getTaskReport(gtrRequest).getTaskReport(), + "TaskReport is null"); GetTaskReportsRequest gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class); gtreportsRequest.setJobId(job.getID()); gtreportsRequest.setTaskType(TaskType.MAP); - Assertions.assertNotNull( - proxy.getTaskReports(gtreportsRequest).getTaskReportList(), "TaskReports for map is null"); + assertNotNull(proxy.getTaskReports(gtreportsRequest).getTaskReportList(), + "TaskReports for map is null"); gtreportsRequest = recordFactory.newRecordInstance(GetTaskReportsRequest.class); gtreportsRequest.setJobId(job.getID()); gtreportsRequest.setTaskType(TaskType.REDUCE); - Assertions.assertNotNull( - proxy.getTaskReports(gtreportsRequest).getTaskReportList(), "TaskReports for reduce is null"); + assertNotNull(proxy.getTaskReports(gtreportsRequest).getTaskReportList(), + "TaskReports for reduce is null"); List diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList(); - Assertions.assertEquals(1 , diag.size(), "Num diagnostics not correct"); - Assertions.assertEquals("Diag 1 not correct", - diagnostic1, diag.get(0)); + assertEquals(1, diag.size(), "Num diagnostics not correct"); + assertEquals(diagnostic1, diag.get(0), "Diag 1 not correct"); TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport(); - Assertions.assertEquals(1 -, taskReport.getDiagnosticsCount(), "Num diagnostics not correct"); + assertEquals(1, taskReport.getDiagnosticsCount(), "Num diagnostics not correct"); //send the done signal to the task app.getContext().getEventHandler().handle( @@ -207,7 +206,7 @@ public void testViewAclOnlyCannotModify() throws Exception { conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(1, job.getTasks().size(), "Num tasks not correct"); + assertEquals(1, 
job.getTasks().size(), "Num tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task task = it.next(); app.waitForState(task, TaskState.RUNNING); @@ -217,10 +216,8 @@ public void testViewAclOnlyCannotModify() throws Exception { UserGroupInformation viewOnlyUser = UserGroupInformation.createUserForTesting( "viewonlyuser", new String[] {}); - Assertions.assertTrue( - job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB), "viewonlyuser cannot view job"); - Assertions.assertFalse( - job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB), "viewonlyuser can modify job"); + assertTrue(job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB), "viewonlyuser cannot view job"); + assertFalse(job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB), "viewonlyuser can modify job"); MRClientProtocol client = viewOnlyUser.doAs( (PrivilegedExceptionAction) () -> { YarnRPC rpc = YarnRPC.create(conf); @@ -270,29 +267,27 @@ public void testViewAclOnlyCannotModify() throws Exception { } private void verifyJobReport(JobReport jr) { - Assertions.assertNotNull(jr, "JobReport is null"); + assertNotNull(jr, "JobReport is null"); List amInfos = jr.getAMInfos(); - Assertions.assertEquals(1, amInfos.size()); - Assertions.assertEquals(JobState.RUNNING, jr.getJobState()); + assertEquals(1, amInfos.size()); + assertEquals(JobState.RUNNING, jr.getJobState()); AMInfo amInfo = amInfos.get(0); - Assertions.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); - Assertions.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); - Assertions.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); - Assertions.assertEquals(1, amInfo.getAppAttemptId().getAttemptId()); - Assertions.assertEquals(1, amInfo.getContainerId().getApplicationAttemptId() - .getAttemptId()); - Assertions.assertTrue(amInfo.getStartTime() > 0); - Assertions.assertFalse(jr.isUber()); + assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); + assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); + 
assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); + assertEquals(1, amInfo.getAppAttemptId().getAttemptId()); + assertEquals(1, amInfo.getContainerId().getApplicationAttemptId().getAttemptId()); + assertTrue(amInfo.getStartTime() > 0); + assertFalse(jr.isUber()); } private void verifyTaskAttemptReport(TaskAttemptReport tar) { - Assertions.assertEquals(TaskAttemptState.RUNNING, tar.getTaskAttemptState()); - Assertions.assertNotNull(tar, "TaskAttemptReport is null"); - Assertions.assertEquals(MRApp.NM_HOST, tar.getNodeManagerHost()); - Assertions.assertEquals(MRApp.NM_PORT, tar.getNodeManagerPort()); - Assertions.assertEquals(MRApp.NM_HTTP_PORT, tar.getNodeManagerHttpPort()); - Assertions.assertEquals(1, tar.getContainerId().getApplicationAttemptId() - .getAttemptId()); + assertNotNull(tar, "TaskAttemptReport is null"); + assertEquals(TaskAttemptState.RUNNING, tar.getTaskAttemptState()); + assertEquals(MRApp.NM_HOST, tar.getNodeManagerHost()); + assertEquals(MRApp.NM_PORT, tar.getNodeManagerPort()); + assertEquals(MRApp.NM_HTTP_PORT, tar.getNodeManagerHttpPort()); + assertEquals(1, tar.getContainerId().getApplicationAttemptId().getAttemptId()); } class MRAppWithClientService extends MRApp { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index c2e55ff47df59..5b443549c830e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -43,7 +43,6 @@ import org.apache.hadoop.mapreduce.util.MRJobConfUtil; import 
org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -160,8 +159,7 @@ public void testCrashed() throws Exception { app.waitForState(job, JobState.RUNNING); long jobStartTime = job.getReport().getStartTime(); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -194,7 +192,7 @@ public void testCrashed() throws Exception { Thread.sleep(2000); LOG.info("Waiting for next attempt to start"); } - Assertions.assertEquals(2, mapTask1.getAttempts().size()); + assertEquals(2, mapTask1.getAttempts().size()); Iterator itr = mapTask1.getAttempts().values().iterator(); itr.next(); TaskAttempt task1Attempt2 = itr.next(); @@ -215,7 +213,7 @@ public void testCrashed() throws Exception { Thread.sleep(2000); LOG.info("Waiting for next attempt to start"); } - Assertions.assertEquals(3, mapTask1.getAttempts().size()); + assertEquals(3, mapTask1.getAttempts().size()); itr = mapTask1.getAttempts().values().iterator(); itr.next(); itr.next(); @@ -236,7 +234,7 @@ public void testCrashed() throws Exception { Thread.sleep(2000); LOG.info("Waiting for next attempt to start"); } - Assertions.assertEquals(4, mapTask1.getAttempts().size()); + assertEquals(4, mapTask1.getAttempts().size()); itr = mapTask1.getAttempts().values().iterator(); itr.next(); itr.next(); @@ -274,8 +272,7 @@ public void testCrashed() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); 
mapTask2 = it.next(); @@ -310,30 +307,26 @@ public void testCrashed() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertEquals( - jobStartTime, job.getReport().getStartTime(), "Job Start time not correct"); - Assertions.assertEquals( - task1StartTime, mapTask1.getReport().getStartTime(), "Task Start time not correct"); - Assertions.assertEquals( - task1FinishTime, mapTask1.getReport().getFinishTime(), "Task Finish time not correct"); - Assertions.assertEquals(2, job.getAMInfos().size()); + assertEquals(jobStartTime, job.getReport().getStartTime(), "Job Start time not correct"); + assertEquals(task1StartTime, mapTask1.getReport().getStartTime(), + "Task Start time not correct"); + assertEquals(task1FinishTime, mapTask1.getReport().getFinishTime(), + "Task Finish time not correct"); + assertEquals(2, job.getAMInfos().size()); int attemptNum = 1; // Verify AMInfo for (AMInfo amInfo : job.getAMInfos()) { - Assertions.assertEquals(attemptNum++, amInfo.getAppAttemptId() - .getAttemptId()); - Assertions.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId() - .getApplicationAttemptId()); - Assertions.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); - Assertions.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); - Assertions.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); + assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId()); + assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId()); + assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); + assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); + assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); } long am1StartTimeReal = job.getAMInfos().get(0).getStartTime(); long am2StartTimeReal = job.getAMInfos().get(1).getStartTime(); - Assertions.assertTrue(am1StartTimeReal >= am1StartTimeEst - && am1StartTimeReal <= am2StartTimeEst); - 
Assertions.assertTrue(am2StartTimeReal >= am2StartTimeEst - && am2StartTimeReal <= System.currentTimeMillis()); + assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst); + assertTrue(am2StartTimeReal >= am2StartTimeEst && + am2StartTimeReal <= System.currentTimeMillis()); // TODO Add verification of additional data from jobHistory - whatever was // available in the failed attempt should be available here } @@ -369,7 +362,7 @@ public void testCrashOfMapsOnlyJob() throws Exception { app.waitForState(job, JobState.RUNNING); // all maps would be running - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -427,7 +420,7 @@ public void testCrashOfMapsOnlyJob() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -514,7 +507,7 @@ public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception { app.waitForState(job, JobState.RUNNING); // all maps would be running - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -573,7 +566,7 @@ public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); 
mapTask1 = it.next(); mapTask2 = it.next(); @@ -692,7 +685,7 @@ public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception { app.waitForState(job, JobState.RUNNING); // all maps would be running - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -751,7 +744,7 @@ public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -811,8 +804,7 @@ public void testMultipleCrashes() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -831,8 +823,8 @@ public void testMultipleCrashes() throws Exception { app.waitForState(task2Attempt, TaskAttemptState.RUNNING); // reduces must be in NEW state - Assertions.assertEquals( - TaskState.RUNNING, reduceTask.getReport().getTaskState(), "Reduce Task state not correct"); + assertEquals(TaskState.RUNNING, reduceTask.getReport().getTaskState(), + "Reduce Task state not correct"); //send the done signal to the 1st map app.getContext().getEventHandler().handle( @@ -860,8 +852,7 @@ public void testMultipleCrashes() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not 
correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -903,8 +894,7 @@ public void testMultipleCrashes() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -938,7 +928,7 @@ public void testOutputRecovery() throws Exception { conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( + assertEquals( 3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); @@ -964,7 +954,7 @@ public void testOutputRecovery() throws Exception { app.waitForState(mapTask1, TaskState.SUCCEEDED); // Verify the shuffle-port - Assertions.assertEquals(5467, task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); app.waitForState(reduceTask1, TaskState.RUNNING); TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next(); @@ -996,8 +986,7 @@ public void testOutputRecovery() throws Exception { conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); reduceTask1 = it.next(); @@ -1008,7 +997,7 @@ public void testOutputRecovery() throws Exception { // Verify the shuffle-port after recovery task1Attempt1 = mapTask1.getAttempts().values().iterator().next(); - Assertions.assertEquals(5467, 
task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); // first reduce will be recovered, no need to send done app.waitForState(reduceTask1, TaskState.SUCCEEDED); @@ -1049,7 +1038,7 @@ public void testPreviousJobOutputCleanedWhenNoRecovery() throws Exception { conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); //stop the app before the job completes. app.stop(); app.close(); @@ -1059,11 +1048,10 @@ public void testPreviousJobOutputCleanedWhenNoRecovery() throws Exception { ++runCount); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); TestFileOutputCommitter committer = ( TestFileOutputCommitter) app.getCommitter(); - assertTrue( - committer.isAbortJobCalled(), "commiter.abortJob() has not been called"); + assertTrue(committer.isAbortJobCalled(), "commiter.abortJob() has not been called"); app.close(); } @@ -1084,7 +1072,7 @@ public void testPreviousJobIsNotCleanedWhenRecovery() conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); //stop the app before the job completes. 
app.stop(); app.close(); @@ -1094,7 +1082,7 @@ public void testPreviousJobIsNotCleanedWhenRecovery() ++runCount); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals(3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); TestFileOutputCommitter committer = ( TestFileOutputCommitter) app.getCommitter(); assertFalse( @@ -1114,8 +1102,7 @@ public void testOutputRecoveryMapsOnly() throws Exception { conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -1145,7 +1132,7 @@ public void testOutputRecoveryMapsOnly() throws Exception { app.waitForState(mapTask1, TaskState.SUCCEEDED); // Verify the shuffle-port - Assertions.assertEquals(5467, task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); //stop the app before the job completes. 
app.stop(); @@ -1162,8 +1149,7 @@ public void testOutputRecoveryMapsOnly() throws Exception { conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); mapTask2 = it.next(); @@ -1174,7 +1160,7 @@ public void testOutputRecoveryMapsOnly() throws Exception { // Verify the shuffle-port after recovery task1Attempt1 = mapTask1.getAttempts().values().iterator().next(); - Assertions.assertEquals(5467, task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); app.waitForState(mapTask2, TaskState.RUNNING); @@ -1195,7 +1181,7 @@ public void testOutputRecoveryMapsOnly() throws Exception { app.waitForState(mapTask2, TaskState.SUCCEEDED); // Verify the shuffle-port - Assertions.assertEquals(5467, task2Attempt1.getShufflePort()); + assertEquals(5467, task2Attempt1.getShufflePort()); app.waitForState(reduceTask1, TaskState.RUNNING); TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next(); @@ -1229,8 +1215,7 @@ public void testRecoveryWithOldCommiter() throws Exception { conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task reduceTask1 = it.next(); @@ -1255,7 +1240,7 @@ public void testRecoveryWithOldCommiter() throws Exception { app.waitForState(mapTask1, TaskState.SUCCEEDED); // Verify the shuffle-port - Assertions.assertEquals(5467, task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); app.waitForState(reduceTask1, 
TaskState.RUNNING); TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next(); @@ -1287,8 +1272,7 @@ public void testRecoveryWithOldCommiter() throws Exception { conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); job = app.submit(conf); app.waitForState(job, JobState.RUNNING); - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); reduceTask1 = it.next(); @@ -1299,7 +1283,7 @@ public void testRecoveryWithOldCommiter() throws Exception { // Verify the shuffle-port after recovery task1Attempt1 = mapTask1.getAttempts().values().iterator().next(); - Assertions.assertEquals(5467, task1Attempt1.getShufflePort()); + assertEquals(5467, task1Attempt1.getShufflePort()); // first reduce will be recovered, no need to send done app.waitForState(reduceTask1, TaskState.SUCCEEDED); @@ -1349,8 +1333,7 @@ public void testSpeculative() throws Exception { app.waitForState(job, JobState.RUNNING); long jobStartTime = job.getReport().getStartTime(); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); @@ -1423,7 +1406,7 @@ public void testSpeculative() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( + assertEquals( 3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); @@ -1460,30 +1443,26 @@ public void testSpeculative() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertEquals( - jobStartTime, job.getReport().getStartTime(), "Job Start time not correct"); - Assertions.assertEquals( - 
task1StartTime, mapTask1.getReport().getStartTime(), "Task Start time not correct"); - Assertions.assertEquals( - task1FinishTime, mapTask1.getReport().getFinishTime(), "Task Finish time not correct"); - Assertions.assertEquals(2, job.getAMInfos().size()); + assertEquals(jobStartTime, job.getReport().getStartTime(), "Job Start time not correct"); + assertEquals(task1StartTime, mapTask1.getReport().getStartTime(), + "Task Start time not correct"); + assertEquals(task1FinishTime, mapTask1.getReport().getFinishTime(), + "Task Finish time not correct"); + assertEquals(2, job.getAMInfos().size()); int attemptNum = 1; // Verify AMInfo for (AMInfo amInfo : job.getAMInfos()) { - Assertions.assertEquals(attemptNum++, amInfo.getAppAttemptId() - .getAttemptId()); - Assertions.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId() - .getApplicationAttemptId()); - Assertions.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); - Assertions.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); - Assertions.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); + assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId()); + assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId()); + assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost()); + assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort()); + assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort()); } long am1StartTimeReal = job.getAMInfos().get(0).getStartTime(); long am2StartTimeReal = job.getAMInfos().get(1).getStartTime(); - Assertions.assertTrue(am1StartTimeReal >= am1StartTimeEst - && am1StartTimeReal <= am2StartTimeEst); - Assertions.assertTrue(am2StartTimeReal >= am2StartTimeEst - && am2StartTimeReal <= System.currentTimeMillis()); + assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst); + assertTrue(am2StartTimeReal >= am2StartTimeEst && + am2StartTimeReal <= System.currentTimeMillis()); } @@ -1502,8 
+1481,7 @@ public void testRecoveryWithoutShuffleSecret() throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( - 3, job.getTasks().size(), "No of tasks not correct"); + assertEquals(3, job.getTasks().size(), "No of tasks not correct"); Iterator it = job.getTasks().values().iterator(); Task mapTask1 = it.next(); Task mapTask2 = it.next(); @@ -1549,7 +1527,7 @@ public void testRecoveryWithoutShuffleSecret() throws Exception { job = app.submit(conf); app.waitForState(job, JobState.RUNNING); //all maps would be running - Assertions.assertEquals( + assertEquals( 3, job.getTasks().size(), "No of tasks not correct"); it = job.getTasks().values().iterator(); mapTask1 = it.next(); @@ -1883,12 +1861,11 @@ private void recoveryChecker(MapTaskImpl checkTask, TaskState finalState, Map recoveredAttempts = checkTask.getAttempts(); - assertEquals( - finalAttemptStates.size(), recoveredAttempts.size(), "Expected Number of Task Attempts"); + assertEquals(finalAttemptStates.size(), recoveredAttempts.size(), + "Expected Number of Task Attempts"); for (TaskAttemptID taID : finalAttemptStates.keySet()) { - assertEquals( - finalAttemptStates.get(taID) -, recoveredAttempts.get(TypeConverter.toYarn(taID)).getState(), "Expected Task Attempt State"); + assertEquals(finalAttemptStates.get(taID), + recoveredAttempts.get(TypeConverter.toYarn(taID)).getState(), "Expected Task Attempt State"); } Iterator ie = arg.getAllValues().iterator(); @@ -1936,12 +1913,10 @@ private void recoveryChecker(MapTaskImpl checkTask, TaskState finalState, } } assertTrue(jobTaskEventReceived || (finalState == TaskState.RUNNING)); - assertEquals( - 0, expectedJobHistoryEvents.size(), "Did not process all expected JobHistoryEvents"); - assertEquals( - expectedMapLaunches, totalLaunchedMaps, "Expected Map Launches"); - assertEquals( - expectedFailedMaps, totalFailedMaps, "Expected Failed Maps"); + assertEquals(0, 
expectedJobHistoryEvents.size(), + "Did not process all expected JobHistoryEvents"); + assertEquals(expectedMapLaunches, totalLaunchedMaps, "Expected Map Launches"); + assertEquals(expectedFailedMaps, totalFailedMaps, "Expected Failed Maps"); } private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) { @@ -2153,4 +2128,4 @@ public static void main(String[] arg) throws Exception { test.testRecoveryTaskSuccessAllAttemptsSucceed(); test.testRecoveryAllAttemptsKilled(); } -} +} \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index ed812298f3173..f8f7c2092f409 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -62,7 +63,6 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -136,7 +136,7 @@ public void testDeletionofStaging() throws IOException { JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); ContainerAllocator mockAlloc = 
mock(ContainerAllocator.class); - Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -160,7 +160,7 @@ public void testNoDeletionofStagingOnReboot() throws IOException { 0); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); - Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.REBOOT, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -250,8 +250,8 @@ public void testDeletionofStagingOnKillLastTry() throws IOException { MRAppMaster.MRAppMasterShutdownHook hook = new MRAppMaster.MRAppMasterShutdownHook(appMaster); hook.run(); - assertTrue( - appMaster.isInState(Service.STATE.STOPPED), "MRAppMaster isn't stopped"); + assertTrue(appMaster.isInState(Service.STATE.STOPPED), + "MRAppMaster isn't stopped"); verify(fs).delete(stagingJobPath, true); } @@ -273,7 +273,7 @@ public void testByPreserveFailedStaging() throws IOException { JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); - Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.FAILED, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -301,7 +301,7 @@ public void testPreservePatternMatchedStaging() throws IOException { JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); - 
Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -327,7 +327,7 @@ public void testNotPreserveNotPatternMatchedStaging() throws IOException { JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); - Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -358,7 +358,7 @@ public void testPreservePatternMatchedAndFailedStaging() throws IOException { JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); - Assertions.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); + assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS); appMaster.init(conf); @@ -418,7 +418,8 @@ protected Job createJob(Configuration conf, JobStateInternal forcedState, JobID jobID = JobID.forName("job_1234567890000_0001"); JobId jobId = TypeConverter.toYarn(jobID); when(jobImpl.getID()).thenReturn(jobId); - getContext().getAllJobs().put(jobImpl.getID(), jobImpl); + ((AppContext) getContext()) + .getAllJobs().put(jobImpl.getID(), jobImpl); return jobImpl; } @@ -519,7 +520,7 @@ protected Job createJob(Configuration conf, JobStateInternal forcedState, getCommitter(), isNewApiCommitter(), currentUser.getUserName(), getContext(), forcedState, diagnostic); - getContext().getAllJobs().put(newJob.getID(), newJob); + ((AppContext) 
getContext()).getAllJobs().put(newJob.getID(), newJob); getDispatcher().register(JobFinishEvent.Type.class, createJobFinishEventHandler()); @@ -601,7 +602,7 @@ public void testStagingCleanupOrder() throws Exception { } // assert ContainerAllocatorStopped and then tagingDirCleanedup - Assertions.assertEquals(1, app.ContainerAllocatorStopped); - Assertions.assertEquals(2, app.stagingDirCleanedup); + assertEquals(1, app.ContainerAllocatorStopped); + assertEquals(2, app.stagingDirCleanedup); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java index b0bfbe5c303da..d36b6d7d8ba1b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java @@ -18,7 +18,9 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -39,7 +41,6 @@ import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.SystemClock; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Map; @@ -213,11 +214,11 @@ public void testTaskUnregistered() throws Exception { JobId jobId = MRBuilderUtils.newJobId(appId, 4); TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, 
TaskType.MAP); final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2); - Assertions.assertFalse(hb.hasRecentlyUnregistered(taid)); + assertFalse(hb.hasRecentlyUnregistered(taid)); hb.register(taid); - Assertions.assertFalse(hb.hasRecentlyUnregistered(taid)); + assertFalse(hb.hasRecentlyUnregistered(taid)); hb.unregister(taid); - Assertions.assertTrue(hb.hasRecentlyUnregistered(taid)); + assertTrue(hb.hasRecentlyUnregistered(taid)); long unregisterTimeout = conf.getLong(MRJobConfig.TASK_EXIT_TIMEOUT, MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT); clock.setTime(unregisterTimeout + 1); @@ -254,6 +255,7 @@ private static void verifyTaskTimeoutConfig(final Configuration conf, new TaskHeartbeatHandler(null, SystemClock.getInstance(), 1); hb.init(conf); - Assertions.assertEquals(hb.getTaskTimeOut(), expectedTimeout, "The value of the task timeout is incorrect."); + assertEquals(hb.getTaskTimeOut(), expectedTimeout, + "The value of the task timeout is incorrect."); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/commit/TestCommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/commit/TestCommitterEventHandler.java index 0bf9d8d316975..61059c29e47a0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/commit/TestCommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/commit/TestCommitterEventHandler.java @@ -18,7 +18,11 @@ package org.apache.hadoop.mapreduce.v2.app.commit; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; 
+import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -39,9 +43,6 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.*; - import java.io.File; import java.io.IOException; @@ -146,11 +147,11 @@ public void testCommitWindow() throws Exception { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals( - 1, rmhh.getNumCallbacks(), "committer did not register a heartbeat callback"); + Assertions.assertEquals(1, rmhh.getNumCallbacks(), + "committer did not register a heartbeat callback"); verify(committer, never()).commitJob(any(JobContext.class)); - Assertions.assertEquals( - 0, jeh.numCommitCompletedEvents, "committer should not have committed"); + Assertions.assertEquals(0, jeh.numCommitCompletedEvents, + "committer should not have committed"); // set a fresh heartbeat and verify commit completes rmhh.setLastHeartbeatTime(clock.getTime()); @@ -159,8 +160,8 @@ public void testCommitWindow() throws Exception { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals( - 1, jeh.numCommitCompletedEvents, "committer did not complete commit after RM heartbeat"); + Assertions.assertEquals(1, jeh.numCommitCompletedEvents, + "committer did not complete commit after RM heartbeat"); verify(committer, times(1)).commitJob(any()); //Clean up so we can try to commit again (Don't do this at home) @@ -174,8 +175,8 @@ public void testCommitWindow() throws Exception { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals( - 2, jeh.numCommitCompletedEvents, "committer did not commit"); + Assertions.assertEquals(2, jeh.numCommitCompletedEvents, + "committer did not commit"); verify(committer, times(2)).commitJob(any()); ceh.stop(); @@ -185,7 +186,8 @@ public void testCommitWindow() throws Exception { private 
static class TestingRMHeartbeatHandler implements RMHeartbeatHandler { private long lastHeartbeatTime = 0; - private ConcurrentLinkedQueue callbacks = new ConcurrentLinkedQueue<>(); + private ConcurrentLinkedQueue callbacks = + new ConcurrentLinkedQueue(); @Override public long getLastHeartbeatTime() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java index ed43cdbff1806..6311ee468cc59 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java @@ -18,11 +18,20 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl; -import static org.mockito.ArgumentMatchers.any; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.File; @@ -105,12 +114,10 @@ import org.apache.hadoop.yarn.state.StateMachineFactory; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.SystemClock; -import 
org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.mockito.Mockito; /** @@ -170,9 +177,9 @@ public void testJobNoTasks() { dispatcher.stop(); commitHandler.stop(); try { - Assertions.assertTrue(jseHandler.getAssertValue()); + assertTrue(jseHandler.getAssertValue()); } catch (InterruptedException e) { - Assertions.fail("Workflow related attributes are not tested properly"); + fail("Workflow related attributes are not tested properly"); } } @@ -293,7 +300,7 @@ public synchronized void setupJob(JobContext jobContext) assertJobState(job, JobStateInternal.REBOOT); // return the external state as RUNNING since otherwise JobClient will // exit when it polls the AM for job state - Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, job.getState()); dispatcher.stop(); commitHandler.stop(); @@ -326,9 +333,9 @@ public void testRebootedDuringCommit() throws Exception { job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT)); assertJobState(job, JobStateInternal.REBOOT); // return the external state as ERROR since this is last retry. 
- Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, job.getState()); when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true); - Assertions.assertEquals(JobState.ERROR, job.getState()); + assertEquals(JobState.ERROR, job.getState()); dispatcher.stop(); commitHandler.stop(); @@ -406,7 +413,7 @@ public void testAbortJobCalledAfterKillingTasks() throws IOException { InlineDispatcher dispatcher = new InlineDispatcher(); dispatcher.init(conf); dispatcher.start(); - OutputCommitter committer = Mockito.mock(OutputCommitter.class); + OutputCommitter committer = mock(OutputCommitter.class); CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer); commitHandler.init(conf); @@ -418,13 +425,13 @@ public void testAbortJobCalledAfterKillingTasks() throws IOException { MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.FAILED)); //Verify abort job hasn't been called - Mockito.verify(committer, Mockito.never()) - .abortJob((JobContext) Mockito.any(), (State) Mockito.any()); + verify(committer, never()) + .abortJob((JobContext) any(), (State) any()); assertJobState(job, JobStateInternal.FAIL_WAIT); //Verify abortJob is called once and the job failed - Mockito.verify(committer, Mockito.timeout(2000).times(1)) - .abortJob((JobContext) Mockito.any(), (State) Mockito.any()); + verify(committer, timeout(2000).times(1)) + .abortJob((JobContext) any(), (State) any()); assertJobState(job, JobStateInternal.FAILED); dispatcher.stop(); @@ -440,7 +447,7 @@ public void testFailAbortDoesntHang() throws IOException { DrainDispatcher dispatcher = new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); - OutputCommitter committer = Mockito.mock(OutputCommitter.class); + OutputCommitter committer = mock(OutputCommitter.class); CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer); commitHandler.init(conf); @@ -462,8 +469,8 @@ public void 
testFailAbortDoesntHang() throws IOException { dispatcher.await(); //Verify abortJob is called once and the job failed - Mockito.verify(committer, Mockito.timeout(2000).times(1)) - .abortJob((JobContext) Mockito.any(), (State) Mockito.any()); + verify(committer, timeout(2000).times(1)) + .abortJob((JobContext) any(), (State) any()); assertJobState(job, JobStateInternal.FAILED); dispatcher.stop(); @@ -576,11 +583,14 @@ public void testUnusableNodeTransition() throws Exception { // add a special task event handler to put the task back to running in case // of task rescheduling/killing EventHandler taskAttemptEventHandler = - event -> { - if (event.getType() == TaskAttemptEventType.TA_KILL) { - job.decrementSucceededMapperCount(); - } - }; + new EventHandler() { + @Override + public void handle(TaskAttemptEvent event) { + if (event.getType() == TaskAttemptEventType.TA_KILL) { + job.decrementSucceededMapperCount(); + } + } + }; dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler); // replace the tasks with spied versions to return the right attempts @@ -607,7 +617,7 @@ public void testUnusableNodeTransition() throws Exception { job.handle(new JobTaskAttemptCompletedEvent(tce)); // complete the task itself job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED)); - Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, job.getState()); } } @@ -663,9 +673,12 @@ private void testJobCompletionWhenReducersAreFinished(boolean killMappers) dispatcher.init(conf); final List killedEvents = Collections.synchronizedList(new ArrayList()); - dispatcher.register(TaskEventType.class, (EventHandler) event -> { - if (event.getType() == TaskEventType.T_KILL) { - killedEvents.add(event); + dispatcher.register(TaskEventType.class, new EventHandler() { + @Override + public void handle(TaskEvent event) { + if (event.getType() == TaskEventType.T_KILL) { + killedEvents.add(event); + } } }); dispatcher.start(); @@ -704,13 +717,13 @@ 
private void testJobCompletionWhenReducersAreFinished(boolean killMappers) * much value. Instead, we validate the T_KILL events. */ if (killMappers) { - Assertions.assertEquals(2, killedEvents.size(), "Number of killed events"); - Assertions.assertEquals("AttemptID", "task_1234567890000_0001_m_000000", - killedEvents.get(0).getTaskID().toString()); - Assertions.assertEquals("AttemptID", "task_1234567890000_0001_m_000001", - killedEvents.get(1).getTaskID().toString()); + assertEquals(2, killedEvents.size(), "Number of killed events"); + assertEquals("task_1234567890000_0001_m_000000", + killedEvents.get(0).getTaskID().toString(), "AttemptID"); + assertEquals("task_1234567890000_0001_m_000001", + killedEvents.get(1).getTaskID().toString(), "AttemptID"); } else { - Assertions.assertEquals(0, killedEvents.size(), "Number of killed events"); + assertEquals(0, killedEvents.size(), "Number of killed events"); } } @@ -743,8 +756,8 @@ public void testCheckAccess() { // Verify access JobImpl job1 = new JobImpl(jobId, null, conf1, null, null, null, null, null, null, null, null, true, user1, 0, null, null, null, null); - Assertions.assertTrue(job1.checkAccess(ugi1, JobACL.VIEW_JOB)); - Assertions.assertFalse(job1.checkAccess(ugi2, JobACL.VIEW_JOB)); + assertTrue(job1.checkAccess(ugi1, JobACL.VIEW_JOB)); + assertFalse(job1.checkAccess(ugi2, JobACL.VIEW_JOB)); // Setup configuration access to the user1 (owner) and user2 Configuration conf2 = new Configuration(); @@ -754,8 +767,8 @@ public void testCheckAccess() { // Verify access JobImpl job2 = new JobImpl(jobId, null, conf2, null, null, null, null, null, null, null, null, true, user1, 0, null, null, null, null); - Assertions.assertTrue(job2.checkAccess(ugi1, JobACL.VIEW_JOB)); - Assertions.assertTrue(job2.checkAccess(ugi2, JobACL.VIEW_JOB)); + assertTrue(job2.checkAccess(ugi1, JobACL.VIEW_JOB)); + assertTrue(job2.checkAccess(ugi2, JobACL.VIEW_JOB)); // Setup configuration access with security enabled and access to all 
Configuration conf3 = new Configuration(); @@ -765,8 +778,8 @@ public void testCheckAccess() { // Verify access JobImpl job3 = new JobImpl(jobId, null, conf3, null, null, null, null, null, null, null, null, true, user1, 0, null, null, null, null); - Assertions.assertTrue(job3.checkAccess(ugi1, JobACL.VIEW_JOB)); - Assertions.assertTrue(job3.checkAccess(ugi2, JobACL.VIEW_JOB)); + assertTrue(job3.checkAccess(ugi1, JobACL.VIEW_JOB)); + assertTrue(job3.checkAccess(ugi2, JobACL.VIEW_JOB)); // Setup configuration access without security enabled Configuration conf4 = new Configuration(); @@ -776,8 +789,8 @@ public void testCheckAccess() { // Verify access JobImpl job4 = new JobImpl(jobId, null, conf4, null, null, null, null, null, null, null, null, true, user1, 0, null, null, null, null); - Assertions.assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB)); - Assertions.assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB)); + assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB)); + assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB)); // Setup configuration access without security enabled Configuration conf5 = new Configuration(); @@ -787,8 +800,8 @@ public void testCheckAccess() { // Verify access JobImpl job5 = new JobImpl(jobId, null, conf5, null, null, null, null, null, null, null, null, true, user1, 0, null, null, null, null); - Assertions.assertTrue(job5.checkAccess(ugi1, null)); - Assertions.assertTrue(job5.checkAccess(ugi2, null)); + assertTrue(job5.checkAccess(ugi1, null)); + assertTrue(job5.checkAccess(ugi2, null)); } @Test @@ -809,8 +822,8 @@ null, mock(JobTokenSecretManager.class), null, mrAppMetrics, null, true, null, 0, null, mockContext, null, null); job.handle(diagUpdateEvent); String diagnostics = job.getReport().getDiagnostics(); - Assertions.assertNotNull(diagnostics); - Assertions.assertTrue(diagnostics.contains(diagMsg)); + assertNotNull(diagnostics); + assertTrue(diagnostics.contains(diagMsg)); job = new JobImpl(jobId, Records 
.newRecord(ApplicationAttemptId.class), new Configuration(), @@ -821,8 +834,8 @@ null, mock(JobTokenSecretManager.class), null, job.handle(new JobEvent(jobId, JobEventType.JOB_KILL)); job.handle(diagUpdateEvent); diagnostics = job.getReport().getDiagnostics(); - Assertions.assertNotNull(diagnostics); - Assertions.assertTrue(diagnostics.contains(diagMsg)); + assertNotNull(diagnostics); + assertTrue(diagnostics.contains(diagMsg)); } @Test @@ -831,13 +844,13 @@ public void testUberDecision() throws Exception { // with default values, no of maps is 2 Configuration conf = new Configuration(); boolean isUber = testUberDecision(conf); - Assertions.assertFalse(isUber); + assertFalse(isUber); // enable uber mode, no of maps is 2 conf = new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true); isUber = testUberDecision(conf); - Assertions.assertTrue(isUber); + assertTrue(isUber); // enable uber mode, no of maps is 2, no of reduces is 1 and uber task max // reduces is 0 @@ -846,7 +859,7 @@ public void testUberDecision() throws Exception { conf.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 0); conf.setInt(MRJobConfig.NUM_REDUCES, 1); isUber = testUberDecision(conf); - Assertions.assertFalse(isUber); + assertFalse(isUber); // enable uber mode, no of maps is 2, no of reduces is 1 and uber task max // reduces is 1 @@ -855,14 +868,14 @@ public void testUberDecision() throws Exception { conf.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1); conf.setInt(MRJobConfig.NUM_REDUCES, 1); isUber = testUberDecision(conf); - Assertions.assertTrue(isUber); + assertTrue(isUber); // enable uber mode, no of maps is 2 and uber task max maps is 0 conf = new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true); conf.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 1); isUber = testUberDecision(conf); - Assertions.assertFalse(isUber); + assertFalse(isUber); // enable uber mode of 0 reducer no matter how much memory assigned to reducer conf = new Configuration(); @@ -871,7 
+884,7 @@ public void testUberDecision() throws Exception { conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 2048); conf.setInt(MRJobConfig.REDUCE_CPU_VCORES, 10); isUber = testUberDecision(conf); - Assertions.assertTrue(isUber); + assertTrue(isUber); } private boolean testUberDecision(Configuration conf) { @@ -936,9 +949,9 @@ public void testTransitionsAtFailed() throws IOException { assertJobState(job, JobStateInternal.FAILED); job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE)); assertJobState(job, JobStateInternal.FAILED); - Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, job.getState()); when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true); - Assertions.assertEquals(JobState.FAILED, job.getState()); + assertEquals(JobState.FAILED, job.getState()); dispatcher.stop(); commitHandler.stop(); @@ -965,12 +978,12 @@ protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) { JobEvent mockJobEvent = mock(JobEvent.class); JobStateInternal jobSI = initTransition.transition(job, mockJobEvent); - Assertions.assertEquals(jobSI, JobStateInternal.NEW, + assertEquals(jobSI, JobStateInternal.NEW, "When init fails, return value from InitTransition.transition should equal NEW."); - Assertions.assertTrue( - job.getDiagnostics().toString().contains("YarnRuntimeException"), "Job diagnostics should contain YarnRuntimeException"); - Assertions.assertTrue( - job.getDiagnostics().toString().contains(EXCEPTIONMSG), "Job diagnostics should contain " + EXCEPTIONMSG); + assertTrue(job.getDiagnostics().toString().contains("YarnRuntimeException"), + "Job diagnostics should contain YarnRuntimeException"); + assertTrue(job.getDiagnostics().toString().contains(EXCEPTIONMSG), + "Job diagnostics should contain " + EXCEPTIONMSG); } @Test @@ -991,7 +1004,7 @@ public void testJobPriorityUpdate() throws Exception { assertJobState(job, JobStateInternal.SETUP); // Update priority of job to 5, and it will be updated 
job.setJobPriority(submittedPriority); - Assertions.assertEquals(submittedPriority, job.getReport().getJobPriority()); + assertEquals(submittedPriority, job.getReport().getJobPriority()); job.handle(new JobSetupCompletedEvent(jobId)); assertJobState(job, JobStateInternal.RUNNING); @@ -1001,10 +1014,10 @@ public void testJobPriorityUpdate() throws Exception { job.setJobPriority(updatedPriority); assertJobState(job, JobStateInternal.RUNNING); Priority jobPriority = job.getReport().getJobPriority(); - Assertions.assertNotNull(jobPriority); + assertNotNull(jobPriority); // Verify whether changed priority is same as what is set in Job. - Assertions.assertEquals(updatedPriority, jobPriority); + assertEquals(updatedPriority, jobPriority); } @Test @@ -1018,14 +1031,14 @@ public void testCleanupSharedCacheUploadPolicies() { filePolicies.put("file1", true); filePolicies.put("jar1", true); Job.setFileSharedCacheUploadPolicies(config, filePolicies); - Assertions.assertEquals( + assertEquals( 2, Job.getArchiveSharedCacheUploadPolicies(config).size()); - Assertions.assertEquals( + assertEquals( 2, Job.getFileSharedCacheUploadPolicies(config).size()); JobImpl.cleanupSharedCacheUploadPolicies(config); - Assertions.assertEquals( + assertEquals( 0, Job.getArchiveSharedCacheUploadPolicies(config).size()); - Assertions.assertEquals( + assertEquals( 0, Job.getFileSharedCacheUploadPolicies(config).size()); } @@ -1093,14 +1106,14 @@ private static void completeJobTasks(JobImpl job) { job.handle(new JobTaskEvent( MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.SUCCEEDED)); - Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, job.getState()); } int numReduces = job.getTotalReduces(); for (int i = 0; i < numReduces; ++i) { job.handle(new JobTaskEvent( MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.SUCCEEDED)); - Assertions.assertEquals(JobState.RUNNING, job.getState()); + assertEquals(JobState.RUNNING, 
job.getState()); } } @@ -1114,7 +1127,7 @@ private static void assertJobState(JobImpl job, JobStateInternal state) { break; } } - Assertions.assertEquals(state, job.getInternalState()); + assertEquals(state, job.getInternalState()); } private void createSpiedMapTasks(Map diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java index 26bf99a646874..7d824991b36e1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java @@ -22,7 +22,6 @@ import java.util.Map; import org.apache.hadoop.mapreduce.TaskType; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobConf; @@ -42,6 +41,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + public class TestMapReduceChildJVM { private static final Logger LOG = @@ -58,7 +61,7 @@ public void testCommandLine() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertEquals( + assertEquals( "[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + @@ -74,13 +77,12 @@ public void testCommandLine() throws Exception { " 1>/stdout" + " 2>/stderr ]", app.launchCmdList.get(0)); - Assertions.assertTrue( - 
app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), "HADOOP_ROOT_LOGGER not set for job"); - Assertions.assertEquals("INFO,console", - app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), "HADOOP_CLIENT_OPTS not set for job"); - Assertions.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), + "HADOOP_ROOT_LOGGER not set for job"); + assertEquals("INFO,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), + "HADOOP_CLIENT_OPTS not set for job"); + assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); } @Test @@ -125,7 +127,7 @@ private void testReduceCommandLine(Configuration conf) ? "shuffleCRLA" : "shuffleCLA"; - Assertions.assertEquals( + assertEquals( "[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + @@ -145,13 +147,12 @@ private void testReduceCommandLine(Configuration conf) " 1>/stdout" + " 2>/stderr ]", app.launchCmdList.get(0)); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), "HADOOP_ROOT_LOGGER not set for job"); - Assertions.assertEquals("INFO,console", - app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), "HADOOP_CLIENT_OPTS not set for job"); - Assertions.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), + "HADOOP_ROOT_LOGGER not set for job"); + assertEquals("INFO,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), + "HADOOP_CLIENT_OPTS not set for job"); + assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); } @Test @@ -168,7 +169,7 @@ public void 
testCommandLineWithLog4JConfig() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertEquals( + assertEquals( "[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + @@ -210,10 +211,8 @@ private void testAutoHeapSize(int mapMb, int redMb, String xmxArg) MRJobConfig.DEFAULT_HEAP_MEMORY_MB_RATIO); // Verify map and reduce java opts are not set by default - Assertions.assertNull( - conf.get(MRJobConfig.MAP_JAVA_OPTS), "Default map java opts!"); - Assertions.assertNull( - conf.get(MRJobConfig.REDUCE_JAVA_OPTS), "Default reduce java opts!"); + assertNull(conf.get(MRJobConfig.MAP_JAVA_OPTS), "Default map java opts!"); + assertNull(conf.get(MRJobConfig.REDUCE_JAVA_OPTS), "Default reduce java opts!"); // Set the memory-mbs and java-opts if (mapMb > 0) { conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMb); @@ -249,8 +248,8 @@ private void testAutoHeapSize(int mapMb, int redMb, String xmxArg) : MRJobConfig.REDUCE_JAVA_OPTS); heapMb = JobConf.parseMaximumHeapSizeMB(javaOpts); } - Assertions.assertEquals( - heapMb, JobConf.parseMaximumHeapSizeMB(cmd), "Incorrect heapsize in the command opts"); + assertEquals(heapMb, JobConf.parseMaximumHeapSizeMB(cmd), + "Incorrect heapsize in the command opts"); } } @@ -295,13 +294,12 @@ public void testEnvironmentVariables() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), "HADOOP_ROOT_LOGGER not set for job"); - Assertions.assertEquals("WARN,console", - app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), "HADOOP_CLIENT_OPTS not set for job"); - Assertions.assertEquals("test", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), + "HADOOP_ROOT_LOGGER not set for job"); + 
assertEquals("WARN,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"), + "HADOOP_CLIENT_OPTS not set for job"); + assertEquals("test", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS")); // Try one more. app = new MyMRApp(1, 0, true, this.getClass().getName(), true); @@ -311,10 +309,9 @@ public void testEnvironmentVariables() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), "HADOOP_ROOT_LOGGER not set for job"); - Assertions.assertEquals("trace", - app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), + "HADOOP_ROOT_LOGGER not set for job"); + assertEquals("trace", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); // Try one using the mapreduce.task.env.var=value syntax app = new MyMRApp(1, 0, true, this.getClass().getName(), true); @@ -325,9 +322,8 @@ public void testEnvironmentVariables() throws Exception { app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); - Assertions.assertTrue( - app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), "HADOOP_ROOT_LOGGER not set for job"); - Assertions.assertEquals("DEBUG,console", - app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); + assertTrue(app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"), + "HADOOP_ROOT_LOGGER not set for job"); + assertEquals("DEBUG,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER")); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestShuffleProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestShuffleProvider.java index 81ee65e1ea328..3243488df9fb7 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestShuffleProvider.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestShuffleProvider.java @@ -18,6 +18,8 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -54,7 +56,6 @@ import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Assertions; public class TestShuffleProvider { @@ -91,7 +92,7 @@ public void testShuffleProviders() throws Exception { + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID); Credentials credentials = new Credentials(); - Token jobToken = new Token<>( + Token jobToken = new Token( ("tokenid").getBytes(), ("tokenpw").getBytes(), new Text("tokenkind"), new Text("tokenservice")); TaskAttemptImpl taImpl = @@ -110,9 +111,12 @@ public void testShuffleProviders() throws Exception { credentials); Map serviceDataMap = launchCtx.getServiceData(); - Assertions.assertNotNull(serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID), "TestShuffleHandler1 is missing"); - Assertions.assertNotNull(serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID), "TestShuffleHandler2 is missing"); - Assertions.assertEquals(3, serviceDataMap.size(), "mismatch number of services in map"); // 2 that we entered + 1 for the built-in shuffle-provider + assertNotNull(serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID), + "TestShuffleHandler1 is missing"); + 
assertNotNull(serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID), + "TestShuffleHandler2 is missing"); + // 2 that we entered + 1 for the built-in shuffle-provider + assertEquals(3, serviceDataMap.size(), "mismatch number of services in map"); } static public class StubbedFS extends RawLocalFileSystem { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 87bfdf47c7a63..2064057943a41 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -22,7 +22,9 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -42,7 +44,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.apache.hadoop.conf.Configuration; @@ -278,7 +279,7 @@ public void testSingleRackRequest() throws Exception { hosts[1] = "host2"; 
hosts[2] = "host3"; TaskSplitMetaInfo splitInfo = - new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L); + new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l); TaskAttemptImpl mockTaskAttempt = createMapTaskAttemptImplForTest(eventHandler, splitInfo); @@ -289,7 +290,7 @@ public void testSingleRackRequest() throws Exception { ArgumentCaptor arg = ArgumentCaptor.forClass(Event.class); verify(eventHandler, times(2)).handle(arg.capture()); if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) { - Assertions.fail("Second Event not of type ContainerRequestEvent"); + fail("Second Event not of type ContainerRequestEvent"); } ContainerRequestEvent cre = (ContainerRequestEvent) arg.getAllValues().get(1); @@ -323,7 +324,7 @@ public void testHostResolveAttempt() throws Exception { ArgumentCaptor arg = ArgumentCaptor.forClass(Event.class); verify(eventHandler, times(2)).handle(arg.capture()); if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) { - Assertions.fail("Second Event not of type ContainerRequestEvent"); + fail("Second Event not of type ContainerRequestEvent"); } Map expected = new HashMap(); expected.put("host1", true); @@ -361,16 +362,16 @@ public void verifyMillisCounters(Resource containerResource, Job job = app.submit(conf); app.waitForState(job, JobState.RUNNING); Map tasks = job.getTasks(); - Assertions.assertEquals(2, tasks.size(), "Num tasks is not correct"); + assertEquals(2, tasks.size(), "Num tasks is not correct"); Iterator taskIter = tasks.values().iterator(); Task mTask = taskIter.next(); app.waitForState(mTask, TaskState.RUNNING); Task rTask = taskIter.next(); app.waitForState(rTask, TaskState.RUNNING); Map mAttempts = mTask.getAttempts(); - Assertions.assertEquals(1, mAttempts.size(), "Num attempts is not correct"); + assertEquals(1, mAttempts.size(), "Num attempts is not correct"); Map rAttempts = rTask.getAttempts(); - Assertions.assertEquals(1, rAttempts.size(), "Num attempts is not correct"); + assertEquals(1, 
rAttempts.size(), "Num attempts is not correct"); TaskAttempt mta = mAttempts.values().iterator().next(); TaskAttempt rta = rAttempts.values().iterator().next(); app.waitForState(mta, TaskAttemptState.RUNNING); @@ -392,21 +393,21 @@ public void verifyMillisCounters(Resource containerResource, int memoryMb = (int) containerResource.getMemorySize(); int vcores = containerResource.getVirtualCores(); - Assertions.assertEquals((int) Math.ceil((float) memoryMb / minContainerSize), + assertEquals((int) Math.ceil((float) memoryMb / minContainerSize), counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue()); - Assertions.assertEquals((int) Math.ceil((float) memoryMb / minContainerSize), + assertEquals((int) Math.ceil((float) memoryMb / minContainerSize), counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue()); - Assertions.assertEquals(1, + assertEquals(1, counters.findCounter(JobCounter.MILLIS_MAPS).getValue()); - Assertions.assertEquals(1, + assertEquals(1, counters.findCounter(JobCounter.MILLIS_REDUCES).getValue()); - Assertions.assertEquals(memoryMb, + assertEquals(memoryMb, counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue()); - Assertions.assertEquals(memoryMb, + assertEquals(memoryMb, counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue()); - Assertions.assertEquals(vcores, + assertEquals(vcores, counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue()); - Assertions.assertEquals(vcores, + assertEquals(vcores, counters.findCounter(JobCounter.VCORES_MILLIS_REDUCES).getValue()); } @@ -452,23 +453,23 @@ private void testMRAppHistory(MRApp app) throws Exception { app.waitForState(job, JobState.FAILED); Map tasks = job.getTasks(); - Assertions.assertEquals(1, tasks.size(), "Num tasks is not correct"); + assertEquals(1, tasks.size(), "Num tasks is not correct"); Task task = tasks.values().iterator().next(); - Assertions.assertEquals(TaskState.FAILED, task + assertEquals(TaskState.FAILED, task .getReport().getTaskState(), "Task state not 
correct"); Map attempts = tasks.values().iterator().next() .getAttempts(); - Assertions.assertEquals(4, attempts.size(), "Num attempts is not correct"); + assertEquals(4, attempts.size(), "Num attempts is not correct"); Iterator it = attempts.values().iterator(); TaskAttemptReport report = it.next().getReport(); - Assertions.assertEquals(TaskAttemptState.FAILED -, report.getTaskAttemptState(), "Attempt state not correct"); - Assertions.assertEquals("Diagnostic Information is not Correct", - "Test Diagnostic Event", report.getDiagnosticInfo()); + assertEquals(TaskAttemptState.FAILED, + report.getTaskAttemptState(), "Attempt state not correct"); + assertEquals("Test Diagnostic Event", report.getDiagnosticInfo(), + "Diagnostic Information is not Correct"); report = it.next().getReport(); - Assertions.assertEquals(TaskAttemptState.FAILED -, report.getTaskAttemptState(), "Attempt state not correct"); + assertEquals(TaskAttemptState.FAILED, + report.getTaskAttemptState(), "Attempt state not correct"); } private void testTaskAttemptAssignedFailHistory @@ -477,8 +478,8 @@ private void testMRAppHistory(MRApp app) throws Exception { Job job = app.submit(conf); app.waitForState(job, JobState.FAILED); Map tasks = job.getTasks(); - Assertions.assertTrue(app.getTaStartJHEvent(), "No Ta Started JH Event"); - Assertions.assertTrue(app.getTaFailedJHEvent(), "No Ta Failed JH Event"); + assertTrue(app.getTaStartJHEvent(), "No Ta Started JH Event"); + assertTrue(app.getTaFailedJHEvent(), "No Ta Failed JH Event"); } private void testTaskAttemptAssignedKilledHistory @@ -512,12 +513,15 @@ protected void attemptLaunched(TaskAttemptId attemptID) { protected EventHandler createJobHistoryHandler( AppContext context) { - return event -> { - if (event.getType() == org.apache.hadoop.mapreduce.jobhistory.EventType.MAP_ATTEMPT_FAILED) { - TaskAttemptUnsuccessfulCompletion datum = (TaskAttemptUnsuccessfulCompletion) event - .getHistoryEvent().getDatum(); - Assertions.assertEquals("Diagnostic 
Information is not Correct", - "Test Diagnostic Event", datum.get(8).toString()); + return new EventHandler() { + @Override + public void handle(JobHistoryEvent event) { + if (event.getType() == org.apache.hadoop.mapreduce.jobhistory.EventType.MAP_ATTEMPT_FAILED) { + TaskAttemptUnsuccessfulCompletion datum = (TaskAttemptUnsuccessfulCompletion) event + .getHistoryEvent().getDatum(); + assertEquals("Test Diagnostic Event", datum.get(8).toString(), + "Diagnostic Information is not Correct"); + } } }; } @@ -562,25 +566,28 @@ public boolean getTaKilledJHEvent(){ protected EventHandler createJobHistoryHandler( AppContext context) { - return event -> { - if (event.getType() == org.apache.hadoop.mapreduce.jobhistory. - EventType.MAP_ATTEMPT_FAILED) { - receiveTaFailedJHEvent = true; - } else if (event.getType() == org.apache.hadoop.mapreduce. - jobhistory.EventType.MAP_ATTEMPT_KILLED) { - receiveTaKilledJHEvent = true; - } else if (event.getType() == org.apache.hadoop.mapreduce. - jobhistory.EventType.MAP_ATTEMPT_STARTED) { - receiveTaStartJHEvent = true; - } else if (event.getType() == org.apache.hadoop.mapreduce. - jobhistory.EventType.REDUCE_ATTEMPT_FAILED) { - receiveTaFailedJHEvent = true; - } else if (event.getType() == org.apache.hadoop.mapreduce. - jobhistory.EventType.REDUCE_ATTEMPT_KILLED) { - receiveTaKilledJHEvent = true; - } else if (event.getType() == org.apache.hadoop.mapreduce. - jobhistory.EventType.REDUCE_ATTEMPT_STARTED) { - receiveTaStartJHEvent = true; + return new EventHandler() { + @Override + public void handle(JobHistoryEvent event) { + if (event.getType() == org.apache.hadoop.mapreduce.jobhistory. + EventType.MAP_ATTEMPT_FAILED) { + receiveTaFailedJHEvent = true; + } else if (event.getType() == org.apache.hadoop.mapreduce. + jobhistory.EventType.MAP_ATTEMPT_KILLED) { + receiveTaKilledJHEvent = true; + } else if (event.getType() == org.apache.hadoop.mapreduce. 
+ jobhistory.EventType.MAP_ATTEMPT_STARTED) { + receiveTaStartJHEvent = true; + } else if (event.getType() == org.apache.hadoop.mapreduce. + jobhistory.EventType.REDUCE_ATTEMPT_FAILED) { + receiveTaFailedJHEvent = true; + } else if (event.getType() == org.apache.hadoop.mapreduce. + jobhistory.EventType.REDUCE_ATTEMPT_KILLED) { + receiveTaKilledJHEvent = true; + } else if (event.getType() == org.apache.hadoop.mapreduce. + jobhistory.EventType.REDUCE_ATTEMPT_STARTED) { + receiveTaStartJHEvent = true; + } } }; } @@ -632,8 +639,8 @@ public void testLaunchFailedWhileKilling() throws Exception { taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)); assertFalse(eventHandler.internalError); - assertEquals( - Locality.NODE_LOCAL, taImpl.getLocality(), "Task attempt is not assigned on the local node"); + assertEquals(Locality.NODE_LOCAL, taImpl.getLocality(), + "Task attempt is not assigned on the local node"); } @Test @@ -689,10 +696,10 @@ public void testContainerCleanedWhileRunning() throws Exception { .isEqualTo(TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED)); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); - assertEquals( - Locality.RACK_LOCAL, taImpl.getLocality(), "Task attempt is not assigned on the local rack"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); + assertEquals(Locality.RACK_LOCAL, taImpl.getLocality(), + "Task attempt is not assigned on the local rack"); } @Test @@ -751,10 +758,10 @@ public void testContainerCleanedWhileCommitting() throws Exception { .isEqualTo(TaskAttemptState.COMMIT_PENDING); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED)); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); - 
assertEquals(Locality.OFF_SWITCH -, taImpl.getLocality(), "Task attempt is assigned locally"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); + assertEquals(Locality.OFF_SWITCH, + taImpl.getLocality(), "Task attempt is assigned locally"); } @Test @@ -826,8 +833,8 @@ public void testDoubleTooManyFetchFailure() throws Exception { assertThat(taImpl.getState()) .withFailMessage("Task attempt is not in FAILED state, still") .isEqualTo(TaskAttemptState.FAILED); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); } @@ -877,15 +884,14 @@ public void testAppDiagnosticEventOnUnassignedTask() { TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId, "Task got killed")); - assertFalse( - - eventHandler.internalError, "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task"); try { taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL)); - Assertions.assertTrue(true, "No exception on UNASSIGNED STATE KILL event"); + assertTrue(true, "No exception on UNASSIGNED STATE KILL event"); } catch (Exception e) { - Assertions.fail("Exception not expected for UNASSIGNED STATE KILL event"); + fail("Exception not expected for UNASSIGNED STATE KILL event"); } } @@ -955,8 +961,8 @@ public void testTooManyFetchFailureAfterKill() throws Exception { assertThat(taImpl.getState()) .withFailMessage("Task attempt is not in KILLED state, still") .isEqualTo(TaskAttemptState.KILLED); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_CONTAINER_CLEANED"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying 
to handle TA_CONTAINER_CLEANED"); } @Test @@ -1002,9 +1008,8 @@ public void testAppDiagnosticEventOnNewTask() { when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId, "Task got killed")); - assertFalse( - - eventHandler.internalError, "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task"); } @Test @@ -1065,8 +1070,8 @@ public void testFetchFailureAttemptFinishTime() throws Exception{ .withFailMessage("Task attempt is not in SUCCEEDED state") .isEqualTo(TaskAttemptState.SUCCEEDED); - assertTrue( - taImpl.getFinishTime() > 0, "Task Attempt finish time is not greater than 0"); + assertTrue(taImpl.getFinishTime() > 0, + "Task Attempt finish time is not greater than 0"); Long finishTime = taImpl.getFinishTime(); Thread.sleep(5); @@ -1077,8 +1082,8 @@ public void testFetchFailureAttemptFinishTime() throws Exception{ .withFailMessage("Task attempt is not in FAILED state") .isEqualTo(TaskAttemptState.FAILED); - assertEquals( - finishTime, Long.valueOf(taImpl.getFinishTime()), "After TA_TOO_MANY_FETCH_FAILURE," + assertEquals(finishTime, Long.valueOf(taImpl.getFinishTime()), + "After TA_TOO_MANY_FETCH_FAILURE," + " Task attempt finish time is not the same "); } @@ -1231,8 +1236,8 @@ public void testContainerKillWhileRunning() throws Exception { .isEqualTo(TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL)); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_KILL"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_KILL"); assertThat(taImpl.getInternalState()) .withFailMessage("Task should be in KILL_CONTAINER_CLEANUP state") .isEqualTo(TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP); @@ -1294,8 +1299,8 @@ public void 
testContainerKillWhileCommitPending() throws Exception { .isEqualTo(TaskAttemptStateInternal.COMMIT_PENDING); taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL)); - assertFalse( - eventHandler.internalError, "InternalError occurred trying to handle TA_KILL"); + assertFalse(eventHandler.internalError, + "InternalError occurred trying to handle TA_KILL"); assertThat(taImpl.getInternalState()) .withFailMessage("Task should be in KILL_CONTAINER_CLEANUP state") .isEqualTo(TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP); @@ -1352,30 +1357,28 @@ public void testKillMapOnlyTaskWhileSuccessFinishing() throws Exception { taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE)); - assertEquals( - TaskAttemptState.SUCCEEDED, taImpl.getState(), "Task attempt is not in SUCCEEDED state"); - assertEquals( - TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER -, taImpl.getInternalState(), "Task attempt's internal state is not " + - "SUCCESS_FINISHING_CONTAINER"); + assertEquals(TaskAttemptState.SUCCEEDED, taImpl.getState(), + "Task attempt is not in SUCCEEDED state"); + assertEquals(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER, + taImpl.getInternalState(), "Task attempt's internal state is not " + + "SUCCESS_FINISHING_CONTAINER"); // If the map only task is killed when it is in SUCCESS_FINISHING_CONTAINER // state, the state will move to SUCCESS_CONTAINER_CLEANUP taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_KILL)); - assertEquals( - TaskAttemptState.SUCCEEDED, taImpl.getState(), "Task attempt is not in SUCCEEDED state"); - assertEquals( - TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP -, taImpl.getInternalState(), "Task attempt's internal state is not " + - "SUCCESS_CONTAINER_CLEANUP"); + assertEquals(TaskAttemptState.SUCCEEDED, + taImpl.getState(), "Task attempt is not in SUCCEEDED state"); + assertEquals(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP, + taImpl.getInternalState(), 
"Task attempt's internal state is not " + + "SUCCESS_CONTAINER_CLEANUP"); taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_CONTAINER_CLEANED)); - assertEquals( - TaskAttemptState.SUCCEEDED, taImpl.getState(), "Task attempt is not in SUCCEEDED state"); - assertEquals( - TaskAttemptStateInternal.SUCCEEDED, taImpl.getInternalState(), "Task attempt's internal state is not SUCCEEDED state"); + assertEquals(TaskAttemptState.SUCCEEDED, taImpl.getState(), + "Task attempt is not in SUCCEEDED state"); + assertEquals(TaskAttemptStateInternal.SUCCEEDED, taImpl.getInternalState(), + "Task attempt's internal state is not SUCCEEDED state"); assertFalse(eventHandler.internalError, "InternalError occurred"); } @@ -1399,7 +1402,7 @@ public void testKillMapTaskAfterSuccess() throws Exception { taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_CONTAINER_CLEANED)); // Send a map task attempt kill event indicating next map attempt has to be - // re-schedule + // reschedule taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(), "", true)); assertThat(taImpl.getState()) .withFailMessage("Task attempt is not in KILLED state") @@ -1423,21 +1426,20 @@ public void testKillMapOnlyTaskAfterSuccess() throws Exception { taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE)); - assertEquals( - TaskAttemptState.SUCCEEDED, taImpl.getState(), "Task attempt is not in SUCCEEDED state"); - assertEquals( - TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER -, taImpl.getInternalState(), "Task attempt's internal state is not " + - "SUCCESS_FINISHING_CONTAINER"); + assertEquals(TaskAttemptState.SUCCEEDED, taImpl.getState(), + "Task attempt is not in SUCCEEDED state"); + assertEquals(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER, + taImpl.getInternalState(), "Task attempt's internal state is not " + + "SUCCESS_FINISHING_CONTAINER"); taImpl.handle(new TaskAttemptEvent(taImpl.getID(), 
TaskAttemptEventType.TA_CONTAINER_CLEANED)); // Succeeded taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(),"", true)); - assertEquals( - TaskAttemptState.SUCCEEDED, taImpl.getState(), "Task attempt is not in SUCCEEDED state"); - assertEquals( - TaskAttemptStateInternal.SUCCEEDED, taImpl.getInternalState(), "Task attempt's internal state is not SUCCEEDED"); + assertEquals(TaskAttemptState.SUCCEEDED, taImpl.getState(), + "Task attempt is not in SUCCEEDED state"); + assertEquals(TaskAttemptStateInternal.SUCCEEDED, + taImpl.getInternalState(), "Task attempt's internal state is not SUCCEEDED"); assertFalse(eventHandler.internalError, "InternalError occurred"); TaskEvent event = eventHandler.lastTaskEvent; assertEquals(TaskEventType.T_ATTEMPT_SUCCEEDED, event.getType()); @@ -1629,8 +1631,8 @@ public void testMapperCustomResourceTypes() { ResourceInformation resourceInfo = getResourceInfoFromContainerRequest(taImpl, eventHandler). getResourceInformation(CUSTOM_RESOURCE_NAME); - assertEquals("Expecting the default unit (G)", - "G", resourceInfo.getUnits()); + assertEquals("G", resourceInfo.getUnits(), + "Expecting the default unit (G)"); assertEquals(7L, resourceInfo.getValue()); } @@ -1647,8 +1649,8 @@ public void testReducerCustomResourceTypes() { ResourceInformation resourceInfo = getResourceInfoFromContainerRequest(taImpl, eventHandler). 
getResourceInformation(CUSTOM_RESOURCE_NAME); - assertEquals("Expecting the specified unit (m)", - "m", resourceInfo.getUnits()); + assertEquals("m", resourceInfo.getUnits(), + "Expecting the specified unit (m)"); assertEquals(3L, resourceInfo.getValue()); } @@ -1747,17 +1749,18 @@ public void testReducerMemoryRequestOverriding() { @Test public void testReducerMemoryRequestMultipleName() { - EventHandler eventHandler = mock(EventHandler.class); - Clock clock = SystemClock.getInstance(); - JobConf jobConf = new JobConf(); - for (String memoryName : ImmutableList.of( - MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, - MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - jobConf.set(MRJobConfig.REDUCE_RESOURCE_TYPE_PREFIX + memoryName, - "3Gi"); - } - // expected=IllegalArgumentException.class - createReduceTaskAttemptImplForTest(eventHandler, clock, jobConf); + assertThrows(IllegalArgumentException.class, ()->{ + EventHandler eventHandler = mock(EventHandler.class); + Clock clock = SystemClock.getInstance(); + JobConf jobConf = new JobConf(); + for (String memoryName : ImmutableList.of( + MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, + MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { + jobConf.set(MRJobConfig.REDUCE_RESOURCE_TYPE_PREFIX + memoryName, + "3Gi"); + } + createReduceTaskAttemptImplForTest(eventHandler, clock, jobConf); + }); } @Test @@ -1847,22 +1850,23 @@ private Resource getResourceInfoFromContainerRequest( containerRequestEvents.add((ContainerRequestEvent) e); } } - assertEquals(1, containerRequestEvents.size(), "Expected one ContainerRequestEvent after scheduling " - + "task attempt"); + assertEquals(1, containerRequestEvents.size(), + "Expected one ContainerRequestEvent after scheduling task attempt"); return containerRequestEvents.get(0).getCapability(); } @Test public void testReducerCustomResourceTypeWithInvalidUnit() { - initResourceTypes(); - EventHandler eventHandler = mock(EventHandler.class); - Clock clock = SystemClock.getInstance(); - JobConf 
jobConf = new JobConf(); - jobConf.set(MRJobConfig.REDUCE_RESOURCE_TYPE_PREFIX - + CUSTOM_RESOURCE_NAME, "3z"); - // (expected=IllegalArgumentException.class) - createReduceTaskAttemptImplForTest(eventHandler, clock, jobConf); + assertThrows(IllegalArgumentException.class, () -> { + initResourceTypes(); + EventHandler eventHandler = mock(EventHandler.class); + Clock clock = SystemClock.getInstance(); + JobConf jobConf = new JobConf(); + jobConf.set(MRJobConfig.REDUCE_RESOURCE_TYPE_PREFIX + + CUSTOM_RESOURCE_NAME, "3z"); + createReduceTaskAttemptImplForTest(eventHandler, clock, jobConf); + }); } @Test @@ -1877,22 +1881,19 @@ public void testKillingTaskWhenContainerCleanup() { // move in two steps to the desired state (cannot get there directly) taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE)); - assertEquals( - TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER -, taImpl.getInternalState(), "Task attempt's internal state is not " + + assertEquals(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER, + taImpl.getInternalState(), "Task attempt's internal state is not " + "SUCCESS_FINISHING_CONTAINER"); taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_TIMED_OUT)); - assertEquals( - TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP -, taImpl.getInternalState(), "Task attempt's internal state is not " + + assertEquals(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP, + taImpl.getInternalState(), "Task attempt's internal state is not " + "SUCCESS_CONTAINER_CLEANUP"); taImpl.handle(new TaskAttemptKillEvent(mapTAId, "", true)); - assertEquals( - TaskAttemptState.KILLED -, taImpl.getState(), "Task attempt is not in KILLED state"); + assertEquals(TaskAttemptState.KILLED, + taImpl.getState(), "Task attempt is not in KILLED state"); } @Test @@ -1907,23 +1908,20 @@ public void testTooManyFetchFailureWhileContainerCleanup() { // move in two steps to the desired state (cannot get there directly) taImpl.handle(new 
TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE)); - assertEquals( - TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER -, taImpl.getInternalState(), "Task attempt's internal state is not " + + assertEquals(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER, + taImpl.getInternalState(), "Task attempt's internal state is not " + "SUCCESS_FINISHING_CONTAINER"); taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_TIMED_OUT)); - assertEquals( - TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP -, taImpl.getInternalState(), "Task attempt's internal state is not " + + assertEquals(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP, + taImpl.getInternalState(), "Task attempt's internal state is not " + "SUCCESS_CONTAINER_CLEANUP"); taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(taImpl.getID(), reduceTAId, "Host")); - assertEquals( - TaskAttemptState.FAILED -, taImpl.getState(), "Task attempt is not in FAILED state"); + assertEquals(TaskAttemptState.FAILED, + taImpl.getState(), "Task attempt is not in FAILED state"); assertFalse(eventHandler.internalError, "InternalError occurred"); } @@ -1946,16 +1944,14 @@ public void testTooManyFetchFailureWhileSuccessFinishing() throws Exception { taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE)); - assertEquals( - TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER -, taImpl.getInternalState(), "Task attempt's internal state is not " + + assertEquals(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER, + taImpl.getInternalState(), "Task attempt's internal state is not " + "SUCCESS_FINISHING_CONTAINER"); taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(taImpl.getID(), reduceTAId, "Host")); - assertEquals( - TaskAttemptState.FAILED -, taImpl.getState(), "Task attempt is not in FAILED state"); + assertEquals(TaskAttemptState.FAILED, + taImpl.getState(), "Task attempt is not in FAILED state"); assertFalse(eventHandler.internalError, "InternalError 
occurred"); } @@ -2045,5 +2041,5 @@ public void handle(Event event) { } } - } + }; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java index 70ef7c6e45b2e..fc9dcc83fa005 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java @@ -18,6 +18,9 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,7 +31,6 @@ import java.util.Map; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileStatus; @@ -113,7 +115,7 @@ public void testAttemptContainerRequest() throws Exception { mock(WrappedJvmID.class), taListener, credentials); - Assertions.assertEquals(acls, launchCtx.getApplicationACLs(), "ACLs mismatch"); + assertEquals(acls, launchCtx.getApplicationACLs(), "ACLs mismatch"); Credentials launchCredentials = new Credentials(); DataInputByteBuffer dibb = new DataInputByteBuffer(); @@ -124,16 +126,13 @@ public void testAttemptContainerRequest() throws Exception { for (Token token : credentials.getAllTokens()) { Token launchToken = launchCredentials.getToken(token.getService()); - 
Assertions.assertNotNull( - launchToken, "Token " + token.getService() + " is missing"); - Assertions.assertEquals( - token, launchToken, "Token " + token.getService() + " mismatch"); + assertNotNull(launchToken, "Token " + token.getService() + " is missing"); + assertEquals(token, launchToken, "Token " + token.getService() + " mismatch"); } // verify the secret key is in the launch context - Assertions.assertNotNull( - launchCredentials.getSecretKey(SECRET_KEY_ALIAS), "Secret key missing"); - Assertions.assertTrue(Arrays.equals(SECRET_KEY, + assertNotNull(launchCredentials.getSecretKey(SECRET_KEY_ALIAS), "Secret key missing"); + assertTrue(Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)), "Secret key mismatch"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java index 55dd3f74bf1a4..da4d22b8b29ae 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java @@ -372,7 +372,7 @@ private void assertTaskRunningState() { } /** - * {@link TaskStateInternal#KILL_WAIT} + * {@link TaskState#KILL_WAIT} */ private void assertTaskKillWaitState() { assertEquals(TaskStateInternal.KILL_WAIT, mockTask.getInternalState()); @@ -407,7 +407,7 @@ public void testInit() { } @Test - /* + /** * {@link TaskState#NEW}->{@link TaskState#SCHEDULED} */ public void testScheduleTask() { @@ -418,7 +418,7 @@ public void testScheduleTask() { } @Test - /* + /** * {@link TaskState#SCHEDULED}->{@link TaskState#KILL_WAIT} */ public void 
testKillScheduledTask() { @@ -430,7 +430,7 @@ public void testKillScheduledTask() { } @Test - /* + /** * Kill attempt * {@link TaskState#SCHEDULED}->{@link TaskState#SCHEDULED} */ @@ -445,7 +445,7 @@ public void testKillScheduledTaskAttempt() { } @Test - /* + /** * Launch attempt * {@link TaskState#SCHEDULED}->{@link TaskState#RUNNING} */ @@ -458,7 +458,7 @@ public void testLaunchTaskAttempt() { } @Test - /* + /** * Kill running attempt * {@link TaskState#RUNNING}->{@link TaskState#RUNNING} */ @@ -475,7 +475,7 @@ public void testKillRunningTaskAttempt() { @Test public void testKillSuccessfulTask() { - LOG.info("--- START: testKillSuccessfulTask ---"); + LOG.info("--- START: testKillSuccesfulTask ---"); mockTask = createMockTask(TaskType.MAP); TaskId taskId = getNewTaskID(); scheduleTaskAttempt(taskId); @@ -489,7 +489,7 @@ public void testKillSuccessfulTask() { } @Test - /* + /** * Kill map attempt for succeeded map task * {@link TaskState#SUCCEEDED}->{@link TaskState#SCHEDULED} */ @@ -587,10 +587,10 @@ public void testFailureDuringTaskAttemptCommit() { mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ATTEMPT_SUCCEEDED)); - assertFalse( - mockTask.canCommit(taskAttempts.get(0).getAttemptId()), "First attempt should not commit"); - assertTrue( - mockTask.canCommit(getLastAttempt().getAttemptId()), "Second attempt should commit"); + assertFalse(mockTask.canCommit(taskAttempts.get(0).getAttemptId()), + "First attempt should not commit"); + assertTrue(mockTask.canCommit(getLastAttempt().getAttemptId()), + "Second attempt should commit"); assertTaskSucceededState(); } @@ -820,16 +820,16 @@ protected int getMaxAttempts() { assertEquals(3, taskAttempts.size()); // verify the speculative attempt(#2) is not a rescheduled attempt - assertFalse(taskAttempts.get(1).getRescheduled()); + assertEquals(false, taskAttempts.get(1).getRescheduled()); // verify the third attempt is a rescheduled attempt - 
assertTrue(taskAttempts.get(2).getRescheduled()); + assertEquals(true, taskAttempts.get(2).getRescheduled()); // now launch the latest attempt(#3) and set the internal state to running launchTaskAttempt(getLastAttempt().getAttemptId()); // have the speculative attempt(#2) fail, verify task still since it - // hasn't reached the max attempts which is 4 + // hasn't reach the max attempts which is 4 MockTaskAttemptImpl taskAttempt1 = taskAttempts.get(1); taskAttempt1.setState(TaskAttemptState.FAILED); mockTask.handle(new TaskTAttemptFailedEvent(taskAttempt1.getAttemptId())); @@ -890,5 +890,5 @@ public void handle(Event event) { lastTaskAttemptEvent = (TaskAttemptEvent)event; } } - } + }; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java index 84596c94a7d5c..2debee5c7e505 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java @@ -19,6 +19,9 @@ package org.apache.hadoop.mapreduce.v2.app.launcher; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import java.io.IOException; @@ -44,7 +47,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse; import org.apache.hadoop.yarn.api.protocolrecords.RestartContainerResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.RollbackResponse; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; @@ -129,10 +131,10 @@ public void testPoolSize() throws InterruptedException { // No events yet assertThat(containerLauncher.initialPoolSize).isEqualTo( MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE); - Assertions.assertEquals(0, threadPool.getPoolSize()); - Assertions.assertEquals(containerLauncher.initialPoolSize, + assertEquals(0, threadPool.getPoolSize()); + assertEquals(containerLauncher.initialPoolSize, threadPool.getCorePoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertNull(containerLauncher.foundErrors); containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize; for (int i = 0; i < 10; i++) { @@ -143,8 +145,8 @@ public void testPoolSize() throws InterruptedException { ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher, 10); - Assertions.assertEquals(10, threadPool.getPoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(10, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); // Same set of hosts, so no change containerLauncher.finishEventHandling = true; @@ -155,7 +157,7 @@ public void testPoolSize() throws InterruptedException { + ". 
Timeout is " + timeOut); Thread.sleep(1000); } - Assertions.assertEquals(10, containerLauncher.numEventsProcessed.get()); + assertEquals(10, containerLauncher.numEventsProcessed.get()); containerLauncher.finishEventHandling = false; for (int i = 0; i < 10; i++) { ContainerId containerId = ContainerId.newContainerId(appAttemptId, @@ -167,8 +169,8 @@ public void testPoolSize() throws InterruptedException { ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher, 20); - Assertions.assertEquals(10, threadPool.getPoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(10, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); // Different hosts, there should be an increase in core-thread-pool size to // 21(11hosts+10buffer) @@ -181,8 +183,8 @@ public void testPoolSize() throws InterruptedException { containerId, "host11:1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); waitForEvents(containerLauncher, 21); - Assertions.assertEquals(11, threadPool.getPoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(11, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); containerLauncher.stop(); @@ -225,8 +227,8 @@ public void testPoolLimits() throws InterruptedException { ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher, 10); - Assertions.assertEquals(10, threadPool.getPoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(10, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); // 4 more different hosts, but thread pool size should be capped at 12 containerLauncher.expectedCorePoolSize = 12 ; @@ -236,14 +238,14 @@ public void testPoolLimits() throws InterruptedException { ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher, 12); - Assertions.assertEquals(12, threadPool.getPoolSize()); - 
Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(12, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); // Make some threads ideal so that remaining events are also done. containerLauncher.finishEventHandling = true; waitForEvents(containerLauncher, 14); - Assertions.assertEquals(12, threadPool.getPoolSize()); - Assertions.assertNull(containerLauncher.foundErrors); + assertEquals(12, threadPool.getPoolSize()); + assertNull(containerLauncher.foundErrors); containerLauncher.stop(); } @@ -257,8 +259,7 @@ private void waitForEvents(CustomContainerLauncher containerLauncher, + ". It is now " + containerLauncher.numEventsProcessing.get()); Thread.sleep(1000); } - Assertions.assertEquals(expectedNumEvents, - containerLauncher.numEventsProcessing.get()); + assertEquals(expectedNumEvents, containerLauncher.numEventsProcessing.get()); } @Test @@ -294,15 +295,14 @@ public void testSlowNM() throws Exception { app.waitForState(job, JobState.RUNNING); Map tasks = job.getTasks(); - Assertions.assertEquals(1, tasks.size(), "Num tasks is not correct"); + assertEquals(1, tasks.size(), "Num tasks is not correct"); Task task = tasks.values().iterator().next(); app.waitForState(task, TaskState.SCHEDULED); Map attempts = tasks.values().iterator() .next().getAttempts(); - Assertions.assertEquals(maxAttempts -, attempts.size(), "Num attempts is not correct"); + assertEquals(maxAttempts, attempts.size(), "Num attempts is not correct"); TaskAttempt attempt = attempts.values().iterator().next(); app.waitForInternalState((TaskAttemptImpl) attempt, @@ -313,11 +313,10 @@ public void testSlowNM() throws Exception { String diagnostics = attempt.getDiagnostics().toString(); LOG.info("attempt.getDiagnostics: " + diagnostics); - Assertions.assertTrue(diagnostics.contains("Container launch failed for " + assertTrue(diagnostics.contains("Container launch failed for " + "container_0_0000_01_000000 : ")); - Assertions - .assertTrue(diagnostics - 
.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel")); + assertTrue(diagnostics.contains( + "java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel")); } finally { server.stop(); @@ -363,6 +362,7 @@ public void run() { try { wait(1000); } catch (InterruptedException e) { + ; } } } @@ -418,7 +418,8 @@ cmProxy.new ContainerManagementProtocolProxyData( return proxy; } }; - } + + }; } public class DummyContainerManager implements ContainerManagementProtocol { @@ -428,7 +429,7 @@ public class DummyContainerManager implements ContainerManagementProtocol { @Override public GetContainerStatusesResponse getContainerStatuses( GetContainerStatusesRequest request) throws IOException { - List statuses = new ArrayList<>(); + List statuses = new ArrayList(); statuses.add(status); return GetContainerStatusesResponse.newInstance(statuses, null); } @@ -442,7 +443,7 @@ public StartContainersResponse startContainers(StartContainersRequest requests) MRApp.newContainerTokenIdentifier(request.getContainerToken()); // Validate that the container is what RM is giving. 
- Assertions.assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT, + assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT, containerTokenIdentifier.getNmHostAddress()); StartContainersResponse response = recordFactory @@ -472,7 +473,8 @@ public StopContainersResponse stopContainers(StopContainersRequest request) @Override @Deprecated public IncreaseContainersResourceResponse increaseContainersResource( - IncreaseContainersResourceRequest request) throws IOException { + IncreaseContainersResourceRequest request) throws IOException, + IOException { Exception e = new Exception("Dummy function", new Exception( "Dummy function cause")); throw new IOException(e); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java index aa347ae5ab418..d98b8031ec99f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java @@ -92,7 +92,8 @@ public class TestContainerLauncherImpl { private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - private Map serviceResponse = new HashMap<>(); + private Map serviceResponse = + new HashMap(); @BeforeEach public void setup() throws IOException { @@ -135,14 +136,14 @@ public void waitForPoolToIdle() throws InterruptedException { // That the other thread had time to insert the event into the queue and // start processing it. For some reason we were getting interrupted // exceptions within eventQueue without this sleep. 
- Thread.sleep(100L); + Thread.sleep(100l); LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+ " POOL SIZE 2: "+this.launcherPool.getQueue().size()+ " ACTIVE COUNT: "+ this.launcherPool.getActiveCount()); while(!this.eventQueue.isEmpty() || !this.launcherPool.getQueue().isEmpty() || this.launcherPool.getActiveCount() > 0) { - Thread.sleep(100L); + Thread.sleep(100l); LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+ " POOL SIZE 2: "+this.launcherPool.getQueue().size()+ " ACTIVE COUNT: "+ this.launcherPool.getActiveCount()); @@ -186,8 +187,8 @@ public void testHandle() throws Exception { ut.init(conf); ut.start(); try { - ContainerId contId = makeContainerId(0L, 0, 0, 1); - TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0); + ContainerId contId = makeContainerId(0l, 0, 0, 1); + TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0); StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class); startResp.setAllServicesMetaData(serviceResponse); @@ -245,8 +246,8 @@ public void testOutOfOrder() throws Exception { ut.init(conf); ut.start(); try { - ContainerId contId = makeContainerId(0L, 0, 0, 1); - TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0); + ContainerId contId = makeContainerId(0l, 0, 0, 1); + TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0); String cmAddress = "127.0.0.1:8000"; StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class); @@ -321,8 +322,8 @@ public void testMyShutdown() throws Exception { ut.init(conf); ut.start(); try { - ContainerId contId = makeContainerId(0L, 0, 0, 1); - TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0); + ContainerId contId = makeContainerId(0l, 0, 0, 1); + TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0); String cmAddress = "127.0.0.1:8000"; StartContainersResponse startResp = 
recordFactory.newRecordInstance(StartContainersResponse.class); @@ -377,8 +378,8 @@ public void testContainerCleaned() throws Exception { ut.init(conf); ut.start(); try { - ContainerId contId = makeContainerId(0L, 0, 0, 1); - TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0); + ContainerId contId = makeContainerId(0l, 0, 0, 1); + TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0); String cmAddress = "127.0.0.1:8000"; StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index c1c0481154ca4..eef1a4a10835f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -18,6 +18,10 @@ package org.apache.hadoop.mapreduce.v2.app.local; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -55,8 +59,12 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import 
org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NMToken; +import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; @@ -65,7 +73,6 @@ import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.resource.Resources; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; @@ -86,7 +93,7 @@ public void testRMConnectionRetry() throws Exception { lca.start(); try { lca.heartbeat(); - Assertions.fail("heartbeat was supposed to throw"); + fail("heartbeat was supposed to throw"); } catch (YarnException e) { // YarnException is expected } finally { @@ -100,7 +107,7 @@ public void testRMConnectionRetry() throws Exception { lca.start(); try { lca.heartbeat(); - Assertions.fail("heartbeat was supposed to throw"); + fail("heartbeat was supposed to throw"); } catch (YarnRuntimeException e) { // YarnRuntimeException is expected } finally { @@ -130,10 +137,10 @@ public void testAMRMTokenUpdate() throws Exception { ApplicationId.newInstance(1, 1), 1); AMRMTokenIdentifier oldTokenId = new AMRMTokenIdentifier(attemptId, 1); AMRMTokenIdentifier newTokenId = new AMRMTokenIdentifier(attemptId, 2); - Token oldToken = new Token<>( - oldTokenId.getBytes(), "oldpassword".getBytes(), oldTokenId.getKind(), - new Text()); - Token newToken = new Token<>( + Token oldToken = new Token( + oldTokenId.getBytes(), "oldpassword".getBytes(), oldTokenId.getKind(), + new Text()); + Token newToken = new Token( newTokenId.getBytes(), "newpassword".getBytes(), newTokenId.getKind(), new Text()); @@ -148,9 +155,12 @@ public void 
testAMRMTokenUpdate() throws Exception { UserGroupInformation testUgi = UserGroupInformation.createUserForTesting( "someuser", new String[0]); testUgi.addToken(oldToken); - testUgi.doAs((PrivilegedExceptionAction) () -> { - lca.heartbeat(); - return null; + testUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + lca.heartbeat(); + return null; + } }); lca.close(); @@ -165,14 +175,13 @@ public void testAMRMTokenUpdate() throws Exception { } } - Assertions.assertEquals(1, tokenCount, "too many AMRM tokens"); - Assertions.assertArrayEquals( - newToken.getIdentifier(), ugiToken.getIdentifier(), "token identifier not updated"); - Assertions.assertArrayEquals( - newToken.getPassword(), ugiToken.getPassword(), "token password not updated"); - Assertions.assertEquals( - new Text(ClientRMProxy.getAMRMTokenService(conf)) -, ugiToken.getService(), "AMRM token service not updated"); + assertEquals(1, tokenCount, "too many AMRM tokens"); + assertArrayEquals(newToken.getIdentifier(), ugiToken.getIdentifier(), + "token identifier not updated"); + assertArrayEquals(newToken.getPassword(), ugiToken.getPassword(), + "token password not updated"); + assertEquals(new Text(ClientRMProxy.getAMRMTokenService(conf)), + ugiToken.getService(), "AMRM token service not updated"); } @Test @@ -195,7 +204,7 @@ public void testAllocatedContainerResourceIsNotNull() { verify(eventHandler, times(1)).handle(containerAssignedCaptor.capture()); Container container = containerAssignedCaptor.getValue().getContainer(); Resource containerResource = container.getResource(); - Assertions.assertNotNull(containerResource); + assertNotNull(containerResource); assertThat(containerResource.getMemorySize()).isEqualTo(0); assertThat(containerResource.getVirtualCores()).isEqualTo(0); } @@ -275,8 +284,7 @@ public FinishApplicationMasterResponse finishApplicationMaster( @Override public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException { - 
Assertions.assertEquals( - responseId, request.getResponseId(), "response ID mismatch"); + assertEquals(responseId, request.getResponseId(), "response ID mismatch"); ++responseId; org.apache.hadoop.yarn.api.records.Token yarnToken = null; if (amToken != null) { @@ -285,13 +293,13 @@ public AllocateResponse allocate(AllocateRequest request) amToken.getPassword(), amToken.getService().toString()); } AllocateResponse response = AllocateResponse.newInstance(responseId, - Collections.emptyList(), - Collections.emptyList(), - Collections.emptyList(), + Collections.emptyList(), + Collections.emptyList(), + Collections.emptyList(), Resources.none(), null, 1, null, - Collections.emptyList(), + Collections.emptyList(), yarnToken, - Collections.emptyList()); + Collections.emptyList()); response.setApplicationPriority(Priority.newInstance(0)); return response; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java index 0cac55ad98f93..6f50c611a01d8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java @@ -28,7 +28,8 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class TestMRAppMetrics { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index 5d6edd113fd8d..456fbf2c3140d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -20,7 +20,15 @@ import static org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestCreator.createRequest; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyFloat; import static org.mockito.ArgumentMatchers.anyInt; @@ -150,7 +158,6 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.util.SystemClock; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -232,8 +239,8 @@ public void testSimple() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); - Assertions.assertEquals(4, 
rm.getMyFifoScheduler().lastAsk.size()); + assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(4, rm.getMyFifoScheduler().lastAsk.size()); // send another request with different resource and priority ContainerRequestEvent event3 = ContainerRequestCreator.createRequest(jobId, @@ -244,8 +251,8 @@ public void testSimple() throws Exception { // as nodes are not added, no allocations assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); - Assertions.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size()); + assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(3, rm.getMyFifoScheduler().lastAsk.size()); // update resources in scheduler nodeManager1.nodeHeartbeat(true); // Node heartbeat @@ -255,14 +262,14 @@ public void testSimple() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size()); + assertEquals(0, rm.getMyFifoScheduler().lastAsk.size()); checkAssignments(new ContainerRequestEvent[] {event1, event2, event3}, assigned, false); // check that the assigned container requests are cancelled allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size()); + assertEquals(5, rm.getMyFifoScheduler().lastAsk.size()); } @Test @@ -324,11 +331,11 @@ public void testMapNodeLocality() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // update resources in scheduler // Node heartbeat from rack-local first. This makes node h3 the first in the - // list of allocated containers, but it should not be assigned to task1. + // list of allocated containers but it should not be assigned to task1. 
nodeManager3.nodeHeartbeat(true); // Node heartbeat from node-local next. This allocates 2 node local // containers for task1 and task2. These should be matched with those tasks. @@ -343,7 +350,7 @@ public void testMapNodeLocality() throws Exception { for(TaskAttemptContainerAssignedEvent event : assigned) { if(event.getTaskAttemptID().equals(event3.getAttemptID())) { assigned.remove(event); - Assertions.assertEquals("h3", event.getContainer().getNodeId().getHost()); + assertEquals("h3", event.getContainer().getNodeId().getHost()); break; } } @@ -403,7 +410,7 @@ public void testResource() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // update resources in scheduler nodeManager1.nodeHeartbeat(true); // Node heartbeat @@ -469,8 +476,8 @@ public void testReducerRampdownDiagnostics() throws Exception { } final String killEventMessage = allocator.getTaskAttemptKillEvents().get(0) .getMessage(); - Assertions.assertTrue( - killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC), "No reducer rampDown preemption message"); + assertTrue(killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC), + "No reducer rampDown preemption message"); } @Test @@ -517,8 +524,8 @@ public void testPreemptReducers() throws Exception { mock(Container.class)); allocator.preemptReducesIfNeeded(); - Assertions.assertEquals( - 1, assignedRequests.preemptionWaitingReduces.size(), "The reducer is not preempted"); + assertEquals(1, assignedRequests.preemptionWaitingReduces.size(), + "The reducer is not preempted"); } @Test @@ -575,13 +582,13 @@ public void testNonAggressivelyPreemptReducers() throws Exception { clock.setTime(clock.getTime() + 1); allocator.preemptReducesIfNeeded(); - Assertions.assertEquals(0, - assignedRequests.preemptionWaitingReduces.size(), 
"The reducer is aggressively preempted"); + assertEquals(0, assignedRequests.preemptionWaitingReduces.size(), + "The reducer is aggressively preempted"); clock.setTime(clock.getTime() + (preemptThreshold) * 1000); allocator.preemptReducesIfNeeded(); - Assertions.assertEquals(1, - assignedRequests.preemptionWaitingReduces.size(), "The reducer is not preempted"); + assertEquals(1, assignedRequests.preemptionWaitingReduces.size(), + "The reducer is not preempted"); } @Test @@ -640,13 +647,13 @@ public void testUnconditionalPreemptReducers() throws Exception { clock.setTime(clock.getTime() + 1); allocator.preemptReducesIfNeeded(); - Assertions.assertEquals(0 -, assignedRequests.preemptionWaitingReduces.size(), "The reducer is preempted too soon"); + assertEquals(0, assignedRequests.preemptionWaitingReduces.size(), + "The reducer is preempted too soon"); clock.setTime(clock.getTime() + 1000 * forcePreemptThresholdSecs); allocator.preemptReducesIfNeeded(); - Assertions.assertEquals(1 -, assignedRequests.preemptionWaitingReduces.size(), "The reducer is not preempted"); + assertEquals(1, assignedRequests.preemptionWaitingReduces.size(), + "The reducer is not preempted"); } @Test @@ -737,7 +744,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { ContainerRequestEvent reqMapEvents; reqMapEvents = ContainerRequestCreator.createRequest(jobId, 0, Resource.newInstance(1024, 1), new String[]{"map"}); - allocator.sendRequests(Collections.singletonList(reqMapEvents)); + allocator.sendRequests(Arrays.asList(reqMapEvents)); // create some reduce requests ContainerRequestEvent reqReduceEvents; @@ -745,11 +752,11 @@ protected ApplicationMasterProtocol createSchedulerProxy() { createRequest(jobId, 0, Resource.newInstance(2048, 1), new String[] {"reduce"}, false, true); - allocator.sendRequests(Collections.singletonList(reqReduceEvents)); + allocator.sendRequests(Arrays.asList(reqReduceEvents)); allocator.schedule(); - // verify all the host-specific asks were sent plus one 
for the + // verify all of the host-specific asks were sent plus one for the // default rack and one for the ANY request - Assertions.assertEquals(3, mockScheduler.lastAsk.size()); + assertEquals(3, mockScheduler.lastAsk.size()); // verify ResourceRequest sent for MAP have appropriate node // label expression as per the configuration validateLabelsRequests(mockScheduler.lastAsk.get(0), false); @@ -760,7 +767,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { ContainerId cid0 = mockScheduler.assignContainer("map", false); allocator.schedule(); // default rack and one for the ANY request - Assertions.assertEquals(3, mockScheduler.lastAsk.size()); + assertEquals(3, mockScheduler.lastAsk.size()); validateLabelsRequests(mockScheduler.lastAsk.get(0), true); validateLabelsRequests(mockScheduler.lastAsk.get(1), true); validateLabelsRequests(mockScheduler.lastAsk.get(2), true); @@ -775,14 +782,14 @@ private void validateLabelsRequests(ResourceRequest resourceRequest, case "map": case "reduce": case NetworkTopology.DEFAULT_RACK: - Assertions.assertNull(resourceRequest.getNodeLabelExpression()); + assertNull(resourceRequest.getNodeLabelExpression()); break; case "*": - Assertions.assertEquals(isReduce ? "ReduceNodes" : "MapNodes", + assertEquals(isReduce ? "ReduceNodes" : "MapNodes", resourceRequest.getNodeLabelExpression()); break; default: - Assertions.fail("Invalid resource location " + fail("Invalid resource location " + resourceRequest.getResourceName()); } } @@ -809,7 +816,7 @@ public void testUpdateCollectorInfo() throws Exception { new Text("renewer"), null); ident.setSequenceNumber(1); Token collectorToken = - new Token<>(ident.getBytes(), + new Token(ident.getBytes(), new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME, new Text(localAddr)); org.apache.hadoop.yarn.api.records.Token token = @@ -857,7 +864,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { // new token. 
ident.setSequenceNumber(100); Token collectorToken1 = - new Token<>(ident.getBytes(), + new Token(ident.getBytes(), new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME, new Text(localAddr)); token = org.apache.hadoop.yarn.api.records.Token.newInstance( @@ -936,7 +943,7 @@ public void testMapReduceScheduling() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // update resources in scheduler nodeManager1.nodeHeartbeat(true); // Node heartbeat @@ -951,8 +958,8 @@ public void testMapReduceScheduling() throws Exception { // validate that no container is assigned to h1 as it doesn't have 2048 for (TaskAttemptContainerAssignedEvent assig : assigned) { - Assertions.assertNotEquals("h1", assig - .getContainer().getNodeId().getHost(), "Assigned count not correct"); + assertNotEquals("h1", assig.getContainer().getNodeId().getHost(), + "Assigned count not correct"); } } @@ -972,14 +979,19 @@ public MyResourceManager(Configuration conf, RMStateStore store) { public void serviceStart() throws Exception { super.serviceStart(); // Ensure that the application attempt IDs for all the tests are the same - // The application attempt IDs will be used as the login usernames + // The application attempt IDs will be used as the login user names MyResourceManager.setClusterTimeStamp(fakeClusterTimeStamp); } @Override protected EventHandler createSchedulerEventDispatcher() { // Dispatch inline for test sanity - return event -> scheduler.handle(event); + return new EventHandler() { + @Override + public void handle(SchedulerEvent event) { + scheduler.handle(event); + } + }; } @Override protected ResourceScheduler createScheduler() { @@ -1035,10 +1047,10 @@ protected Dispatcher createDispatcher() { protected ContainerAllocator createContainerAllocator( ClientService 
clientService, AppContext context) { return new MyContainerAllocator(rm, appAttemptId, context); - } + }; }; - Assertions.assertEquals(0.0, rmApp.getProgress(), 0.0); + assertEquals(0.0, rmApp.getProgress(), 0.0); mrApp.submit(conf); Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next() @@ -1077,23 +1089,23 @@ protected ContainerAllocator createContainerAllocator( allocator.schedule(); // Send heartbeat rm.drainEvents(); - Assertions.assertEquals(0.05f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.05f, rmApp.getProgress(), 0.001f); + assertEquals(0.05f, job.getProgress(), 0.001f); + assertEquals(0.05f, rmApp.getProgress(), 0.001f); // Finish off 1 map. Iterator it = job.getTasks().values().iterator(); finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1); allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.095f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.095f, rmApp.getProgress(), 0.001f); + assertEquals(0.095f, job.getProgress(), 0.001f); + assertEquals(0.095f, rmApp.getProgress(), 0.001f); // Finish off 7 more so that map-progress is 80% finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7); allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.41f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.41f, rmApp.getProgress(), 0.001f); + assertEquals(0.41f, job.getProgress(), 0.001f); + assertEquals(0.41f, rmApp.getProgress(), 0.001f); // Finish off the 2 remaining maps finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2); @@ -1117,16 +1129,16 @@ protected ContainerAllocator createContainerAllocator( allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.59f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.59f, rmApp.getProgress(), 0.001f); + assertEquals(0.59f, job.getProgress(), 0.001f); + assertEquals(0.59f, rmApp.getProgress(), 0.001f); // Finish off the remaining 8 reduces. 
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8); allocator.schedule(); rm.drainEvents(); // Remaining is JobCleanup - Assertions.assertEquals(0.95f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.95f, rmApp.getProgress(), 0.001f); + assertEquals(0.95f, job.getProgress(), 0.001f); + assertEquals(0.95f, rmApp.getProgress(), 0.001f); } private void finishNextNTasks(DrainDispatcher rmDispatcher, MockNM node, @@ -1141,10 +1153,11 @@ private void finishNextNTasks(DrainDispatcher rmDispatcher, MockNM node, private void finishTask(DrainDispatcher rmDispatcher, MockNM node, MRApp mrApp, Task task) throws Exception { TaskAttempt attempt = task.getAttempts().values().iterator().next(); - List contStatus = new ArrayList<>(1); + List contStatus = new ArrayList(1); contStatus.add(ContainerStatus.newInstance(attempt.getAssignedContainerID(), ContainerState.COMPLETE, "", 0)); - Map> statusUpdate = new HashMap<>(1); + Map> statusUpdate = + new HashMap>(1); statusUpdate.put(mrApp.getAppID(), contStatus); node.nodeHeartbeat(statusUpdate, true); rmDispatcher.await(); @@ -1186,10 +1199,10 @@ protected Dispatcher createDispatcher() { protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context) { return new MyContainerAllocator(rm, appAttemptId, context); - } + }; }; - Assertions.assertEquals(0.0, rmApp.getProgress(), 0.0); + assertEquals(0.0, rmApp.getProgress(), 0.0); mrApp.submit(conf); Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next() @@ -1224,8 +1237,8 @@ protected ContainerAllocator createContainerAllocator( allocator.schedule(); // Send heartbeat rm.drainEvents(); - Assertions.assertEquals(0.05f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.05f, rmApp.getProgress(), 0.001f); + assertEquals(0.05f, job.getProgress(), 0.001f); + assertEquals(0.05f, rmApp.getProgress(), 0.001f); Iterator it = job.getTasks().values().iterator(); @@ -1233,22 +1246,22 @@ protected ContainerAllocator 
createContainerAllocator( finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1); allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.14f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.14f, rmApp.getProgress(), 0.001f); + assertEquals(0.14f, job.getProgress(), 0.001f); + assertEquals(0.14f, rmApp.getProgress(), 0.001f); // Finish off 5 more map so that map-progress is 60% finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5); allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.59f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.59f, rmApp.getProgress(), 0.001f); + assertEquals(0.59f, job.getProgress(), 0.001f); + assertEquals(0.59f, rmApp.getProgress(), 0.001f); // Finish off remaining map so that map-progress is 100% finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4); allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0.95f, job.getProgress(), 0.001f); - Assertions.assertEquals(0.95f, rmApp.getProgress(), 0.001f); + assertEquals(0.95f, job.getProgress(), 0.001f); + assertEquals(0.95f, rmApp.getProgress(), 0.001f); } @Test @@ -1299,17 +1312,17 @@ public void testUpdatedNodes() throws Exception { nm1.nodeHeartbeat(true); rm.drainEvents(); - Assertions.assertEquals(1, allocator.getJobUpdatedNodeEvents().size()); - Assertions.assertEquals(3, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size()); + assertEquals(1, allocator.getJobUpdatedNodeEvents().size()); + assertEquals(3, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size()); allocator.getJobUpdatedNodeEvents().clear(); // get the assignment assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(1, assigned.size()); - Assertions.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId()); + assertEquals(1, assigned.size()); + assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId()); // no updated nodes reported - 
Assertions.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); - Assertions.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty()); + assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); + assertTrue(allocator.getTaskAttemptKillEvents().isEmpty()); // mark nodes bad nm1.nodeHeartbeat(false); @@ -1319,23 +1332,23 @@ public void testUpdatedNodes() throws Exception { // schedule response returns updated nodes assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size()); + assertEquals(0, assigned.size()); // updated nodes are reported - Assertions.assertEquals(1, allocator.getJobUpdatedNodeEvents().size()); - Assertions.assertEquals(1, allocator.getTaskAttemptKillEvents().size()); - Assertions.assertEquals(2, + assertEquals(1, allocator.getJobUpdatedNodeEvents().size()); + assertEquals(1, allocator.getTaskAttemptKillEvents().size()); + assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size()); - Assertions.assertEquals(attemptId, + assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID()); allocator.getJobUpdatedNodeEvents().clear(); allocator.getTaskAttemptKillEvents().clear(); assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size()); + assertEquals(0, assigned.size()); // no updated nodes reported - Assertions.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); - Assertions.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty()); + assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); + assertTrue(allocator.getTaskAttemptKillEvents().isEmpty()); } @Test @@ -1404,7 +1417,7 @@ public void testBlackListedNodes() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // Send events to blacklist nodes h1 
and h2 ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false); @@ -1418,9 +1431,9 @@ public void testBlackListedNodes() throws Exception { rm.drainEvents(); assigned = allocator.schedule(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); assertBlacklistAdditionsAndRemovals(2, 0, rm); // mark h1/h2 as bad nodes @@ -1431,7 +1444,7 @@ public void testBlackListedNodes() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); assertBlacklistAdditionsAndRemovals(0, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); nodeManager3.nodeHeartbeat(true); // Node heartbeat rm.drainEvents(); @@ -1443,8 +1456,8 @@ public void testBlackListedNodes() throws Exception { // validate that all containers are assigned to h3 for (TaskAttemptContainerAssignedEvent assig : assigned) { - assertEquals("h3", assig - .getContainer().getNodeId().getHost(), "Assigned container host not correct"); + assertEquals("h3", assig.getContainer().getNodeId().getHost(), + "Assigned container host not correct"); } } @@ -1489,7 +1502,7 @@ public void testIgnoreBlacklisting() throws Exception { assigned = getContainerOnHost(jobId, 1, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); LOG.info("Failing container _1 on H1 (Node should be blacklisted and" + " ignore blacklisting enabled"); @@ -1504,47 +1517,47 @@ public void testIgnoreBlacklisting() throws Exception { assigned = getContainerOnHost(jobId, 2, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 1, 0, 0, 
1, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // Known=1, blacklisted=1, ignore should be true - assign 1 assigned = getContainerOnHost(jobId, 2, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); nodeManagers[nmNum] = registerNodeManager(nmNum++, rm); // Known=2, blacklisted=1, ignore should be true - assign 1 anyway. assigned = getContainerOnHost(jobId, 3, 1024, new String[] {"h2"}, nodeManagers[1], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); nodeManagers[nmNum] = registerNodeManager(nmNum++, rm); // Known=3, blacklisted=1, ignore should be true - assign 1 anyway. assigned = getContainerOnHost(jobId, 4, 1024, new String[] {"h3"}, nodeManagers[2], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); // Known=3, blacklisted=1, ignore should be true - assign 1 assigned = getContainerOnHost(jobId, 5, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); nodeManagers[nmNum] = registerNodeManager(nmNum++, rm); // Known=4, blacklisted=1, ignore should be false - assign 1 anyway assigned = getContainerOnHost(jobId, 6, 1024, new String[] {"h4"}, nodeManagers[3], allocator, 0, 0, 1, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); // Test blacklisting re-enabled. 
// Known=4, blacklisted=1, ignore should be false - no assignment on h1 assigned = getContainerOnHost(jobId, 7, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // RMContainerRequestor would have created a replacement request. // Blacklist h2 @@ -1557,20 +1570,20 @@ public void testIgnoreBlacklisting() throws Exception { assigned = getContainerOnHost(jobId, 8, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 1, 0, 0, 2, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // Known=4, blacklisted=2, ignore should be true. Should assign 2 // containers. assigned = getContainerOnHost(jobId, 8, 1024, new String[] {"h1"}, nodeManagers[0], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(2, assigned.size(), "No of assignments must be 2"); + assertEquals(2, assigned.size(), "No of assignments must be 2"); // Known=4, blacklisted=2, ignore should be true. 
assigned = getContainerOnHost(jobId, 9, 1024, new String[] {"h2"}, nodeManagers[1], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); // Test blacklist while ignore blacklisting enabled ContainerFailedEvent f3 = createFailEvent(jobId, 4, "h3", false); @@ -1581,7 +1594,7 @@ public void testIgnoreBlacklisting() throws Exception { assigned = getContainerOnHost(jobId, 10, 1024, new String[] {"h3"}, nodeManagers[2], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); // Assign on 5 more nodes - to re-enable blacklisting for (int i = 0; i < 5; i++) { @@ -1590,14 +1603,14 @@ public void testIgnoreBlacklisting() throws Exception { getContainerOnHost(jobId, 11 + i, 1024, new String[] {String.valueOf(5 + i)}, nodeManagers[4 + i], allocator, 0, 0, (i == 4 ? 3 : 0), 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); } // Test h3 (blacklisted while ignoring blacklisting) is blacklisted. 
assigned = getContainerOnHost(jobId, 20, 1024, new String[] {"h3"}, nodeManagers[2], allocator, 0, 0, 0, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); } private MockNM registerNodeManager(int i, MyResourceManager rm) @@ -1624,7 +1637,7 @@ List getContainerOnHost(JobId jobId, rm.drainEvents(); assertBlacklistAdditionsAndRemovals( expectedAdditions1, expectedRemovals1, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // Heartbeat from the required nodeManager mockNM.nodeHeartbeat(true); @@ -1689,7 +1702,7 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { // as nodes are not added, no allocations List assigned = allocator.schedule(); rm.drainEvents(); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); LOG.info("h1 Heartbeat (To actually schedule the containers)"); // update resources in scheduler @@ -1700,7 +1713,7 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); assertBlacklistAdditionsAndRemovals(0, 0, rm); - Assertions.assertEquals(1, assigned.size(), "No of assignments must be 1"); + assertEquals(1, assigned.size(), "No of assignments must be 1"); LOG.info("Failing container _1 on H1 (should blacklist the node)"); // Send events to blacklist nodes h1 and h2 @@ -1718,7 +1731,7 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); assertBlacklistAdditionsAndRemovals(1, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); // send another request with different resource and priority 
ContainerRequestEvent event3 = @@ -1739,7 +1752,7 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); assertBlacklistAdditionsAndRemovals(0, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); //RMContainerAllocator gets assigned a p:5 on a blacklisted node. @@ -1748,9 +1761,9 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { assigned = allocator.schedule(); rm.drainEvents(); assertBlacklistAdditionsAndRemovals(0, 0, rm); - Assertions.assertEquals(0, assigned.size(), "No of assignments must be 0"); + assertEquals(0, assigned.size(), "No of assignments must be 0"); - //Heartbeat from H3 to schedule on this host. + //Hearbeat from H3 to schedule on this host. LOG.info("h3 Heartbeat (To re-schedule the containers)"); nodeManager3.nodeHeartbeat(true); // Node heartbeat rm.drainEvents(); @@ -1767,27 +1780,27 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { " with priority " + assig.getContainer().getPriority()); } - Assertions.assertEquals(2, assigned.size(), "No of assignments must be 2"); + assertEquals(2, assigned.size(), "No of assignments must be 2"); // validate that all containers are assigned to h3 for (TaskAttemptContainerAssignedEvent assig : assigned) { - Assertions.assertEquals("Assigned container " + assig.getContainer().getId() - + " host not correct", "h3", assig.getContainer().getNodeId().getHost()); + assertEquals("h3", assig.getContainer().getNodeId().getHost(), + "Assigned container " + assig.getContainer().getId() + " host not correct"); } } private static void assertBlacklistAdditionsAndRemovals( int expectedAdditions, int expectedRemovals, MyResourceManager rm) { - Assertions.assertEquals(expectedAdditions, + assertEquals(expectedAdditions, rm.getMyFifoScheduler().lastBlacklistAdditions.size()); - 
Assertions.assertEquals(expectedRemovals, + assertEquals(expectedRemovals, rm.getMyFifoScheduler().lastBlacklistRemovals.size()); } private static void assertAsksAndReleases(int expectedAsk, int expectedRelease, MyResourceManager rm) { - Assertions.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size()); - Assertions.assertEquals(expectedRelease, + assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size()); + assertEquals(expectedRelease, rm.getMyFifoScheduler().lastRelease.size()); } @@ -1819,7 +1832,7 @@ public synchronized Allocation allocate( List schedulingRequests, List release, List blacklistAdditions, List blacklistRemovals, ContainerUpdates updateRequests) { - List askCopy = new ArrayList<>(); + List askCopy = new ArrayList(); for (ResourceRequest req : ask) { ResourceRequest reqCopy = ResourceRequest.newInstance(req .getPriority(), req.getResourceName(), req.getCapability(), req @@ -1866,7 +1879,7 @@ public synchronized Allocation allocate( List schedulingRequests, List release, List blacklistAdditions, List blacklistRemovals, ContainerUpdates updateRequests) { - List askCopy = new ArrayList<>(); + List askCopy = new ArrayList(); for (ResourceRequest req : ask) { ResourceRequest reqCopy = ResourceRequest.newInstance(req .getPriority(), req.getResourceName(), req.getCapability(), req @@ -1930,17 +1943,16 @@ private ContainerAllocatorEvent createDeallocateEvent(JobId jobId, private void checkAssignments(ContainerRequestEvent[] requests, List assignments, boolean checkHostMatch) { - Assertions.assertNotNull(assignments, "Container not assigned"); - Assertions.assertEquals(requests.length -, assignments.size(), "Assigned count not correct"); + assertNotNull(assignments, "Container not assigned"); + assertEquals(requests.length, assignments.size(), "Assigned count not correct"); // check for uniqueness of containerIDs Set containerIds = new HashSet<>(); for (TaskAttemptContainerAssignedEvent assigned : assignments) { 
containerIds.add(assigned.getContainer().getId()); } - Assertions.assertEquals(assignments - .size(), containerIds.size(), "Assigned containers must be different"); + assertEquals(assignments.size(), containerIds.size(), + "Assigned containers must be different"); // check for all assignment for (ContainerRequestEvent req : requests) { @@ -1957,12 +1969,12 @@ private void checkAssignments(ContainerRequestEvent[] requests, private void checkAssignment(ContainerRequestEvent request, TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) { - Assertions.assertNotNull(assigned, "Nothing assigned to attempt " + assertNotNull(assigned, "Nothing assigned to attempt " + request.getAttemptID()); - Assertions.assertEquals(request.getAttemptID() -, assigned.getTaskAttemptID(), "assigned to wrong attempt"); + assertEquals(request.getAttemptID(), assigned.getTaskAttemptID(), + "assigned to wrong attempt"); if (checkHostMatch) { - Assertions.assertTrue(Arrays.asList( + assertTrue(Arrays.asList( request.getHosts()).contains( assigned.getContainer().getNodeId().getHost()), "Not assigned to requested host"); } @@ -1991,16 +2003,19 @@ private static AppContext createAppContext( when(context.getClock()).thenReturn(new ControlledClock()); when(context.getClusterInfo()).thenReturn( new ClusterInfo(Resource.newInstance(10240, 1))); - when(context.getEventHandler()).thenReturn((EventHandler) event -> { - // Only capture interesting events. 
- if (event instanceof TaskAttemptContainerAssignedEvent) { - events.add((TaskAttemptContainerAssignedEvent) event); - } else if (event instanceof TaskAttemptKillEvent) { - taskAttemptKillEvents.add((TaskAttemptKillEvent)event); - } else if (event instanceof JobUpdatedNodesEvent) { - jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event); - } else if (event instanceof JobEvent) { - jobEvents.add((JobEvent)event); + when(context.getEventHandler()).thenReturn(new EventHandler() { + @Override + public void handle(Event event) { + // Only capture interesting events. + if (event instanceof TaskAttemptContainerAssignedEvent) { + events.add((TaskAttemptContainerAssignedEvent) event); + } else if (event instanceof TaskAttemptKillEvent) { + taskAttemptKillEvents.add((TaskAttemptKillEvent)event); + } else if (event instanceof JobUpdatedNodesEvent) { + jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event); + } else if (event instanceof JobEvent) { + jobEvents.add((JobEvent)event); + } } }); return context; @@ -2101,7 +2116,11 @@ public List schedule() throws Exception { // before doing heartbeat with RM, drain all the outstanding events to // ensure all the requests before this heartbeat is to be handled - GenericTestUtils.waitFor(() -> eventQueue.isEmpty(), 100, 10000); + GenericTestUtils.waitFor(new Supplier() { + public Boolean get() { + return eventQueue.isEmpty(); + } + }, 100, 10000); // run the scheduler super.heartbeat(); @@ -2225,7 +2244,7 @@ public void testReduceScheduling() throws Exception { verify(allocator, times(1)).setIsReduceStarted(true); // Test reduce ramp-up - doReturn(Resources.createResource(100 * 1024, 100)).when(allocator) + doReturn(Resources.createResource(100 * 1024, 100 * 1)).when(allocator) .getResourceLimit(); allocator.scheduleReduces( totalMaps, succeededMaps, @@ -2239,7 +2258,7 @@ public void testReduceScheduling() throws Exception { // Test reduce ramp-down scheduledReduces = 3; - doReturn(Resources.createResource(10 * 1024, 
10)).when(allocator) + doReturn(Resources.createResource(10 * 1024, 10 * 1)).when(allocator) .getResourceLimit(); allocator.scheduleReduces( totalMaps, succeededMaps, @@ -2255,7 +2274,7 @@ public void testReduceScheduling() throws Exception { // should be invoked twice. scheduledMaps = 2; assignedReduces = 2; - doReturn(Resources.createResource(10 * 1024, 10)).when(allocator) + doReturn(Resources.createResource(10 * 1024, 10 * 1)).when(allocator) .getResourceLimit(); allocator.scheduleReduces( totalMaps, succeededMaps, @@ -2273,7 +2292,7 @@ public void testReduceScheduling() throws Exception { // Test ramp-down when enough memory but not enough cpu resource scheduledMaps = 10; assignedReduces = 0; - doReturn(Resources.createResource(100 * 1024, 5)).when(allocator) + doReturn(Resources.createResource(100 * 1024, 5 * 1)).when(allocator) .getResourceLimit(); allocator.scheduleReduces(totalMaps, succeededMaps, scheduledMaps, scheduledReduces, assignedMaps, assignedReduces, mapResourceReqt, @@ -2282,7 +2301,7 @@ public void testReduceScheduling() throws Exception { verify(allocator, times(3)).rampDownReduces(anyInt()); // Test ramp-down when enough cpu but not enough memory resource - doReturn(Resources.createResource(10 * 1024, 100)).when(allocator) + doReturn(Resources.createResource(10 * 1024, 100 * 1)).when(allocator) .getResourceLimit(); allocator.scheduleReduces(totalMaps, succeededMaps, scheduledMaps, scheduledReduces, assignedMaps, assignedReduces, mapResourceReqt, @@ -2344,13 +2363,13 @@ public void testCompletedTasksRecalculateSchedule() throws Exception { allocator.recalculatedReduceSchedule = false; allocator.schedule(); - Assertions.assertFalse( - allocator.recalculatedReduceSchedule, "Unexpected recalculate of reduce schedule"); + assertFalse(allocator.recalculatedReduceSchedule, + "Unexpected recalculate of reduce schedule"); doReturn(1).when(job).getCompletedMaps(); allocator.schedule(); - Assertions.assertTrue( - allocator.recalculatedReduceSchedule, 
"Expected recalculate of reduce schedule"); + assertTrue(allocator.recalculatedReduceSchedule, + "Expected recalculate of reduce schedule"); } @Test @@ -2388,14 +2407,14 @@ protected synchronized void heartbeat() throws Exception { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals(5, allocator.getLastHeartbeatTime()); + assertEquals(5, allocator.getLastHeartbeatTime()); clock.setTime(7); timeToWaitMs = 5000; while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals(7, allocator.getLastHeartbeatTime()); + assertEquals(7, allocator.getLastHeartbeatTime()); final AtomicBoolean callbackCalled = new AtomicBoolean(false); allocator.runOnNextHeartbeat(new Runnable() { @@ -2410,8 +2429,8 @@ public void run() { Thread.sleep(10); timeToWaitMs -= 10; } - Assertions.assertEquals(8, allocator.getLastHeartbeatTime()); - Assertions.assertTrue(callbackCalled.get()); + assertEquals(8, allocator.getLastHeartbeatTime()); + assertTrue(callbackCalled.get()); } @Test @@ -2439,12 +2458,12 @@ public void testCompletedContainerEvent() { TaskAttemptEvent event = allocator.createContainerFinishedEvent(status, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, + assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, event.getType()); TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent( abortedStatus, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType()); + assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType()); // PREEMPTED ContainerId containerId2 = @@ -2457,12 +2476,12 @@ public void testCompletedContainerEvent() { TaskAttemptEvent event2 = allocator.createContainerFinishedEvent(status2, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, + assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, event2.getType()); TaskAttemptEvent abortedEvent2 = 
allocator.createContainerFinishedEvent( preemptedStatus, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType()); + assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType()); // KILLED_BY_CONTAINER_SCHEDULER ContainerId containerId3 = @@ -2476,12 +2495,12 @@ public void testCompletedContainerEvent() { TaskAttemptEvent event3 = allocator.createContainerFinishedEvent(status3, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, + assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, event3.getType()); TaskAttemptEvent abortedEvent3 = allocator.createContainerFinishedEvent( killedByContainerSchedulerStatus, attemptId); - Assertions.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent3.getType()); + assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent3.getType()); } @Test @@ -2514,7 +2533,7 @@ protected Dispatcher createDispatcher() { protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context) { return new MyContainerAllocator(rm, appAttemptId, context); - } + }; }; mrApp.submit(conf); @@ -2522,9 +2541,9 @@ protected ContainerAllocator createContainerAllocator( MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator(); amDispatcher.await(); - Assertions.assertTrue(allocator.isApplicationMasterRegistered()); + assertTrue(allocator.isApplicationMasterRegistered()); mrApp.stop(); - Assertions.assertTrue(allocator.isUnregistered()); + assertTrue(allocator.isUnregistered()); } // Step-1 : AM send allocate request for 2 ContainerRequests and 1 @@ -2581,7 +2600,7 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() new MyContainerAllocator(rm1, conf, appAttemptId, mockJob); // Step-1 : AM send allocate request for 2 ContainerRequests and 1 - // blackListNode + // blackListeNode // create the container request // send MAP request ContainerRequestEvent event1 = @@ -2604,8 +2623,8 @@ public void 
testRMContainerAllocatorResendsRequestsOnRMRestart() List assignedContainers = allocator.schedule(); rm1.drainEvents(); - Assertions.assertEquals(0 -, assignedContainers.size(), "No of assignments must be 0"); + assertEquals(0, + assignedContainers.size(), "No of assignments must be 0"); // Why ask is 3, not 4? --> ask from blacklisted node h2 is removed assertAsksAndReleases(3, 0, rm1); assertBlacklistAdditionsAndRemovals(1, 0, rm1); @@ -2616,14 +2635,14 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() // Step-2 : 2 containers are allocated by RM. assignedContainers = allocator.schedule(); rm1.drainEvents(); - Assertions.assertEquals(2 -, assignedContainers.size(), "No of assignments must be 2"); + assertEquals(2, + assignedContainers.size(), "No of assignments must be 2"); assertAsksAndReleases(0, 0, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); assignedContainers = allocator.schedule(); - Assertions.assertEquals(0 -, assignedContainers.size(), "No of assignments must be 0"); + assertEquals(0, + assignedContainers.size(), "No of assignments must be 0"); assertAsksAndReleases(3, 0, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); @@ -2642,8 +2661,8 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() allocator.sendDeallocate(deallocate1); assignedContainers = allocator.schedule(); - Assertions.assertEquals(0 -, assignedContainers.size(), "No of assignments must be 0"); + assertEquals(0, + assignedContainers.size(), "No of assignments must be 0"); assertAsksAndReleases(3, 1, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); @@ -2655,7 +2674,7 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() // NM should be rebooted on heartbeat, even first heartbeat for nm2 NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true); - Assertions.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction()); + assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction()); // new NM to represent NM re-register nm1 = 
new MockNM("h1:1234", 10240, rm2.getResourceTrackerService()); @@ -2665,7 +2684,7 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() // Step-4 : On RM restart, AM(does not know RM is restarted) sends // additional containerRequest(event4) and blacklisted nodes. - // Intern RM send rsync command + // Intern RM send resync command // send deallocate request, release=1 ContainerAllocatorEvent deallocate2 = @@ -2708,12 +2727,12 @@ public void testRMContainerAllocatorResendsRequestsOnRMRestart() assignedContainers = allocator.schedule(); rm2.drainEvents(); - Assertions.assertEquals(3 -, assignedContainers.size(), "Number of container should be 3"); + assertEquals(3, assignedContainers.size(), + "Number of container should be 3"); for (TaskAttemptContainerAssignedEvent assig : assignedContainers) { assertEquals("h1", assig.getContainer().getNodeId().getHost(), - "Assigned count not correct"); + "Assigned count not correct"); } rm1.stop(); @@ -2754,10 +2773,10 @@ protected Resource getMaxContainerCapability() { Resource.newInstance(memory, maxContainerSupported.getVirtualCores()), new String[0], false, false); - allocator.sendRequests(Collections.singletonList(mapRequestEvt)); + allocator.sendRequests(Arrays.asList(mapRequestEvt)); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskMap); + assertEquals(0, mockScheduler.lastAnyAskMap); } @Test @@ -2793,14 +2812,14 @@ protected Resource getMaxContainerCapability() { Resource.newInstance(memory, maxContainerSupported.getVirtualCores()), new String[0], false, true); - allocator.sendRequests(Collections.singletonList(reduceRequestEvt)); + allocator.sendRequests(Arrays.asList(reduceRequestEvt)); // Reducer container requests are added to the pending queue upon request, // schedule all reducers here so that we can observe if reducer requests // are accepted by RMContainerAllocator on RM side. 
allocator.scheduleAllReduces(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskReduce); + assertEquals(0, mockScheduler.lastAnyAskReduce); } @Test @@ -2835,13 +2854,12 @@ public void testRMUnavailable() allocator.jobEvents.clear(); try { allocator.schedule(); - Assertions.fail("Should Have Exception"); + fail("Should Have Exception"); } catch (RMContainerAllocationException e) { - Assertions.assertTrue(e.getMessage().contains("Could not contact RM after")); + assertTrue(e.getMessage().contains("Could not contact RM after")); } rm1.drainEvents(); - Assertions.assertEquals(1 -, allocator.jobEvents.size(), "Should Have 1 Job Event"); + assertEquals(1, allocator.jobEvents.size(), "Should Have 1 Job Event"); JobEvent event = allocator.jobEvents.get(0); assertEquals(event.getType(), JobEventType.JOB_AM_REBOOT, "Should Reboot"); } @@ -2885,28 +2903,31 @@ public void testAMRMTokenUpdate() throws Exception { final Token oldToken = rm.getRMContext().getRMApps() .get(appId).getRMAppAttempt(appAttemptId).getAMRMToken(); - Assertions.assertNotNull(oldToken, "app should have a token"); + assertNotNull(oldToken, "app should have a token"); UserGroupInformation testUgi = UserGroupInformation.createUserForTesting( "someuser", new String[0]); Token newToken = testUgi.doAs( - (PrivilegedExceptionAction>) () -> { - MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, - appAttemptId, mockJob); - - // Keep heartbeat until RM thinks the token has been updated - Token currentToken = oldToken; - long startTime = Time.monotonicNow(); - while (currentToken == oldToken) { - if (Time.monotonicNow() - startTime > 20000) { - Assertions.fail("Took to long to see AMRM token change"); + new PrivilegedExceptionAction>() { + @Override + public Token run() throws Exception { + MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, + appAttemptId, mockJob); + + // Keep heartbeating until RM thinks the token has been updated + Token currentToken = 
oldToken; + long startTime = Time.monotonicNow(); + while (currentToken == oldToken) { + if (Time.monotonicNow() - startTime > 20000) { + fail("Took to long to see AMRM token change"); + } + Thread.sleep(100); + allocator.schedule(); + currentToken = rm.getRMContext().getRMApps().get(appId) + .getRMAppAttempt(appAttemptId).getAMRMToken(); } - Thread.sleep(100); - allocator.schedule(); - currentToken = rm.getRMContext().getRMApps().get(appId) - .getRMAppAttempt(appAttemptId).getAMRMToken(); - } - return currentToken; + return currentToken; + } }); // verify there is only one AMRM token in the UGI and it matches the @@ -2920,13 +2941,12 @@ public void testAMRMTokenUpdate() throws Exception { } } - Assertions.assertEquals(1, tokenCount, "too many AMRM tokens"); - Assertions.assertArrayEquals( - newToken.getIdentifier(), ugiToken.getIdentifier(), "token identifier not updated"); - Assertions.assertArrayEquals( - newToken.getPassword(), ugiToken.getPassword(), "token password not updated"); - Assertions.assertEquals( - new Text(rmAddr), ugiToken.getService(), "AMRM token service not updated"); + assertEquals(1, tokenCount, "too many AMRM tokens"); + assertArrayEquals(newToken.getIdentifier(), ugiToken.getIdentifier(), + "token identifier not updated"); + assertArrayEquals(newToken.getPassword(), ugiToken.getPassword(), + "token password not updated"); + assertEquals(new Text(rmAddr), ugiToken.getService(), "AMRM token service not updated"); } @Test @@ -2966,7 +2986,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { @Override protected void setRequestLimit(Priority priority, Resource capability, int limit) { - Assertions.fail("setRequestLimit() should not be invoked"); + fail("setRequestLimit() should not be invoked"); } }; @@ -3046,24 +3066,24 @@ protected ApplicationMasterProtocol createSchedulerProxy() { allocator.sendRequests(Arrays.asList(reqReduceEvents)); allocator.schedule(); - // verify all the host-specific asks were sent plus one for the + // 
verify all of the host-specific asks were sent plus one for the // default rack and one for the ANY request - Assertions.assertEquals(reqMapEvents.length + 2, mockScheduler.lastAsk.size()); + assertEquals(reqMapEvents.length + 2, mockScheduler.lastAsk.size()); // verify AM is only asking for the map limit overall - Assertions.assertEquals(MAP_LIMIT, mockScheduler.lastAnyAskMap); + assertEquals(MAP_LIMIT, mockScheduler.lastAnyAskMap); // assign a map task and verify we do not ask for any more maps ContainerId cid0 = mockScheduler.assignContainer("h0", false); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(2, mockScheduler.lastAnyAskMap); + assertEquals(2, mockScheduler.lastAnyAskMap); // complete the map task and verify that we ask for one more mockScheduler.completeContainer(cid0); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(3, mockScheduler.lastAnyAskMap); + assertEquals(3, mockScheduler.lastAnyAskMap); // assign three more maps and verify we ask for no more maps ContainerId cid1 = mockScheduler.assignContainer("h1", false); @@ -3071,7 +3091,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { ContainerId cid3 = mockScheduler.assignContainer("h3", false); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskMap); + assertEquals(0, mockScheduler.lastAnyAskMap); // complete two containers and verify we only asked for one more // since at that point all maps should be scheduled/completed @@ -3079,7 +3099,7 @@ protected ApplicationMasterProtocol createSchedulerProxy() { mockScheduler.completeContainer(cid3); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(1, mockScheduler.lastAnyAskMap); + assertEquals(1, mockScheduler.lastAnyAskMap); // allocate the last container and complete the first one // and verify there are no more map asks. 
@@ -3087,40 +3107,40 @@ protected ApplicationMasterProtocol createSchedulerProxy() { ContainerId cid4 = mockScheduler.assignContainer("h4", false); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskMap); + assertEquals(0, mockScheduler.lastAnyAskMap); // complete the last map mockScheduler.completeContainer(cid4); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskMap); + assertEquals(0, mockScheduler.lastAnyAskMap); // verify only reduce limit being requested - Assertions.assertEquals(REDUCE_LIMIT, mockScheduler.lastAnyAskReduce); + assertEquals(REDUCE_LIMIT, mockScheduler.lastAnyAskReduce); // assign a reducer and verify ask goes to zero cid0 = mockScheduler.assignContainer("h0", true); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskReduce); + assertEquals(0, mockScheduler.lastAnyAskReduce); // complete the reducer and verify we ask for another mockScheduler.completeContainer(cid0); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(1, mockScheduler.lastAnyAskReduce); + assertEquals(1, mockScheduler.lastAnyAskReduce); // assign a reducer and verify ask goes to zero cid0 = mockScheduler.assignContainer("h0", true); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskReduce); + assertEquals(0, mockScheduler.lastAnyAskReduce); // complete the reducer and verify no more reducers mockScheduler.completeContainer(cid0); allocator.schedule(); allocator.schedule(); - Assertions.assertEquals(0, mockScheduler.lastAnyAskReduce); + assertEquals(0, mockScheduler.lastAnyAskReduce); allocator.close(); } @@ -3128,37 +3148,37 @@ protected ApplicationMasterProtocol createSchedulerProxy() { public void testAttemptNotFoundCausesRMCommunicatorException() throws Exception { - Configuration conf = new Configuration(); - MyResourceManager rm = new MyResourceManager(conf); - 
rm.start(); + assertThrows(RMContainerAllocationException.class, () -> { + Configuration conf = new Configuration(); + MyResourceManager rm = new MyResourceManager(conf); + rm.start(); - // Submit the application - RMApp app = MockRMAppSubmitter.submitWithMemory(1024, rm); - rm.drainEvents(); - - MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); - amNodeManager.nodeHeartbeat(true); - rm.drainEvents(); - - ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt() - .getAppAttemptId(); - rm.sendAMLaunched(appAttemptId); - rm.drainEvents(); + // Submit the application + RMApp app = MockRMAppSubmitter.submitWithMemory(1024, rm); + rm.drainEvents(); - JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0); - Job mockJob = mock(Job.class); - when(mockJob.getReport()).thenReturn( - MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null, false, "")); - MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, - appAttemptId, mockJob); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + amNodeManager.nodeHeartbeat(true); + rm.drainEvents(); - // Now kill the application - rm.killApp(app.getApplicationId()); - rm.waitForState(app.getApplicationId(), RMAppState.KILLED); - allocator.schedule(); + ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt() + .getAppAttemptId(); + rm.sendAMLaunched(appAttemptId); + rm.drainEvents(); - // ### RMContainerAllocationException.class + JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0); + Job mockJob = mock(Job.class); + when(mockJob.getReport()).thenReturn( + MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, + 0, 0, 0, 0, 0, 0, "jobfile", null, false, "")); + MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, + appAttemptId, mockJob); + + // Now kill the application + rm.killApp(app.getApplicationId()); + rm.waitForState(app.getApplicationId(), RMAppState.KILLED); + 
allocator.schedule(); + }); } @Test @@ -3196,7 +3216,7 @@ public void testUpdateAskOnRampDownAllReduces() throws Exception { MockNM nodeManager = rm.registerNode("h1:1234", 1024); rm.drainEvents(); - // Request 2 maps and 1 reducer(some on nodes which are not registered). + // Request 2 maps and 1 reducer(sone on nodes which are not registered). ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(jobId, 1, Resource.newInstance(1024, 1), @@ -3239,29 +3259,29 @@ public void testUpdateAskOnRampDownAllReduces() throws Exception { rm.drainEvents(); // One map is assigned. - Assertions.assertEquals(1, allocator.getAssignedRequests().maps.size()); + assertEquals(1, allocator.getAssignedRequests().maps.size()); // Send deallocate request for map so that no maps are assigned after this. ContainerAllocatorEvent deallocate = createDeallocateEvent(jobId, 1, false); allocator.sendDeallocate(deallocate); // Now one reducer should be scheduled and one should be pending. - Assertions.assertEquals(1, allocator.getScheduledRequests().reduces.size()); - Assertions.assertEquals(1, allocator.getNumOfPendingReduces()); + assertEquals(1, allocator.getScheduledRequests().reduces.size()); + assertEquals(1, allocator.getNumOfPendingReduces()); // No map should be assigned and one should be scheduled. 
- Assertions.assertEquals(1, allocator.getScheduledRequests().maps.size()); - Assertions.assertEquals(0, allocator.getAssignedRequests().maps.size()); + assertEquals(1, allocator.getScheduledRequests().maps.size()); + assertEquals(0, allocator.getAssignedRequests().maps.size()); - Assertions.assertEquals(6, allocator.getAsk().size()); + assertEquals(6, allocator.getAsk().size()); for (ResourceRequest req : allocator.getAsk()) { boolean isReduce = req.getPriority().equals(RMContainerAllocator.PRIORITY_REDUCE); if (isReduce) { // 1 reducer each asked on h2, * and default-rack - Assertions.assertTrue((req.getResourceName().equals("*") || + assertTrue((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2")) && req.getNumContainers() == 1); } else { //map // 0 mappers asked on h1 and 1 each on * and default-rack - Assertions.assertTrue(((req.getResourceName().equals("*") || + assertTrue(((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack")) && req.getNumContainers() == 1) || (req.getResourceName().equals("h1") && req.getNumContainers() == 0)); @@ -3274,17 +3294,17 @@ public void testUpdateAskOnRampDownAllReduces() throws Exception { // After allocate response from scheduler, all scheduled reduces are ramped // down and move to pending. 3 asks are also updated with 0 containers to // indicate ramping down of reduces to scheduler. 
- Assertions.assertEquals(0, allocator.getScheduledRequests().reduces.size()); - Assertions.assertEquals(2, allocator.getNumOfPendingReduces()); - Assertions.assertEquals(3, allocator.getAsk().size()); + assertEquals(0, allocator.getScheduledRequests().reduces.size()); + assertEquals(2, allocator.getNumOfPendingReduces()); + assertEquals(3, allocator.getAsk().size()); for (ResourceRequest req : allocator.getAsk()) { - Assertions.assertEquals( + assertEquals( RMContainerAllocator.PRIORITY_REDUCE, req.getPriority()); - Assertions.assertTrue(req.getResourceName().equals("*") || + assertTrue(req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2")); - Assertions.assertEquals(Resource.newInstance(1024, 1), req.getCapability()); - Assertions.assertEquals(0, req.getNumContainers()); + assertEquals(Resource.newInstance(1024, 1), req.getCapability()); + assertEquals(0, req.getNumContainers()); } } @@ -3367,7 +3387,7 @@ public void testAvoidAskMoreReducersWhenReducerPreemptionIsRequired() MockNM nodeManager = rm.registerNode("h1:1234", 1024); rm.drainEvents(); - // Request 2 maps and 1 reducer(some on nodes which are not registered). + // Request 2 maps and 1 reducer(sone on nodes which are not registered). ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(jobId, 1, Resource.newInstance(1024, 1), @@ -3409,29 +3429,29 @@ public void testAvoidAskMoreReducersWhenReducerPreemptionIsRequired() rm.drainEvents(); // One map is assigned. - Assertions.assertEquals(1, allocator.getAssignedRequests().maps.size()); + assertEquals(1, allocator.getAssignedRequests().maps.size()); // Send deallocate request for map so that no maps are assigned after this. ContainerAllocatorEvent deallocate = createDeallocateEvent(jobId, 1, false); allocator.sendDeallocate(deallocate); // Now one reducer should be scheduled and one should be pending. 
- Assertions.assertEquals(1, allocator.getScheduledRequests().reduces.size()); - Assertions.assertEquals(1, allocator.getNumOfPendingReduces()); + assertEquals(1, allocator.getScheduledRequests().reduces.size()); + assertEquals(1, allocator.getNumOfPendingReduces()); // No map should be assigned and one should be scheduled. - Assertions.assertEquals(1, allocator.getScheduledRequests().maps.size()); - Assertions.assertEquals(0, allocator.getAssignedRequests().maps.size()); + assertEquals(1, allocator.getScheduledRequests().maps.size()); + assertEquals(0, allocator.getAssignedRequests().maps.size()); - Assertions.assertEquals(6, allocator.getAsk().size()); + assertEquals(6, allocator.getAsk().size()); for (ResourceRequest req : allocator.getAsk()) { boolean isReduce = req.getPriority().equals(RMContainerAllocator.PRIORITY_REDUCE); if (isReduce) { // 1 reducer each asked on h2, * and default-rack - Assertions.assertTrue((req.getResourceName().equals("*") || + assertTrue((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2")) && req.getNumContainers() == 1); } else { //map // 0 mappers asked on h1 and 1 each on * and default-rack - Assertions.assertTrue(((req.getResourceName().equals("*") || + assertTrue(((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack")) && req.getNumContainers() == 1) || (req.getResourceName().equals("h1") && req.getNumContainers() == 0)); @@ -3447,17 +3467,17 @@ public void testAvoidAskMoreReducersWhenReducerPreemptionIsRequired() // After allocate response from scheduler, all scheduled reduces are ramped // down and move to pending. 3 asks are also updated with 0 containers to // indicate ramping down of reduces to scheduler. 
- Assertions.assertEquals(0, allocator.getScheduledRequests().reduces.size()); - Assertions.assertEquals(2, allocator.getNumOfPendingReduces()); - Assertions.assertEquals(3, allocator.getAsk().size()); + assertEquals(0, allocator.getScheduledRequests().reduces.size()); + assertEquals(2, allocator.getNumOfPendingReduces()); + assertEquals(3, allocator.getAsk().size()); for (ResourceRequest req : allocator.getAsk()) { - Assertions.assertEquals( + assertEquals( RMContainerAllocator.PRIORITY_REDUCE, req.getPriority()); - Assertions.assertTrue(req.getResourceName().equals("*") || + assertTrue(req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2")); - Assertions.assertEquals(Resource.newInstance(1024, 1), req.getCapability()); - Assertions.assertEquals(0, req.getNumContainers()); + assertEquals(Resource.newInstance(1024, 1), req.getCapability()); + assertEquals(0, req.getNumContainers()); } } @@ -3493,8 +3513,8 @@ public void testExcludeSchedReducesFromHeadroom() throws Exception { 0, 0, 0, 0, 0, 0, "jobfile", null, false, "")); Task mockTask = mock(Task.class); TaskAttempt mockTaskAttempt = mock(TaskAttempt.class); - when(mockJob.getTask(any())).thenReturn(mockTask); - when(mockTask.getAttempt(any())).thenReturn(mockTaskAttempt); + when(mockJob.getTask((TaskId)any())).thenReturn(mockTask); + when(mockTask.getAttempt((TaskAttemptId)any())).thenReturn(mockTaskAttempt); when(mockTaskAttempt.getProgress()).thenReturn(0.01f); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); @@ -3505,7 +3525,7 @@ public void testExcludeSchedReducesFromHeadroom() throws Exception { MockNM nodeManager2 = rm.registerNode("h2:1234", 1024); rm.drainEvents(); - // Request 2 maps and 1 reducer(some on nodes which are not registered). + // Request 2 maps and 1 reducer(sone on nodes which are not registered). 
ContainerRequestEvent event1 = ContainerRequestCreator.createRequest(jobId, 1, Resource.newInstance(1024, 1), @@ -3545,14 +3565,14 @@ public void testExcludeSchedReducesFromHeadroom() throws Exception { rm.drainEvents(); // Two maps are assigned. - Assertions.assertEquals(2, allocator.getAssignedRequests().maps.size()); + assertEquals(2, allocator.getAssignedRequests().maps.size()); // Send deallocate request for map so that no maps are assigned after this. ContainerAllocatorEvent deallocate1 = createDeallocateEvent(jobId, 1, false); allocator.sendDeallocate(deallocate1); ContainerAllocatorEvent deallocate2 = createDeallocateEvent(jobId, 2, false); allocator.sendDeallocate(deallocate2); // No map should be assigned. - Assertions.assertEquals(0, allocator.getAssignedRequests().maps.size()); + assertEquals(0, allocator.getAssignedRequests().maps.size()); nodeManager.nodeHeartbeat(true); rm.drainEvents(); @@ -3576,18 +3596,18 @@ public void testExcludeSchedReducesFromHeadroom() throws Exception { allocator.schedule(); rm.drainEvents(); // One reducer is assigned and one map is scheduled - Assertions.assertEquals(1, allocator.getScheduledRequests().maps.size()); - Assertions.assertEquals(1, allocator.getAssignedRequests().reduces.size()); + assertEquals(1, allocator.getScheduledRequests().maps.size()); + assertEquals(1, allocator.getAssignedRequests().reduces.size()); // Headroom enough to run a mapper if headroom is taken as it is but wont be // enough if scheduled reducers resources are deducted. 
rm.getMyFifoScheduler().forceResourceLimit(Resource.newInstance(1260, 2)); allocator.schedule(); rm.drainEvents(); // After allocate response, the one assigned reducer is preempted and killed - Assertions.assertEquals(1, MyContainerAllocator.getTaskAttemptKillEvents().size()); - Assertions.assertEquals(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC, + assertEquals(1, MyContainerAllocator.getTaskAttemptKillEvents().size()); + assertEquals(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC, MyContainerAllocator.getTaskAttemptKillEvents().get(0).getMessage()); - Assertions.assertEquals(1, allocator.getNumOfPendingReduces()); + assertEquals(1, allocator.getNumOfPendingReduces()); } private static class MockScheduler implements ApplicationMasterProtocol { @@ -3596,8 +3616,9 @@ private static class MockScheduler implements ApplicationMasterProtocol { List lastAsk = null; int lastAnyAskMap = 0; int lastAnyAskReduce = 0; - List containersToComplete = new ArrayList<>(); - List containersToAllocate = new ArrayList<>(); + List containersToComplete = + new ArrayList(); + List containersToAllocate = new ArrayList(); public MockScheduler(ApplicationAttemptId attemptId) { this.attemptId = attemptId; @@ -3699,17 +3720,17 @@ public FinishApplicationMasterResponse finishApplicationMaster( public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException { AllocateResponse response = AllocateResponse.newInstance( - request.getResponseId(), Collections.emptyList(), - Collections.emptyList(), - Collections.emptyList(), + request.getResponseId(), Collections.emptyList(), + Collections.emptyList(), + Collections.emptyList(), Resource.newInstance(512000, 1024), null, 10, null, - Collections.emptyList()); + Collections.emptyList()); response.setCollectorInfo(collectorInfo); return response; } } - public static void main(String[] args) throws Exception { + /*public static void main(String[] args) throws Exception { TestRMContainerAllocator t = new TestRMContainerAllocator(); 
t.testSimple(); t.testResource(); @@ -3719,6 +3740,6 @@ public static void main(String[] args) throws Exception { t.testBlackListedNodes(); t.testCompletedTasksRecalculateSchedule(); t.testAMRMTokenUpdate(); - } + }*/ } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java index 59b102daba181..0a6dc7c001bef 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java @@ -19,12 +19,12 @@ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.yarn.api.records.Resource; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.EnumSet; import static org.apache.hadoop.yarn.proto.YarnServiceProtos.*; +import static org.junit.jupiter.api.Assertions.assertEquals; public class TestResourceCalculatorUtils { @Test @@ -59,17 +59,16 @@ private void verifyDifferentResourceTypes(Resource clusterAvailableResources, Resource nonZeroResource, int expectedNumberOfContainersForMemoryOnly, int expectedNumberOfContainersOverall) { - Assertions.assertEquals( - expectedNumberOfContainersForMemoryOnly -, ResourceCalculatorUtils.computeAvailableContainers( - clusterAvailableResources, nonZeroResource, - EnumSet.of(SchedulerResourceTypes.MEMORY)), "Incorrect number of available containers for Memory"); + assertEquals(expectedNumberOfContainersForMemoryOnly, + ResourceCalculatorUtils.computeAvailableContainers( + clusterAvailableResources, nonZeroResource, + 
EnumSet.of(SchedulerResourceTypes.MEMORY)), + "Incorrect number of available containers for Memory"); - Assertions.assertEquals( - expectedNumberOfContainersOverall -, ResourceCalculatorUtils.computeAvailableContainers( - clusterAvailableResources, nonZeroResource, - EnumSet.of(SchedulerResourceTypes.CPU, - SchedulerResourceTypes.MEMORY)), "Incorrect number of available containers overall"); + assertEquals(expectedNumberOfContainersOverall, + ResourceCalculatorUtils.computeAvailableContainers( + clusterAvailableResources, nonZeroResource, + EnumSet.of(SchedulerResourceTypes.CPU, SchedulerResourceTypes.MEMORY)), + "Incorrect number of available containers overall"); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java index ea635d94151fc..16cf064ad7e82 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java @@ -18,9 +18,10 @@ package org.apache.hadoop.mapreduce.v2.app.speculate; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class TestDataStatistics { private static final double TOL = 0.001; @@ -28,21 +29,21 @@ public class TestDataStatistics { @Test public void testEmptyDataStatistics() throws Exception { DataStatistics statistics = new DataStatistics(); - Assertions.assertEquals(0, statistics.count(), TOL); - Assertions.assertEquals(0, statistics.mean(), TOL); - Assertions.assertEquals(0, statistics.var(), 
TOL); - Assertions.assertEquals(0, statistics.std(), TOL); - Assertions.assertEquals(0, statistics.outlier(1.0f), TOL); + assertEquals(0, statistics.count(), TOL); + assertEquals(0, statistics.mean(), TOL); + assertEquals(0, statistics.var(), TOL); + assertEquals(0, statistics.std(), TOL); + assertEquals(0, statistics.outlier(1.0f), TOL); } @Test public void testSingleEntryDataStatistics() throws Exception { DataStatistics statistics = new DataStatistics(17.29); - Assertions.assertEquals(1, statistics.count(), TOL); - Assertions.assertEquals(17.29, statistics.mean(), TOL); - Assertions.assertEquals(0, statistics.var(), TOL); - Assertions.assertEquals(0, statistics.std(), TOL); - Assertions.assertEquals(17.29, statistics.outlier(1.0f), TOL); + assertEquals(1, statistics.count(), TOL); + assertEquals(17.29, statistics.mean(), TOL); + assertEquals(0, statistics.var(), TOL); + assertEquals(0, statistics.std(), TOL); + assertEquals(17.29, statistics.outlier(1.0f), TOL); } @Test @@ -50,24 +51,24 @@ public void testMultiEntryDataStatistics() throws Exception { DataStatistics statistics = new DataStatistics(); statistics.add(17); statistics.add(29); - Assertions.assertEquals(2, statistics.count(), TOL); - Assertions.assertEquals(23.0, statistics.mean(), TOL); - Assertions.assertEquals(36.0, statistics.var(), TOL); - Assertions.assertEquals(6.0, statistics.std(), TOL); - Assertions.assertEquals(29.0, statistics.outlier(1.0f), TOL); + assertEquals(2, statistics.count(), TOL); + assertEquals(23.0, statistics.mean(), TOL); + assertEquals(36.0, statistics.var(), TOL); + assertEquals(6.0, statistics.std(), TOL); + assertEquals(29.0, statistics.outlier(1.0f), TOL); } @Test public void testUpdateStatistics() throws Exception { DataStatistics statistics = new DataStatistics(17); statistics.add(29); - Assertions.assertEquals(2, statistics.count(), TOL); - Assertions.assertEquals(23.0, statistics.mean(), TOL); - Assertions.assertEquals(36.0, statistics.var(), TOL); + assertEquals(2, 
statistics.count(), TOL); + assertEquals(23.0, statistics.mean(), TOL); + assertEquals(36.0, statistics.var(), TOL); statistics.updateStatistics(17, 29); - Assertions.assertEquals(2, statistics.count(), TOL); - Assertions.assertEquals(29.0, statistics.mean(), TOL); - Assertions.assertEquals(0.0, statistics.var(), TOL); + assertEquals(2, statistics.count(), TOL); + assertEquals(29.0, statistics.mean(), TOL); + assertEquals(0.0, statistics.var(), TOL); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java index 15683b1b14801..b887aacecabbe 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java @@ -23,9 +23,10 @@ import org.apache.hadoop.yarn.util.ControlledClock; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Testing the statistical model of simple exponential estimator. 
*/ @@ -48,7 +49,7 @@ private int incTestSimpleExponentialForecast() { while(progress <= 1.0) { clock.tickMsec(clockTicks); forecaster.incorporateReading(clock.getTime(), progress); - LOG.info("progress: " + progress + " --> " + forecaster); + LOG.info("progress: " + progress + " --> " + forecaster.toString()); progress += 0.005; } @@ -69,7 +70,7 @@ private int decTestSimpleExponentialForecast() { while(progress <= 1.0) { clock.tickMsec(clockTicks); forecaster.incorporateReading(clock.getTime(), progress); - LOG.info("progress: " + progress + " --> " + forecaster); + LOG.info("progress: " + progress + " --> " + forecaster.toString()); progress += progressRates[(int)(progress / 0.25)]; } @@ -90,7 +91,7 @@ private int zeroTestSimpleExponentialForecast() { while(progress <= 1.0) { clock.tickMsec(clockTicks); forecaster.incorporateReading(clock.getTime(), progress); - LOG.info("progress: " + progress + " --> " + forecaster); + LOG.info("progress: " + progress + " --> " + forecaster.toString()); int currInd = progressInd++ > 1000 ? 
4 : (int)(progress / 0.25); progress += progressRates[currInd]; } @@ -101,21 +102,18 @@ private int zeroTestSimpleExponentialForecast() { @Test public void testSimpleExponentialForecastLinearInc() throws Exception { int res = incTestSimpleExponentialForecast(); - Assertions.assertEquals( - res, 0, "We got the wrong estimate from simple exponential."); + assertEquals(res, 0, "We got the wrong estimate from simple exponential."); } @Test public void testSimpleExponentialForecastLinearDec() throws Exception { int res = decTestSimpleExponentialForecast(); - Assertions.assertEquals( - res, 0, "We got the wrong estimate from simple exponential."); + assertEquals(res, 0, "We got the wrong estimate from simple exponential."); } @Test public void testSimpleExponentialForecastZeros() throws Exception { int res = zeroTestSimpleExponentialForecast(); - Assertions.assertEquals( - res, 0, "We got the wrong estimate from simple exponential."); + assertEquals(res, 0, "We got the wrong estimate from simple exponential."); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/EnvironmentVariablesExtension.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/EnvironmentVariablesExtension.java new file mode 100644 index 0000000000000..f730d0c62fd52 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/EnvironmentVariablesExtension.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp; + +import org.junit.contrib.java.lang.system.EnvironmentVariables; +import org.junit.jupiter.api.extension.BeforeEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; + +public class EnvironmentVariablesExtension implements BeforeEachCallback { + + private EnvironmentVariables environmentVariables; + + @Override + public void beforeEach(ExtensionContext extensionContext) throws Exception { + environmentVariables = new EnvironmentVariables(); + } + + public EnvironmentVariables getEnvironmentVariables() { + return environmentVariables; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java index 6e18ebba6f432..b8e0a83f73221 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java @@ -19,7 +19,6 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; import static 
org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.APP_ID; -import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.ByteArrayOutputStream; import java.io.File; @@ -45,7 +44,6 @@ import org.glassfish.jersey.internal.inject.AbstractBinder; import org.glassfish.jersey.jettison.JettisonFeature; import org.glassfish.jersey.server.ResourceConfig; -import org.junit.jupiter.api.Assertions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpConfig.Policy; @@ -71,11 +69,14 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.http.HttpStatus; import org.junit.jupiter.api.AfterEach; -import org.junit.Rule; import org.junit.jupiter.api.Test; import com.google.inject.Injector; -import org.junit.contrib.java.lang.system.EnvironmentVariables; +import org.junit.jupiter.api.extension.RegisterExtension; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class TestAMWebApp { @@ -84,6 +85,9 @@ public class TestAMWebApp { System.getProperty("java.io.tmpdir")), TestAMWebApp.class.getName()); + @RegisterExtension + public final EnvironmentVariablesExtension extension = new EnvironmentVariablesExtension(); + @AfterEach public void tearDown() { TEST_DIR.delete(); @@ -217,7 +221,7 @@ protected ClientService createClientService(AppContext context) { InputStream in = conn.getInputStream(); ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyBytes(in, out, 1024); - Assertions.assertTrue(out.toString().contains("MapReduce Application")); + assertTrue(out.toString().contains("MapReduce Application")); // https:// is not accessible. 
URL httpsUrl = new URL("https://" + hostPort + "/mapreduce/"); @@ -225,7 +229,7 @@ protected ClientService createClientService(AppContext context) { HttpURLConnection httpsConn = (HttpURLConnection) httpsUrl.openConnection(); httpsConn.getInputStream(); - Assertions.fail("https:// is not accessible, expected to fail"); + fail("https:// is not accessible, expected to fail"); } catch (SSLException e) { // expected } @@ -234,10 +238,6 @@ protected ClientService createClientService(AppContext context) { app.verifyCompleted(); } - @Rule - public final EnvironmentVariables environmentVariables - = new EnvironmentVariables(); - @Test public void testMRWebAppSSLEnabled() throws Exception { MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true) { @@ -256,9 +256,9 @@ protected ClientService createClientService(AppContext context) { keystoreFile.getParentFile().mkdirs(); KeyStoreTestUtil.createKeyStore(keystoreFile.getAbsolutePath(), "password", "server", keyPair.getPrivate(), cert); - environmentVariables.set("KEYSTORE_FILE_LOCATION", + extension.getEnvironmentVariables().set("KEYSTORE_FILE_LOCATION", keystoreFile.getAbsolutePath()); - environmentVariables.set("KEYSTORE_PASSWORD", "password"); + extension.getEnvironmentVariables().set("KEYSTORE_PASSWORD", "password"); Job job = app.submit(conf); @@ -274,7 +274,7 @@ protected ClientService createClientService(AppContext context) { InputStream in = httpsConn.getInputStream(); ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyBytes(in, out, 1024); - Assertions.assertTrue(out.toString().contains("MapReduce Application")); + assertTrue(out.toString().contains("MapReduce Application")); // http:// is not accessible. 
URL httpUrl = new URL("http://" + hostPort + "/mapreduce/"); @@ -282,7 +282,7 @@ protected ClientService createClientService(AppContext context) { HttpURLConnection httpConn = (HttpURLConnection) httpUrl.openConnection(); httpConn.getResponseCode(); - Assertions.fail("http:// is not accessible, expected to fail"); + fail("http:// is not accessible, expected to fail"); } catch (SocketException e) { // expected } @@ -312,9 +312,9 @@ protected ClientService createClientService(AppContext context) { keystoreFile.getParentFile().mkdirs(); KeyStoreTestUtil.createKeyStore(keystoreFile.getAbsolutePath(), "password", "server", keyPair.getPrivate(), cert); - environmentVariables.set("KEYSTORE_FILE_LOCATION", + extension.getEnvironmentVariables().set("KEYSTORE_FILE_LOCATION", keystoreFile.getAbsolutePath()); - environmentVariables.set("KEYSTORE_PASSWORD", "password"); + extension.getEnvironmentVariables().set("KEYSTORE_PASSWORD", "password"); KeyPair clientKeyPair = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate clientCert = KeyStoreTestUtil.generateCertificate( @@ -323,9 +323,9 @@ protected ClientService createClientService(AppContext context) { truststoreFile.getParentFile().mkdirs(); KeyStoreTestUtil.createTrustStore(truststoreFile.getAbsolutePath(), "password", "client", clientCert); - environmentVariables.set("TRUSTSTORE_FILE_LOCATION", + extension.getEnvironmentVariables().set("TRUSTSTORE_FILE_LOCATION", truststoreFile.getAbsolutePath()); - environmentVariables.set("TRUSTSTORE_PASSWORD", "password"); + extension.getEnvironmentVariables().set("TRUSTSTORE_PASSWORD", "password"); Job job = app.submit(conf); @@ -341,7 +341,7 @@ protected ClientService createClientService(AppContext context) { InputStream in = httpsConn.getInputStream(); ByteArrayOutputStream out = new ByteArrayOutputStream(); IOUtils.copyBytes(in, out, 1024); - Assertions.assertTrue(out.toString().contains("MapReduce Application")); + assertTrue(out.toString().contains("MapReduce Application")); 
// Try with wrong client cert KeyPair otherClientKeyPair = KeyStoreTestUtil.generateKeyPair("RSA"); @@ -353,7 +353,7 @@ protected ClientService createClientService(AppContext context) { HttpURLConnection httpConn = (HttpURLConnection) httpsUrl.openConnection(); httpConn.getResponseCode(); - Assertions.fail("Wrong client certificate, expected to fail"); + fail("Wrong client certificate, expected to fail"); } catch (SSLException e) { // expected } @@ -408,20 +408,20 @@ protected ClientService createClientService(AppContext context) { String expectedURL = scheme + conf.get(YarnConfiguration.PROXY_ADDRESS) + ProxyUriUtils.getPath(app.getAppID(), "/mapreduce", true); - Assertions.assertEquals(expectedURL, + assertEquals(expectedURL, conn.getHeaderField(HttpHeaders.LOCATION)); - Assertions.assertEquals(HttpStatus.SC_MOVED_TEMPORARILY, + assertEquals(HttpStatus.SC_MOVED_TEMPORARILY, conn.getResponseCode()); app.waitForState(job, JobState.SUCCEEDED); app.verifyCompleted(); } } - public static void main(String[] args) { + /*public static void main(String[] args) { AppContext context = new MockAppContext(0, 8, 88, 4); WebApps.$for("yarn", AppContext.class, context).withResourceConfig(configure(context)). at(58888).inDevMode().start(new AMWebApp(context)).joinThread(); - } + }*/ protected static ResourceConfig configure(AppContext context) { ResourceConfig config = new ResourceConfig();