From 775c299b63ac64fef7c55956f822c79be0cbe7aa Mon Sep 17 00:00:00 2001 From: Rich Jones Date: Thu, 15 Nov 2018 14:25:59 -0500 Subject: [PATCH] mayyyyybe this will work --- common/data_refinery_common/message_queue.py | 8 +- .../surveyor/test_end_to_end.py | 117 +++++++++--------- 2 files changed, 67 insertions(+), 58 deletions(-) diff --git a/common/data_refinery_common/message_queue.py b/common/data_refinery_common/message_queue.py index 571972d2b..4b111060a 100644 --- a/common/data_refinery_common/message_queue.py +++ b/common/data_refinery_common/message_queue.py @@ -17,7 +17,11 @@ NOMAD_TRANSCRIPTOME_JOB = "TRANSCRIPTOME_INDEX" NOMAD_DOWNLOADER_JOB = "DOWNLOADER" NONE_JOB_ERROR_TEMPLATE = "send_job was called with NONE job_type: {} for {} job {}" -RUNNING_IN_CLOUD = bool(get_env_variable_gracefully("RUNNING_IN_CLOUD", "False")) +RUNNING_IN_CLOUD = get_env_variable_gracefully("RUNNING_IN_CLOUD", "False") +if RUNNING_IN_CLOUD == "False": + RUNNING_IN_CLOUD = False +else: + RUNNING_IN_CLOUD = True def send_job(job_type: Enum, job, is_dispatch=False) -> bool: @@ -99,7 +103,7 @@ def send_job(job_type: Enum, job, is_dispatch=False) -> bool: # We only want to dispatch processor jobs directly. # Everything else will be handled by the Foreman, which will increment the retry counter. 
- if is_processor or is_dispatch or not RUNNING_IN_CLOUD: + if is_processor or is_dispatch or (not RUNNING_IN_CLOUD): try: nomad_response = nomad_client.job.dispatch_job(nomad_job, meta={"JOB_NAME": job_type.value, "JOB_ID": str(job.id)}) diff --git a/foreman/data_refinery_foreman/surveyor/test_end_to_end.py b/foreman/data_refinery_foreman/surveyor/test_end_to_end.py index b2211f1d6..128deae70 100644 --- a/foreman/data_refinery_foreman/surveyor/test_end_to_end.py +++ b/foreman/data_refinery_foreman/surveyor/test_end_to_end.py @@ -8,6 +8,7 @@ from django.test import TransactionTestCase, tag from django.utils import timezone from unittest.mock import patch, Mock +from test.support import EnvironmentVarGuard # Python >=3 from data_refinery_common.models import ( Organism, @@ -65,59 +66,63 @@ class NoOpEndToEndTestCase(TransactionTestCase): def test_no_op(self): """Survey, download, then process an experiment we know is NO_OP.""" # Clear out pre-existing work dirs so there's no conflicts: - for work_dir in glob.glob(LOCAL_ROOT_DIR + "/processor_job_*"): - shutil.rmtree(work_dir) - - # Make sure there are no already existing jobs we might poll for unsuccessfully. - DownloaderJobOriginalFileAssociation.objects.all().delete() - DownloaderJob.objects.all().delete() - ProcessorJobOriginalFileAssociation.objects.all().delete() - ProcessorJob.objects.all().delete() - - # Prevent a call being made to NCBI's API to determine - # organism name/id. 
- organism = Organism(name="HOMO_SAPIENS", taxonomy_id=9606, is_scientific_name=True) - organism.save() - - accession_code = "E-GEOD-3303" - survey_job = surveyor.survey_experiment(accession_code, "ARRAY_EXPRESS") - - self.assertTrue(survey_job.success) - - downloader_jobs = DownloaderJob.objects.all() - self.assertGreater(downloader_jobs.count(), 0) - - logger.info("Survey Job finished, waiting for Downloader Jobs to complete.") - start_time = timezone.now() - for downloader_job in downloader_jobs: - downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time) - self.assertTrue(downloader_job.success) - - processor_jobs = ProcessorJob.objects.all() - self.assertGreater(processor_jobs.count(), 0) - - logger.info("Downloader Jobs finished, waiting for processor Jobs to complete.") - start_time = timezone.now() - for processor_job in processor_jobs: - processor_job = wait_for_job(processor_job, ProcessorJob, start_time) - self.assertTrue(processor_job.success) - - # Test that the unsurveyor deletes all objects related to the experiment - purge_experiment(accession_code) - - self.assertEqual(Experiment.objects.all().count(), 0) - self.assertEqual(ExperimentAnnotation.objects.all().count(), 0) - self.assertEqual(ExperimentSampleAssociation.objects.all().count(), 0) - self.assertEqual(Sample.objects.all().count(), 0) - self.assertEqual(SampleAnnotation.objects.all().count(), 0) - self.assertEqual(OriginalFile.objects.all().count(), 0) - self.assertEqual(OriginalFileSampleAssociation.objects.all().count(), 0) - self.assertEqual(SampleResultAssociation.objects.all().count(), 0) - self.assertEqual(ComputationalResult.objects.all().count(), 0) - self.assertEqual(ComputationalResultAnnotation.objects.all().count(), 0) - self.assertEqual(SampleComputedFileAssociation.objects.all().count(), 0) - self.assertEqual(ComputedFile.objects.all().count(), 0) - self.assertEqual(DownloaderJob.objects.all().count(), 0) - 
self.assertEqual(DownloaderJobOriginalFileAssociation.objects.all().count(), 0) - self.assertEqual(ProcessorJob.objects.all().count(), 0) - self.assertEqual(ProcessorJobOriginalFileAssociation.objects.all().count(), 0) + + self.env = EnvironmentVarGuard() + self.env.set('RUNNING_IN_CLOUD', 'False') + with self.env: + for work_dir in glob.glob(LOCAL_ROOT_DIR + "/processor_job_*"): + shutil.rmtree(work_dir) + + # Make sure there are no already existing jobs we might poll for unsuccessfully. + DownloaderJobOriginalFileAssociation.objects.all().delete() + DownloaderJob.objects.all().delete() + ProcessorJobOriginalFileAssociation.objects.all().delete() + ProcessorJob.objects.all().delete() + + # Prevent a call being made to NCBI's API to determine + # organism name/id. + organism = Organism(name="HOMO_SAPIENS", taxonomy_id=9606, is_scientific_name=True) + organism.save() + + accession_code = "E-GEOD-3303" + survey_job = surveyor.survey_experiment(accession_code, "ARRAY_EXPRESS") + + self.assertTrue(survey_job.success) + + downloader_jobs = DownloaderJob.objects.all() + self.assertGreater(downloader_jobs.count(), 0) + + logger.info("Survey Job finished, waiting for Downloader Jobs to complete.") + start_time = timezone.now() + for downloader_job in downloader_jobs: + downloader_job = wait_for_job(downloader_job, DownloaderJob, start_time) + self.assertTrue(downloader_job.success) + + processor_jobs = ProcessorJob.objects.all() + self.assertGreater(processor_jobs.count(), 0) + + logger.info("Downloader Jobs finished, waiting for processor Jobs to complete.") + start_time = timezone.now() + for processor_job in processor_jobs: + processor_job = wait_for_job(processor_job, ProcessorJob, start_time) + self.assertTrue(processor_job.success) + + # Test that the unsurveyor deletes all objects related to the experiment + purge_experiment(accession_code) + + self.assertEqual(Experiment.objects.all().count(), 0) + self.assertEqual(ExperimentAnnotation.objects.all().count(), 0) + 
self.assertEqual(ExperimentSampleAssociation.objects.all().count(), 0) + self.assertEqual(Sample.objects.all().count(), 0) + self.assertEqual(SampleAnnotation.objects.all().count(), 0) + self.assertEqual(OriginalFile.objects.all().count(), 0) + self.assertEqual(OriginalFileSampleAssociation.objects.all().count(), 0) + self.assertEqual(SampleResultAssociation.objects.all().count(), 0) + self.assertEqual(ComputationalResult.objects.all().count(), 0) + self.assertEqual(ComputationalResultAnnotation.objects.all().count(), 0) + self.assertEqual(SampleComputedFileAssociation.objects.all().count(), 0) + self.assertEqual(ComputedFile.objects.all().count(), 0) + self.assertEqual(DownloaderJob.objects.all().count(), 0) + self.assertEqual(DownloaderJobOriginalFileAssociation.objects.all().count(), 0) + self.assertEqual(ProcessorJob.objects.all().count(), 0) + self.assertEqual(ProcessorJobOriginalFileAssociation.objects.all().count(), 0)