Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

add function to determine comment id for a job and use it #249

Merged
merged 3 commits into from
Feb 13, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 29 additions & 4 deletions tasks/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from tasks.build import CFG_DIRNAME, JOB_CFG_FILENAME, JOB_REPO_ID, JOB_REPOSITORY
from tasks.build import get_build_env_cfg
from tools import config, pr_comments, run_cmd
from tools.job_metadata import read_job_metadata_from_file


BUCKET_NAME = "bucket_name"
Expand Down Expand Up @@ -74,6 +75,26 @@ def determine_job_dirs(pr_number):
return job_directories


def determine_pr_comment_id(job_dir):
    """
    Determine the id of the PR comment associated with a job by reading the
    job's metadata file (_bot_job{JOBID}.metadata) located in job_dir.

    Args:
        job_dir (string): working directory of the job

    Returns:
        (int): id of comment corresponding to job in pull request or -1
    """
    # the job's id is assumed to be encoded as the last path component of job_dir
    job_id = os.path.basename(os.path.normpath(job_dir))
    metadata_path = os.path.join(job_dir, f"_bot_job{job_id}.metadata")
    metadata = read_job_metadata_from_file(metadata_path)
    # fall back to -1 when the metadata is missing or lacks the comment id
    if not metadata or "pr_comment_id" not in metadata:
        return -1
    return int(metadata["pr_comment_id"])


def determine_slurm_out(job_dir):
"""
Determine path to job stdout/err output file for a given job directory.
Expand Down Expand Up @@ -371,10 +392,13 @@ def determine_successful_jobs(job_dirs):
for job_dir in job_dirs:
slurm_out = determine_slurm_out(job_dir)
eessi_tarballs = determine_eessi_tarballs(job_dir)
pr_comment_id = determine_pr_comment_id(job_dir)

if check_build_status(slurm_out, eessi_tarballs):
log(f"{funcname}(): SUCCESSFUL build in '{job_dir}'")
successes.append({'job_dir': job_dir,
'slurm_out': slurm_out,
'pr_comment_id': pr_comment_id,
'eessi_tarballs': eessi_tarballs})
else:
log(f"{funcname}(): FAILED build in '{job_dir}'")
Expand Down Expand Up @@ -403,9 +427,9 @@ def determine_tarballs_to_deploy(successes, upload_policy):
log(f"{funcname}(): num successful jobs {len(successes)}")

to_be_deployed = {}
for s in successes:
for job in successes:
# all tarballs for successful job
tarballs = s["eessi_tarballs"]
tarballs = job["eessi_tarballs"]
log(f"{funcname}(): num tarballs {len(tarballs)}")

# full path to first tarball for successful job
Expand Down Expand Up @@ -438,7 +462,7 @@ def determine_tarballs_to_deploy(successes, upload_policy):
else:
deploy = True
elif upload_policy == "once":
uploaded = uploaded_before(build_target, s["job_dir"])
uploaded = uploaded_before(build_target, job["job_dir"])
if uploaded is None:
deploy = True
else:
Expand All @@ -447,7 +471,8 @@ def determine_tarballs_to_deploy(successes, upload_policy):
f"{indent_fname}has been uploaded through '{uploaded}'")

if deploy:
to_be_deployed[build_target] = {"job_dir": s["job_dir"],
to_be_deployed[build_target] = {"job_dir": job["job_dir"],
"pr_comment_id": job["pr_comment_id"],
"timestamp": timestamp}

return to_be_deployed
Expand Down
Loading