Bug 1823654 - Introduce pyupgrade (mozilla#7904)
* Ruff Auto fix

* Ruff unsafe fixes auto fix

* Use builtin list instead of typing.List

---------

Co-authored-by: Sebastian Hengst <[email protected]>
yschneider-sinneria and Archaeopteryx committed Apr 26, 2024
1 parent 7cd8ee5 commit e95d703
Showing 103 changed files with 320 additions and 429 deletions.
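All of the changes below are mechanical pyupgrade rewrites. They were presumably applied via Ruff — something like `ruff check --select UP --fix .` for the first pass and the same command with `--unsafe-fixes` for the second — though the exact invocation is an assumption, not recorded in this commit. A handful of patterns repeat across the 103 files; here is a minimal, runnable sketch with hypothetical names:

# Illustrative sketch of the pyupgrade (UP) rewrites recurring in this diff.
# All names and values are hypothetical, not taken from Treeherder.
from typing import List  # the "before" spelling; UP006/UP035 move this to builtins

project = "autoland"

# str.format() and %-interpolation become f-strings (UP032 / UP031)
assert "Comparing {} against production.".format(project) == \
    f"Comparing {project} against production."
assert "files_bugzilla_map_%s_%s.json" % (project, 7) == \
    f"files_bugzilla_map_{project}_7.json"

# typing.List -> builtin list (UP006, PEP 585)
def old_style() -> List[str]:
    return [project]

def new_style() -> list[str]:
    return [project]

assert old_style() == new_style()

# open() defaults to mode "r", so passing it is redundant (UP015)
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "pyupgrade_demo.txt")
with open(path, "w") as fh:
    fh.write("demo")
with open(path, "r") as fh:  # before
    assert fh.read() == "demo"
with open(path) as fh:  # after
    assert fh.read() == "demo"

The diff also drops `# -*- coding: utf-8 -*-` cookies (UP009) and moves `from mock import ...` to the standard library's `unittest.mock` (UP026); both show up in the files below.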
2 changes: 1 addition & 1 deletion misc/compare_pushes.py
@@ -27,7 +27,7 @@ def main(args):
     # Support comma separated projects
     projects = args.projects.split(",")
     for _project in projects:
-        logger.info("Comparing {} against production.".format(_project))
+        logger.info(f"Comparing {_project} against production.")
         # Remove properties that are irrelevant for the comparison
         pushes = compare_to_client.get_pushes(_project, count=50)
         for _push in sorted(pushes, key=lambda push: push["revision"]):
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -37,6 +37,8 @@ select = [
     "W",
     # pyflakes
     "F",
+    # pyupgrade
+    "UP",
 ]
 
 ignore = [
4 changes: 2 additions & 2 deletions tests/autoclassify/utils.py
@@ -42,10 +42,10 @@ def create_failure_lines(job, failure_line_list, start_line=0):
         job_log = JobLog.objects.create(
             job=job,
             name="{}{}".format(base_data.get("test"), job.id),
-            url="bar{}".format(i),
+            url=f"bar{i}",
             status=1,
         )
-        print("create jobLog for job id: {}".format(job.id))
+        print(f"create jobLog for job id: {job.id}")
         failure_line.job_log = job_log
         failure_line.save()
         failure_lines.append(failure_line)
10 changes: 5 additions & 5 deletions tests/conftest.py
@@ -427,7 +427,7 @@ def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser)
         del blob["sources"]
 
         blob["revision"] = sample_push[push_index]["revision"]
-        blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXL{}".format(task_id_index)
+        blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcXL{task_id_index}"
         blob["taskcluster_retry_id"] = "0"
         blobs.append(blob)
 
@@ -463,7 +463,7 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_log_parser)
         del blob["sources"]
 
         blob["revision"] = sample_push[push_index]["revision"]
-        blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcX{:0>2}".format(task_id_index)
+        blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcX{task_id_index:0>2}"
         blob["taskcluster_retry_id"] = "0"
         blob["job"]["revision"] = sample_push[push_index]["revision"]
         blob["job"]["submit_timestamp"] = sample_push[push_index]["push_timestamp"]
@@ -843,7 +843,7 @@ def _fetch_data(self, project):
             % project
         )
         files_bugzilla_data = None
-        file_name = "files_bugzilla_map_%s_%s.json" % (project, self.run_id)
+        file_name = f"files_bugzilla_map_{project}_{self.run_id}.json"
         exception = None
         try:
             tests_folder = os.path.dirname(__file__)
@@ -1117,7 +1117,7 @@ def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
     bug_id = bugs[0].id
     job_id = jobs[0].id
     th_models.BugJobMap.create(job_id=job_id, bug_id=bug_id)
-    query_string = "?startday=2012-05-09&endday=2018-05-10&tree={}".format(test_repository.name)
+    query_string = f"?startday=2012-05-09&endday=2018-05-10&tree={test_repository.name}"
 
     return {
         "tree": test_repository.name,
@@ -1270,7 +1270,7 @@ def __init__(self, *prior_dirs):
 
     def __call__(self, fixture_filename):
         fixture_path = join(*self._prior_dirs, fixture_filename)
-        with open(fixture_path, "r") as f:
+        with open(fixture_path) as f:
             return json.load(f)
 
 
2 changes: 1 addition & 1 deletion tests/e2e/test_job_ingestion.py
@@ -1,4 +1,4 @@
-from mock import MagicMock
+from unittest.mock import MagicMock
 
 from tests.test_utils import add_log_response
 from treeherder.etl.jobs import store_job_data
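Background for this one-line change: the third-party `mock` backport has shipped in the standard library as `unittest.mock` since Python 3.3, and the UP026 rule rewrites the import accordingly, so the external dependency can eventually be dropped. A minimal sketch — the objects behave the same, only the module path changes:

from unittest.mock import MagicMock  # stdlib; no third-party "mock" package needed

parser = MagicMock()
parser.parse_line.return_value = None
parser.parse_line("some log line", 3)
parser.parse_line.assert_called_once_with("some log line", 3)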
3 changes: 1 addition & 2 deletions tests/etl/test_perf_data_load.py
@@ -5,7 +5,6 @@
 import time
 
 import pytest
-from typing import List
 
 from django.core.management import call_command
 from django.db import IntegrityError
@@ -87,7 +86,7 @@ def sample_perf_artifact() -> dict:
 
 
 @pytest.fixture
-def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
+def sibling_perf_artifacts(sample_perf_artifact: dict) -> list[dict]:
     """intended to belong to the same job"""
     artifacts = [copy.deepcopy(sample_perf_artifact) for _ in range(3)]
 
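The `List[dict]` → `list[dict]` annotation rewrites rely on PEP 585, which made the builtin containers subscriptable in Python 3.9; on 3.7/3.8 an evaluated `list[dict]` annotation raises a TypeError unless the module uses postponed evaluation. This presumably means the project's supported floor is Python 3.9+. A small sketch showing the future-import escape hatch for older interpreters (the fixture name is hypothetical):

from __future__ import annotations  # lets list[dict] appear in annotations even on 3.7/3.8

import copy

def sibling_artifacts(artifact: dict) -> list[dict]:
    # Return three independent copies of one artifact payload.
    return [copy.deepcopy(artifact) for _ in range(3)]

assert len(sibling_artifacts({"framework": {"name": "demo"}})) == 3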
2 changes: 1 addition & 1 deletion tests/etl/test_pushlog.py
@@ -104,7 +104,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
     pushes = pushlog_dict["pushes"]
     max_push_id = max(int(k) for k in pushes.keys())
 
-    cache_key = "{}:last_push_id".format(test_repository.name)
+    cache_key = f"{test_repository.name}:last_push_id"
     assert cache.get(cache_key) == max_push_id
 
 
1 change: 0 additions & 1 deletion tests/etl/test_text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from treeherder.etl.text import astral_filter, filter_re
 
 
2 changes: 1 addition & 1 deletion tests/intermittents_commenter/test_commenter.py
@@ -37,7 +37,7 @@ def test_intermittents_commenter(bug_data):
 
     comment_params = process.generate_bug_changes(startday, endday, alt_startday, alt_endday)
 
-    with open("tests/intermittents_commenter/expected_comment.text", "r") as comment:
+    with open("tests/intermittents_commenter/expected_comment.text") as comment:
         expected_comment = comment.read()
     print(len(expected_comment))
     print(len(comment_params[0]["changes"]["comment"]["body"]))
4 changes: 2 additions & 2 deletions tests/log_parser/test_log_view_artifact_builder.py
@@ -18,7 +18,7 @@ def do_test(log):
     result file with the same prefix.
     """
 
-    url = add_log_response("{}.txt.gz".format(log))
+    url = add_log_response(f"{log}.txt.gz")
 
     builder = LogViewerArtifactBuilder(url)
     lpc = ArtifactBuilderCollection(url, builders=builder)
@@ -31,7 +31,7 @@ def do_test(log):
     # with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
     #     f.write(json.dumps(act, indent=2))
 
-    exp = test_utils.load_exp("{0}.logview.json".format(log))
+    exp = test_utils.load_exp(f"{log}.logview.json")
 
     assert act == exp
 
2 changes: 1 addition & 1 deletion tests/log_parser/test_performance_parser.py
@@ -27,6 +27,6 @@ def test_performance_log_parsing_malformed_perfherder_data():
             }
         ],
     }
-    parser.parse_line("PERFHERDER_DATA: {}".format(json.dumps(valid_perfherder_data)), 3)
+    parser.parse_line(f"PERFHERDER_DATA: {json.dumps(valid_perfherder_data)}", 3)
 
     assert parser.get_artifact() == [valid_perfherder_data]
@@ -1,6 +1,5 @@
 import random
 import datetime
-from typing import Tuple
 
 from treeherder.perf.auto_perf_sheriffing.backfill_reports import (
     BackfillReportMaintainer,
@@ -141,7 +140,7 @@ def test_reports_are_updated_after_alert_summaries_change(
     assert initial_records_timestamps != records_timestamps
 
 
-def __fetch_report_timestamps(test_perf_alert_summary) -> Tuple:
+def __fetch_report_timestamps(test_perf_alert_summary) -> tuple:
     report = BackfillReport.objects.get(summary=test_perf_alert_summary)
     report_timestamps = report.created, report.last_updated
     records_timestamps = [record.created for record in report.records.all()]
8 changes: 4 additions & 4 deletions tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -2,7 +2,7 @@
 
 import pytest
 from django.conf import settings
-from typing import List, Type, Callable
+from typing import Callable
 
 from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
 from treeherder.config.settings import BZ_DATETIME_FORMAT
@@ -18,15 +18,15 @@
 pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]
 
 
-def bugzilla_formula_instances() -> List[BugzillaFormula]:
+def bugzilla_formula_instances() -> list[BugzillaFormula]:
     return [EngineerTractionFormula(), FixRatioFormula()]
 
 
-def formula_instances() -> List[Callable]:
+def formula_instances() -> list[Callable]:
     return bugzilla_formula_instances() + [TotalAlertsFormula()]
 
 
-def concrete_formula_classes() -> List[Type[BugzillaFormula]]:
+def concrete_formula_classes() -> list[type[BugzillaFormula]]:
     return [EngineerTractionFormula, FixRatioFormula]
 
 
@@ -151,7 +151,7 @@ def should_take_more_than(seconds: float):
 @pytest.fixture
 def updatable_criteria_csv(tmp_path):
     updatable_csv = tmp_path / "updatable-criteria.csv"
-    with open(RECORD_TEST_PATH, "r") as file_:
+    with open(RECORD_TEST_PATH) as file_:
         updatable_csv.write_text(file_.read())
 
     return updatable_csv
@@ -1,7 +1,6 @@
 import pytest
 from datetime import datetime, timedelta
 
-from typing import List
 
 from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
 from treeherder.config.settings import BZ_DATETIME_FORMAT
@@ -44,7 +43,7 @@ def quantified_bugs(betamax_recorder) -> list:
 
 
 @pytest.fixture
-def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
+def cooled_down_bugs(nonblock_session, quantified_bugs) -> list[dict]:
     bugs = []
     for bug in quantified_bugs:
         created_at = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
2 changes: 1 addition & 1 deletion tests/push_health/test_usage.py
@@ -34,7 +34,7 @@ def test_get_usage(push_usage, test_repository):
     nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
         "try", "treeherder-prod"
     )
-    new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
+    new_relic_url = f"{settings.NEW_RELIC_INSIGHTS_API_URL}?nrql={nrql}"
 
     responses.add(
         responses.GET,
56 changes: 17 additions & 39 deletions tests/sampledata.py
@@ -5,89 +5,67 @@
 class SampleData:
     @classmethod
     def get_perf_data(cls, filename):
-        with open(
-            "{0}/sample_data/artifacts/performance/{1}".format(os.path.dirname(__file__), filename)
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/performance/{filename}") as f:
             return json.load(f)
 
     def __init__(self):
-        self.job_data_file = "{0}/sample_data/job_data.txt".format(os.path.dirname(__file__))
+        self.job_data_file = f"{os.path.dirname(__file__)}/sample_data/job_data.txt"
 
-        self.push_data_file = "{0}/sample_data/push_data.json".format(os.path.dirname(__file__))
+        self.push_data_file = f"{os.path.dirname(__file__)}/sample_data/push_data.json"
 
-        self.logs_dir = "{0}/sample_data/logs".format(os.path.dirname(__file__))
+        self.logs_dir = f"{os.path.dirname(__file__)}/sample_data/logs"
 
-        with open(
-            "{0}/sample_data/artifacts/text_log_summary.json".format(os.path.dirname(__file__))
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/text_log_summary.json") as f:
             self.text_log_summary = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
+            "{}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
                 os.path.dirname(__file__)
             )
         ) as f:
             self.taskcluster_pulse_messages = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/taskcluster_tasks.json".format(
-                os.path.dirname(__file__)
-            )
+            f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/taskcluster_tasks.json"
         ) as f:
             self.taskcluster_tasks = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
+            "{}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
                 os.path.dirname(__file__)
             )
         ) as f:
             self.taskcluster_transformed_jobs = json.load(f)
 
-        with open(
-            "{0}/sample_data/pulse_consumer/job_data.json".format(os.path.dirname(__file__))
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/job_data.json") as f:
             self.pulse_jobs = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/transformed_job_data.json".format(
-                os.path.dirname(__file__)
-            )
+            f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_job_data.json"
         ) as f:
             self.transformed_pulse_jobs = json.load(f)
 
-        with open(
-            "{0}/sample_data/pulse_consumer/github_push.json".format(os.path.dirname(__file__))
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_push.json") as f:
             self.github_push = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/transformed_gh_push.json".format(
-                os.path.dirname(__file__)
-            )
+            f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_push.json"
        ) as f:
             self.transformed_github_push = json.load(f)
 
-        with open(
-            "{0}/sample_data/pulse_consumer/github_pr.json".format(os.path.dirname(__file__))
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_pr.json") as f:
             self.github_pr = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/transformed_gh_pr.json".format(
-                os.path.dirname(__file__)
-            )
+            f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_pr.json"
         ) as f:
             self.transformed_github_pr = json.load(f)
 
-        with open(
-            "{0}/sample_data/pulse_consumer/hg_push.json".format(os.path.dirname(__file__))
-        ) as f:
+        with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/hg_push.json") as f:
             self.hg_push = json.load(f)
 
         with open(
-            "{0}/sample_data/pulse_consumer/transformed_hg_push.json".format(
-                os.path.dirname(__file__)
-            )
+            f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_hg_push.json"
         ) as f:
             self.transformed_hg_push = json.load(f)
 
@@ -106,4 +84,4 @@ def initialize_data(self):
 
     def get_log_path(self, name):
         """Returns the full path to a log file"""
-        return "{0}/{1}".format(self.logs_dir, name)
+        return f"{self.logs_dir}/{name}"
8 changes: 4 additions & 4 deletions tests/services/test_taskcluster.py
@@ -54,15 +54,15 @@ def test_filter_relevant_actions(self, actions_json, original_task, expected_act
 
     def test_task_in_context(self):
         # match
-        tag_set_list, task_tags = [
+        tag_set_list, task_tags = (
             load_json_fixture(f) for f in ("matchingTagSetList.json", "matchingTaskTags.json")
-        ]
+        )
         assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is True
 
         # mismatch
-        tag_set_list, task_tags = [
+        tag_set_list, task_tags = (
             load_json_fixture(f) for f in ("mismatchingTagSetList.json", "mismatchingTaskTags.json")
-        ]
+        )
         assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is False
 
     def test_get_action(self, actions_json, expected_backfill_task):
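The rewrite above swaps an unpacked list comprehension for a generator expression: when the result is immediately unpacked into a fixed number of names, materializing an intermediate list buys nothing. A tiny self-contained sketch of the pattern (`load_json_fixture_demo` is a hypothetical stand-in for the test helper):

import json

def load_json_fixture_demo(text: str) -> dict:
    # Stand-in for the fixture loader used in the real test.
    return json.loads(text)

# Unpacking consumes the generator eagerly, so behaviour matches the list version.
tag_set_list, task_tags = (
    load_json_fixture_demo(s) for s in ('{"a": 1}', '{"b": 2}')
)
assert tag_set_list == {"a": 1}
assert task_tags == {"b": 2}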
2 changes: 1 addition & 1 deletion tests/test_dockerflow.py
@@ -9,7 +9,7 @@ def test_get_version(client):
     response = client.get("/__version__")
     assert response.status_code == 200
 
-    with open(f"{settings.BASE_DIR}/version.json", "r") as version_file:
+    with open(f"{settings.BASE_DIR}/version.json") as version_file:
         assert response.json() == json.loads(version_file.read())
 
 
2 changes: 1 addition & 1 deletion tests/test_utils.py
@@ -213,7 +213,7 @@ def add_log_response(filename):
     Set up responses for a local gzipped log and return the url for it.
     """
     log_path = SampleData().get_log_path(filename)
-    log_url = "http://my-log.mozilla.org/{}".format(filename)
+    log_url = f"http://my-log.mozilla.org/{filename}"
 
     with open(log_path, "rb") as log_file:
         content = log_file.read()
4 changes: 2 additions & 2 deletions tests/webapp/api/test_bug_job_map_api.py
@@ -95,7 +95,7 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_us
         user=test_user,
     )
 
-    pk = "{0}-{1}".format(job.id, bug.id)
+    pk = f"{job.id}-{bug.id}"
 
     resp = client.get(
         reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
@@ -130,7 +130,7 @@ def test_bug_job_map_delete(
     if not test_no_auth:
         client.force_authenticate(user=test_user)
 
-    pk = "{0}-{1}".format(job.id, bug.id)
+    pk = f"{job.id}-{bug.id}"
 
     resp = client.delete(
         reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})