From 2d357b430c4ff7f7d2ff402d33026da9f1a40cea Mon Sep 17 00:00:00 2001
From: jcass77
Date: Thu, 16 Jul 2020 14:53:18 +0200
Subject: [PATCH] refactor: Drop support for Python 2.7, update codebase to Python 3.6

---
 django_apscheduler/admin.py          |  4 ++--
 django_apscheduler/docs/changelog.md |  3 ++-
 django_apscheduler/jobstores.py      | 40 ++++++++++++++++++++--------------------
 django_apscheduler/models.py         | 27 +++++++++++++--------------
 django_apscheduler/result_storage.py |  6 +++---
 tests/compat.py                      |  6 ------
 tests/test_jobstore.py               | 10 ++++------
 7 files changed, 44 insertions(+), 52 deletions(-)
 delete mode 100644 tests/compat.py

diff --git a/django_apscheduler/admin.py b/django_apscheduler/admin.py
index 628b5ee..91e21f8 100644
--- a/django_apscheduler/admin.py
+++ b/django_apscheduler/admin.py
@@ -32,7 +32,7 @@ def get_queryset(self, request):
             .values_list("job")
             .annotate(duration=Avg("duration"))
         }
-        return super(DjangoJobAdmin, self).get_queryset(request)
+        return super().get_queryset(request)
 
     def next_run_time_sec(self, obj):
         if obj.next_run_time is None:
@@ -54,7 +54,7 @@ def run_time_sec(self, obj):
 
     def get_queryset(self, request):
         return (
-            super(DjangoJobExecutionAdmin, self)
+            super()
             .get_queryset(request)
             .select_related("job")
         )
diff --git a/django_apscheduler/docs/changelog.md b/django_apscheduler/docs/changelog.md
index 62bdef3..3efb9c9 100644
--- a/django_apscheduler/docs/changelog.md
+++ b/django_apscheduler/docs/changelog.md
@@ -6,13 +6,14 @@ This changelog is used to track all major changes to django_apscheduler.
 
 **Enhancements**
 
+- Drop support for Python 2.7, convert codebase to Python 3.6+.
 - CI: drop coverage for Python 2.7 and Django <= 2.1, which are no longer maintained upstream.
 - CI: add coverage for Python 3.7 and 3.8, as well as Django long term support (LTS) and the latest released versions.
 - CI: un-pin dependency on agronholm/apscheduler#149, which has since been merged and released upstream.
 - Rename Django `test_settings.py` file to prevent collision with actual test scripts.
 - Clean up unused dependencies / update dependencies to latest available versions.
 - Switch to Black code formatting.
-- Align package layout with official [Django recommendations](https://docs.djangoproject.com/en/dev/intro/reusable-apps/#packaging-your-app)
+- Align package layout with official [Django recommendations](https://docs.djangoproject.com/en/dev/intro/reusable-apps/#packaging-your-app)
 
 **Fixes**
 
diff --git a/django_apscheduler/jobstores.py b/django_apscheduler/jobstores.py
index 875056a..33e642d 100644
--- a/django_apscheduler/jobstores.py
+++ b/django_apscheduler/jobstores.py
@@ -17,7 +17,7 @@
 from django_apscheduler.result_storage import DjangoResultStorage
 from django_apscheduler.util import deserialize_dt, serialize_dt
 
-LOGGER = logging.getLogger("django_apscheduler")
+logger = logging.getLogger("django_apscheduler")
 
 
 def ignore_database_error(on_error_value=None):
@@ -54,30 +54,30 @@ class DjangoJobStore(BaseJobStore):
     """
 
     def __init__(self, pickle_protocol=pickle.HIGHEST_PROTOCOL):
-        super(DjangoJobStore, self).__init__()
+        super().__init__()
         self.pickle_protocol = pickle_protocol
 
     @ignore_database_error()
     def lookup_job(self, job_id):
-        LOGGER.debug("Lookup for a job %s", job_id)
+        logger.debug("Lookup for a job %s", job_id)
         try:
             job_state = DjangoJob.objects.get(name=job_id).job_state
         except DjangoJob.DoesNotExist:
             return None
         r = self._reconstitute_job(job_state) if job_state else None
-        LOGGER.debug("Got %s", r)
+        logger.debug("Got %s", r)
         return r
 
     @ignore_database_error(on_error_value=[])
     def get_due_jobs(self, now):
-        LOGGER.debug("get_due_jobs for time=%s", now)
+        logger.debug("get_due_jobs for time=%s", now)
         try:
             out = self._get_jobs(next_run_time__lte=serialize_dt(now))
-            LOGGER.debug("Got %s", out)
+            logger.debug("Got %s", out)
             return out
         # TODO: Make this except clause more specific
         except Exception:
-            LOGGER.exception("Exception during getting jobs")
+            logger.exception("Exception during getting jobs")
             return []
 
     @ignore_database_error()
@@ -92,7 +92,7 @@ def get_next_run_time(self):
             return
         # TODO: Make this except clause more specific
         except Exception:
-            LOGGER.exception("Exception during get_next_run_time for jobs")
+            logger.exception("Exception during get_next_run_time for jobs")
 
     @ignore_database_error(on_error_value=[])
     def get_all_jobs(self):
@@ -111,7 +111,7 @@ def add_job(self, job):
         )
 
         if not created:
-            LOGGER.warning(
+            logger.warning(
                 "Job with id %s already in jobstore. I'll refresh it", job.id
             )
             dbJob.next_run_time = serialize_dt(job.next_run_time)
@@ -125,7 +125,7 @@ def update_job(self, job):
             job_state=pickle.dumps(job.__getstate__(), self.pickle_protocol),
         )
 
-        LOGGER.debug(
+        logger.debug(
             "Update job %s: next_run_time=%s, job_state=%s",
             job,
             serialize_dt(job.next_run_time),
@@ -133,14 +133,14 @@
         )
 
         if updated == 0:
-            LOGGER.info("Job with id %s not found", job.id)
+            logger.info("Job with id %s not found", job.id)
             raise JobLookupError(job.id)
 
     @ignore_database_error()
     def remove_job(self, job_id):
         qs = DjangoJob.objects.filter(name=job_id)
         if not qs.exists():
-            LOGGER.warning("Job with id %s not found. Can't remove job.", job_id)
+            logger.warning("Job with id %s not found. Can't remove job.", job_id)
         qs.delete()
 
     @ignore_database_error()
@@ -178,7 +178,7 @@ def _get_jobs(self, **filters):
 
         # Remove all the jobs we failed to restore
         if failed_job_ids:
-            LOGGER.warning("Remove bad jobs: %s", failed_job_ids)
+            logger.warning("Remove bad jobs: %s", failed_job_ids)
             DjangoJob.objects.filter(id__in=failed_job_ids).delete()
 
         def map_jobs(job):
@@ -194,15 +194,15 @@ def event_name(code):
             return key
 
 
-class _EventManager(object):
+class _EventManager:
 
-    LOGGER = LOGGER.getChild("events")
+    logger = logger.getChild("events")
 
     def __init__(self, storage=None):
         self.storage = storage or DjangoResultStorage()
 
     def __call__(self, event):
-        LOGGER.debug("Got event: %s, %s, %s", event, type(event), event.__dict__)
+        logger.debug("Got event: %s, %s, %s", event, type(event), event.__dict__)
         # print event, type(event), event.__dict__
         try:
             if isinstance(event, JobSubmissionEvent):
@@ -210,7 +210,7 @@ def __call__(self, event):
             elif isinstance(event, JobExecutionEvent):
                 self._process_execution_event(event)
         except Exception as e:
-            self.LOGGER.exception(str(e))
+            self.logger.exception(str(e))
 
     @ignore_database_error()
     def _process_submission_event(self, event):
@@ -219,7 +219,7 @@
         try:
             job = DjangoJob.objects.get(name=event.job_id)
         except ObjectDoesNotExist:
-            self.LOGGER.warning("Job with id %s not found in database", event.job_id)
+            self.logger.warning("Job with id %s not found in database", event.job_id)
             return
 
         self.storage.get_or_create_job_execution(job, event)
@@ -231,7 +231,7 @@ def _process_execution_event(self, event):
         try:
             job = DjangoJob.objects.get(name=event.job_id)
         except ObjectDoesNotExist:
-            self.LOGGER.warning("Job with id %s not found in database", event.job_id)
+            self.logger.warning("Job with id %s not found in database", event.job_id)
             return
 
         self.storage.register_job_executed(job, event)
@@ -263,7 +263,7 @@ def test_job():
     """
 
     def inner(func):
-        k.setdefault("id", "{}.{}".format(func.__module__, func.__name__))
+        k.setdefault("id", f"{func.__module__}.{func.__name__}")
         scheduler.add_job(func, *a, **k)
         return func
 
diff --git a/django_apscheduler/models.py b/django_apscheduler/models.py
index 2d1a18a..f3abf61 100644
--- a/django_apscheduler/models.py
+++ b/django_apscheduler/models.py
@@ -1,4 +1,3 @@
-# coding=utf-8
 from datetime import timedelta
 
 from django.db import models, connection
@@ -9,7 +8,7 @@
 
 from django_apscheduler import util
 
-LOGGER = logging.getLogger("django_apscheduler")
+logger = logging.getLogger("django_apscheduler")
 
 
 class DjangoJobManager(models.Manager):
@@ -22,7 +21,7 @@ class DjangoJobManager(models.Manager):
 
     def get_queryset(self):
         self.__ping()
-        return super(DjangoJobManager, self).get_queryset()
+        return super().get_queryset()
 
     def __ping(self):
         if time.time() - self._last_ping < self._ping_interval:
@@ -38,13 +37,13 @@ def __ping(self):
         self._last_ping = time.time()
 
     def __reconnect(self):
-        LOGGER.warning("Mysql closed the connection. Perform reconnect...")
+        logger.warning("Mysql closed the connection. Perform reconnect...")
 
         if connection.connection:
             connection.connection.close()
             connection.connection = None
         else:
-            LOGGER.warning("Connection was already closed.")
+            logger.warning("Connection was already closed.")
 
 
 class DjangoJob(models.Model):
@@ -61,7 +60,7 @@ def __str__(self):
             if self.next_run_time
             else "paused"
         )
-        return "%s (%s)" % (self.name, status)
+        return f"{self.name} ({status})"
 
     class Meta:
         ordering = ("next_run_time",)
@@ -79,14 +78,14 @@ def delete_old_job_executions(self, max_age):
 
 
 class DjangoJobExecution(models.Model):
-    ADDED = u"Added"
-    SENT = u"Started execution"
-    MAX_INSTANCES = u"Max instances reached!"
-    MISSED = u"Missed!"
-    MODIFIED = u"Modified!"
-    REMOVED = u"Removed!"
-    ERROR = u"Error!"
-    SUCCESS = u"Executed"
+    ADDED = "Added"
+    SENT = "Started execution"
+    MAX_INSTANCES = "Max instances reached!"
+    MISSED = "Missed!"
+    MODIFIED = "Modified!"
+    REMOVED = "Removed!"
+    ERROR = "Error!"
+    SUCCESS = "Executed"
 
     job = models.ForeignKey(DjangoJob, on_delete=models.CASCADE)
     status = models.CharField(
diff --git a/django_apscheduler/result_storage.py b/django_apscheduler/result_storage.py
index 014fde7..94cca98 100644
--- a/django_apscheduler/result_storage.py
+++ b/django_apscheduler/result_storage.py
@@ -7,13 +7,13 @@
 from django_apscheduler.util import serialize_dt
 
 
-class DjangoResultStorage(object):
+class DjangoResultStorage:
     """
     Uses Django ORM table for store job status and results.
     You can override this class to change result storage.
     """
 
-    LOGGER = logging.getLogger("django_apscheduler.result_storage")
+    logger = logging.getLogger("django_apscheduler.result_storage")
 
     def get_or_create_job_execution(
         self, job: DjangoJob, event: JobSubmissionEvent
@@ -81,7 +81,7 @@ def register_job_executed(
         )
 
         if job_execution.finished:
-            self.LOGGER.warning("Job already finished! %s", job_execution)
+            self.logger.warning("Job already finished! %s", job_execution)
             return
 
         job_execution.finished = time.time()
diff --git a/tests/compat.py b/tests/compat.py
deleted file mode 100644
index 5c34576..0000000
--- a/tests/compat.py
+++ /dev/null
@@ -1,6 +0,0 @@
-try:
-    import mock
-except ImportError:
-    from unittest import mock
-
-mock_compat = mock
diff --git a/tests/test_jobstore.py b/tests/test_jobstore.py
index d38d31b..932f511 100644
--- a/tests/test_jobstore.py
+++ b/tests/test_jobstore.py
@@ -1,7 +1,6 @@
-from __future__ import print_function
-
 import datetime
 import logging
+from unittest import mock
 
 import pytz
 from apscheduler.events import JobExecutionEvent, JobSubmissionEvent
@@ -13,7 +12,6 @@
 from django_apscheduler.models import DjangoJob, DjangoJobExecution
 from django_apscheduler.result_storage import DjangoResultStorage
 from django_apscheduler.util import serialize_dt
-from tests.compat import mock_compat
 from tests.conftest import job
 
 logging.basicConfig()
@@ -65,7 +63,7 @@ def job_for_tests():
     job_for_tests.mock()
 
 
-job_for_tests.mock = mock_compat.Mock()
+job_for_tests.mock = mock.Mock()
 
 
 def test_try_add_job_then_start(db, scheduler):
@@ -122,7 +120,7 @@ def test_issue_15(db):
     DjangoJobExecution.objects.create(job=job, run_time=serialize_dt(srt))
 
     storage.get_or_create_job_execution(
-        job, mock_compat.Mock(scheduled_run_times=[srt])
+        job, mock.Mock(scheduled_run_times=[srt])
     )
 
 
@@ -138,7 +136,7 @@ def mocked_execute(self, *a, **k):
         else:
             return []
 
-    with mock_compat.patch.object(CursorWrapper, "execute", mocked_execute):
+    with mock.patch.object(CursorWrapper, "execute", mocked_execute):
         store = DjangoJobStore()
         # DjangoJob.objects._last_ping = 0