Don't mistakenly take a lock on DagRun via ti.refresh_from_db (#25312)
In 2.2.0 we made TI.dag_run automatically join-loaded, which is fine for most
cases, but `refresh_from_db` doesn't need it (we don't access anything under
ti.dag_run), and when `lock_for_update=True` is passed we may be locking more
rows than we want to, which _might_ cause deadlocks.

Even if it doesn't, selecting more than we need is wasteful.
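
To make the over-locking concrete, here is a minimal SQLAlchemy sketch using hypothetical DagRunLike/TaskInstanceLike models (not Airflow's real classes): an eagerly join-loaded relationship pulls the related table into the SELECT, so FOR UPDATE can lock its rows too, while querying the table's bare columns keeps the statement, and the lock, on a single table.

# Hedged sketch: hypothetical models, not Airflow's TaskInstance/DagRun.
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import Query, declarative_base, relationship

Base = declarative_base()


class DagRunLike(Base):
    __tablename__ = "dag_run_like"
    id = Column(Integer, primary_key=True)


class TaskInstanceLike(Base):
    __tablename__ = "task_instance_like"
    id = Column(Integer, primary_key=True)
    run_id = Column(Integer, ForeignKey("dag_run_like.id"), nullable=False)
    state = Column(String(20))
    # lazy="joined" mirrors the 2.2.0 change: every query for this entity
    # eagerly joins its run unless told otherwise.
    dag_run = relationship(DagRunLike, lazy="joined", innerjoin=True)


# Entity query: the eager JOIN drags dag_run_like into the statement, so
# FOR UPDATE covers that table's row as well.
entity_qry = Query(TaskInstanceLike).filter(TaskInstanceLike.id == 1).with_for_update()

# Column query: no relationships are loaded, so only task_instance_like is
# selected (and locked); results are plain Rows, not ORM objects.
column_qry = (
    Query(list(TaskInstanceLike.__table__.columns))
    .filter(TaskInstanceLike.id == 1)
    .with_for_update()
)

print(entity_qry)   # ... FROM task_instance_like JOIN dag_run_like ... FOR UPDATE
print(column_qry)   # ... FROM task_instance_like ... FOR UPDATE

The diff below applies the same column-only idea to TaskInstance.refresh_from_db.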

(cherry picked from commit be2b53e)
ashb authored and ephraimbuddy committed Aug 15, 2022
1 parent cf448ea commit 78fa95c
Showing 2 changed files with 23 additions and 13 deletions.
28 changes: 18 additions & 10 deletions airflow/models/taskinstance.py
@@ -308,6 +308,7 @@ def clear_task_instances(
             if dag_run_state == DagRunState.QUEUED:
                 dr.last_scheduling_decision = None
                 dr.start_date = None
+    session.flush()
 
 
 class _LazyXComAccessIterator(collections.abc.Iterator):
@@ -879,28 +880,35 @@ def refresh_from_db(self, session=NEW_SESSION, lock_for_update=False) -> None:
         """
         self.log.debug("Refreshing TaskInstance %s from DB", self)
 
-        qry = session.query(TaskInstance).filter(
-            TaskInstance.dag_id == self.dag_id,
-            TaskInstance.task_id == self.task_id,
-            TaskInstance.run_id == self.run_id,
-            TaskInstance.map_index == self.map_index,
+        if self in session:
+            session.refresh(self, TaskInstance.__mapper__.column_attrs.keys())
+
+        qry = (
+            # To avoid joining any relationships, by default select all
+            # columns, not the object. This also means we get (effectively) a
+            # namedtuple back, not a TI object
+            session.query(*TaskInstance.__table__.columns).filter(
+                TaskInstance.dag_id == self.dag_id,
+                TaskInstance.task_id == self.task_id,
+                TaskInstance.run_id == self.run_id,
+                TaskInstance.map_index == self.map_index,
+            )
         )
 
         if lock_for_update:
             for attempt in run_with_db_retries(logger=self.log):
                 with attempt:
-                    ti: Optional[TaskInstance] = qry.with_for_update().first()
+                    ti: Optional[TaskInstance] = qry.with_for_update().one_or_none()
         else:
-            ti = qry.first()
+            ti = qry.one_or_none()
         if ti:
             # Fields ordered per model definition
             self.start_date = ti.start_date
             self.end_date = ti.end_date
             self.duration = ti.duration
             self.state = ti.state
-            # Get the raw value of try_number column, don't read through the
-            # accessor here otherwise it will be incremented by one already.
-            self.try_number = ti._try_number
+            # Since we selected columns, not the object, this is the raw value
+            self.try_number = ti.try_number
             self.max_tries = ti.max_tries
             self.hostname = ti.hostname
             self.unixname = ti.unixname
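
A note on the try_number change above: because the query now selects table columns rather than the mapped entity, each result is a Row keyed by column names, so the raw database value is read as ti.try_number instead of going through the _try_number attribute. A small self-contained sketch with a hypothetical Thing model (not Airflow's TaskInstance) showing the same attribute-name-versus-column-name pattern:

# Hedged sketch: hypothetical Thing model, not Airflow's TaskInstance.
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Thing(Base):
    __tablename__ = "thing"
    id = Column(Integer, primary_key=True)
    # Mapped attribute `_tries`, database column `tries` -- the same pattern
    # TaskInstance uses for `_try_number` / the `try_number` column.
    _tries = Column("tries", Integer, default=0)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Thing(id=1, _tries=3))
    session.commit()

    obj = session.query(Thing).one()
    row = session.query(*Thing.__table__.columns).one()

    print(obj._tries)  # 3 -- entity query: read via the mapped attribute
    print(row.tries)   # 3 -- column query: read via the raw column name
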
8 changes: 5 additions & 3 deletions tests/jobs/test_scheduler_job.py
@@ -457,7 +457,8 @@ def test_execute_task_instances_is_paused_wont_execute(self, session, dag_maker)
         (ti1,) = dr1.task_instances
         ti1.state = State.SCHEDULED
 
-        self.scheduler_job._critical_section_execute_task_instances(session)
+        self.scheduler_job._critical_section_enqueue_task_instances(session)
+        session.flush()
         ti1.refresh_from_db(session=session)
         assert State.SCHEDULED == ti1.state
         session.rollback()
@@ -1315,8 +1316,9 @@ def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(
         session.commit()
 
         with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
-            self.scheduler_job._enqueue_task_instances_with_queued_state([ti])
-            ti.refresh_from_db()
+            self.scheduler_job._enqueue_task_instances_with_queued_state([ti], session=session)
+            session.flush()
+            ti.refresh_from_db(session=session)
             assert ti.state == State.NONE
             mock_queue_command.assert_not_called()
 
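
The test changes above also add session.flush() before refresh_from_db(session=session). Since refresh_from_db now issues a plain column-level SELECT, changes that are still pending on in-session objects presumably need to be written out before they can be read back. A self-contained sketch of that flush-before-read behaviour (hypothetical Counter model; autoflush is disabled here purely to make the effect visible):

# Hedged sketch: hypothetical Counter model, not part of Airflow.
from sqlalchemy import Column, Integer, create_engine, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Counter(Base):
    __tablename__ = "counter"
    id = Column(Integer, primary_key=True)
    value = Column(Integer, default=0)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine, autoflush=False) as session:
    session.add(Counter(id=1, value=0))
    session.commit()

    counter = session.get(Counter, 1)
    counter.value = 7  # pending change, lives only on the in-memory object

    stmt = select(Counter.__table__.c.value)
    print(session.execute(stmt).scalar_one())  # 0 -- pending UPDATE not yet emitted
    session.flush()
    print(session.execute(stmt).scalar_one())  # 7 -- visible after the flush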
