Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Clear up terminology #66

Merged
merged 3 commits into from
Nov 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions cylc/flow/run_modes/simulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,6 @@ def submit_task_job(
'name': RunMode.SIMULATION.value,
'install target': 'localhost',
'hosts': ['localhost'],
'disable task event handlers':
rtconfig['simulation']['disable task event handlers'],
'submission retry delays': [],
'execution retry delays': []
}
Expand Down
7 changes: 4 additions & 3 deletions cylc/flow/run_modes/skip.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,9 +109,10 @@ def process_outputs(itask: 'TaskProxy', rtconfig: Dict) -> List[str]:
# which we hold back, to prevent warnings about pre-requisites being
# unmet being shown because a "finished" output happens to come first.
for message in itask.state.outputs.iter_required_messages(
exclude=(
TASK_OUTPUT_SUCCEEDED if TASK_OUTPUT_FAILED
in conf_outputs else TASK_OUTPUT_FAILED
disable=(
TASK_OUTPUT_SUCCEEDED
if TASK_OUTPUT_FAILED in conf_outputs
else TASK_OUTPUT_FAILED
)
):
trigger = itask.state.outputs._message_to_trigger[message]
Expand Down
28 changes: 18 additions & 10 deletions cylc/flow/task_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def get_completion_expression(tdef: 'TaskDef') -> str:
def get_optional_outputs(
expression: str,
outputs: Iterable[str],
force_optional: "Optional[str]" = None
disable: "Optional[str]" = None
) -> Dict[str, Optional[bool]]:
"""Determine which outputs in an expression are optional.

Expand All @@ -204,8 +204,9 @@ def get_optional_outputs(
The completion expression.
outputs:
All outputs that apply to this task.
force_optional:
Don't have the CompletionEvaluator consider this output.
disable:
Disable this output and any others it is joined with by `and`
(which will mean they are necessarily optional).

Returns:
dict: compvar: is_optional
Expand Down Expand Up @@ -236,7 +237,14 @@ def get_optional_outputs(
>>> sorted(get_optional_outputs(
... '(succeeded and towel) or (failed and bugblatter)',
... {'succeeded', 'towel', 'failed', 'bugblatter'},
... 'failed'
... ).items())
[('bugblatter', True), ('failed', True),
('succeeded', True), ('towel', True)]

>>> sorted(get_optional_outputs(
... '(succeeded and towel) or (failed and bugblatter)',
... {'succeeded', 'towel', 'failed', 'bugblatter'},
... disable='failed'
... ).items())
[('bugblatter', True), ('failed', True),
('succeeded', False), ('towel', False)]
Expand All @@ -249,7 +257,7 @@ def get_optional_outputs(
all_compvars = {trigger_to_completion_variable(out) for out in outputs}

# Allows exclusion of additional outcomes:
extra_excludes = {force_optional: False} if force_optional else {}
extra_excludes = {disable: False} if disable else {}

return { # output: is_optional
# the outputs that are used in the expression
Expand Down Expand Up @@ -627,22 +635,22 @@ def _is_compvar_complete(self, compvar: str) -> Optional[bool]:

def iter_required_messages(
self,
exclude: 'Optional[Literal["succeeded", "failed"]]' = None
disable: 'Optional[Literal["succeeded", "failed"]]' = None
) -> Iterator[str]:
"""Yield task messages that are required for this task to be complete.

Note, in some cases tasks might not have any required messages,
e.g. "completion = succeeded or failed".

Args:
            exclude: Don't check whether this output is required for
completion - in skip mode we only want to check either
succeeded or failed, but not both.
disable: Consider this output and any others it is joined with by
`and` to not exist. In skip mode we only want to check either
succeeded or failed, but not both.
"""
for compvar, is_optional in get_optional_outputs(
self._completion_expression,
set(self._message_to_compvar.values()),
force_optional=exclude
disable=disable
).items():
if is_optional is False:
for message, _compvar in self._message_to_compvar.items():
Expand Down
2 changes: 1 addition & 1 deletion cylc/flow/taskdef.py
Original file line number Diff line number Diff line change
Expand Up @@ -409,7 +409,7 @@ def is_parentless(self, point):
def __repr__(self) -> str:
"""
>>> TaskDef(
... name='oliver', rtcfg={}, run_mode='fake', start_point='1',
... name='oliver', rtcfg={}, start_point='1',
... initial_point='1'
... )
<TaskDef 'oliver'>
Expand Down
4 changes: 2 additions & 2 deletions tests/unit/run_modes/test_skip_units.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def test_process_outputs(outputs, required, expect):
rtconfig=rtconf),
state=SimpleNamespace(
outputs=SimpleNamespace(
iter_required_messages=lambda exclude: iter(required),
iter_required_messages=lambda *a, **k: iter(required),
_message_to_trigger={v: v for v in required}
)))

Expand All @@ -114,7 +114,7 @@ def test_skip_mode_validate(monkeypatch, caplog):
https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md

| If the run mode is set to simulation or skip in the workflow
| configuration, then cylc validate and cylc lint should produce
| configuration, then cylc validate and cylc lint should produce
| warning (similar to development features in other languages / systems).
"""
taskdefs = {
Expand Down
36 changes: 24 additions & 12 deletions tests/unit/test_task_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,8 @@ def test_iter_required_outputs():
'y',
}


def test_iter_required_outputs__disable():
# Get all outputs required for success path (excluding failure, what
# is still required):
outputs = TaskOutputs(
Expand All @@ -298,21 +300,31 @@ def test_iter_required_outputs():
)
)

# Excluding succeeded leaves us with failure required outputs:
assert set(outputs.iter_required_messages(
exclude=TASK_OUTPUT_SUCCEEDED)) == {
TASK_OUTPUT_FAILED, 'x', 'y',}
assert set(outputs.iter_required_messages()) == set()

# Excluding failed leaves us with succeeded required outputs:
assert set(outputs.iter_required_messages(
exclude=TASK_OUTPUT_FAILED)) == {
TASK_OUTPUT_SUCCEEDED, 'a', 'b',}
# Disabling succeeded leaves us with failure required outputs:
assert set(
outputs.iter_required_messages(disable=TASK_OUTPUT_SUCCEEDED)
) == {
TASK_OUTPUT_FAILED,
'x',
'y',
}

    # Excluding an arbitrary output leaves us with required outputs
# Disabling failed leaves us with succeeded required outputs:
assert set(outputs.iter_required_messages(disable=TASK_OUTPUT_FAILED)) == {
TASK_OUTPUT_SUCCEEDED,
'a',
'b',
}

    # Disabling an arbitrary output leaves us with required outputs
# from another branch:
assert set(outputs.iter_required_messages(
exclude='a')) == {
TASK_OUTPUT_FAILED, 'x', 'y',}
assert set(outputs.iter_required_messages(disable='a')) == {
TASK_OUTPUT_FAILED,
'x',
'y',
}


def test_get_trigger_completion_variable_maps():
Expand Down
8 changes: 4 additions & 4 deletions tests/unit/test_task_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
)
def test_state_comparison(state, is_held):
"""Test the __call__ method."""
tdef = TaskDef('foo', {}, 'live', '123', '123')
tdef = TaskDef('foo', {}, '123', '123')
tstate = TaskState(tdef, '123', state, is_held)

assert tstate(state, is_held=is_held)
Expand Down Expand Up @@ -72,7 +72,7 @@ def test_state_comparison(state, is_held):
)
def test_reset(state, is_held, should_reset):
"""Test that tasks do or don't have their state changed."""
tdef = TaskDef('foo', {}, 'live', '123', '123')
tdef = TaskDef('foo', {}, '123', '123')
# create task state:
# * status: waiting
# * is_held: true
Expand All @@ -96,7 +96,7 @@ def test_task_prereq_duplicates(set_cycling_type):

dep = Dependency([trig], [trig], False)

tdef = TaskDef('foo', {}, 'live', IntegerPoint("1"), IntegerPoint("1"))
tdef = TaskDef('foo', {}, IntegerPoint("1"), IntegerPoint("1"))
tdef.add_dependency(dep, seq1)
tdef.add_dependency(dep, seq2) # duplicate!

Expand All @@ -110,7 +110,7 @@ def test_task_prereq_duplicates(set_cycling_type):
def test_task_state_order():
"""Test is_gt and is_gte methods."""

tdef = TaskDef('foo', {}, 'live', IntegerPoint("1"), IntegerPoint("1"))
tdef = TaskDef('foo', {}, IntegerPoint("1"), IntegerPoint("1"))
tstate = TaskState(tdef, IntegerPoint("1"), TASK_STATUS_SUBMITTED, False)

assert tstate.is_gt(TASK_STATUS_WAITING)
Expand Down
2 changes: 0 additions & 2 deletions tests/unit/test_xtrigger_mgr.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,6 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr):
tdef = TaskDef(
name="foo",
rtcfg={'completion': None},
run_mode="live",
start_point=1,
initial_point=1,
)
Expand Down Expand Up @@ -232,7 +231,6 @@ def test__call_xtriggers_async(xtrigger_mgr):
tdef = TaskDef(
name="foo",
rtcfg={'completion': None},
run_mode="live",
start_point=1,
initial_point=1
)
Expand Down
Loading