-
Notifications
You must be signed in to change notification settings - Fork 927
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: Add test for evaluate step #460
Changes from all commits
ae112cb
2fbd877
010660b
8e0c9c0
ae03634
b600eaf
bbc7117
d954077
6cd98ae
2c6dada
9fdafaf
aead2ab
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,7 +1,8 @@ | ||
import logging | ||
from typing import TextIO | ||
|
||
logger = logging.getLogger(__name__) | ||
h = logging.StreamHandler() | ||
fmt = logging.Formatter("[%(asctime)s/%(levelname)s] - %(message)s") | ||
logger: logging.Logger = logging.getLogger(__name__) | ||
h: logging.StreamHandler[TextIO] = logging.StreamHandler() | ||
fmt: logging.Formatter = logging.Formatter("[%(asctime)s/%(levelname)s] - %(message)s") | ||
h.setFormatter(fmt) | ||
logger.addHandler(h) |
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
@@ -1,25 +1,30 @@ | ||||||
from typing import Any | ||||||
import logging | ||||||
|
||||||
from beartype import beartype | ||||||
from temporalio import activity | ||||||
|
||||||
from ...activities.task_steps.utils import simple_eval_dict | ||||||
from ...autogen.openapi_model import EvaluateStep | ||||||
from ...common.protocol.tasks import ( | ||||||
StepContext, | ||||||
StepOutcome, | ||||||
) | ||||||
from ...common.protocol.tasks import StepContext, StepOutcome | ||||||
from ...env import testing | ||||||
|
||||||
|
||||||
@beartype | ||||||
async def evaluate_step( | ||||||
context: StepContext[EvaluateStep], | ||||||
) -> StepOutcome[dict[str, Any]]: | ||||||
exprs = context.definition.arguments | ||||||
output = simple_eval_dict(exprs, values=context.model_dump()) | ||||||
async def evaluate_step(context: StepContext) -> StepOutcome: | ||||||
# NOTE: This activity is only for returning immediately, so we just evaluate the expression | ||||||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The `evaluate_step` function signature should include the specific step type `EvaluateStep` for the `context` parameter to ensure type safety and clarity in the function's usage.
Suggested change
|
||||||
# Hence, it's a local activity and SHOULD NOT fail | ||||||
try: | ||||||
assert isinstance(context.current_step, EvaluateStep) | ||||||
|
||||||
exprs = context.current_step.evaluate | ||||||
output = simple_eval_dict(exprs, values=context.model_dump()) | ||||||
|
||||||
result = StepOutcome(output=output) | ||||||
return result | ||||||
|
||||||
return StepOutcome(output=output) | ||||||
except BaseException as e: | ||||||
logging.error(f"Error in evaluate_step: {e}") | ||||||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Catching `BaseException` is very broad — consider catching `Exception` instead, so that system-exiting exceptions (`KeyboardInterrupt`, `SystemExit`) are not swallowed. |
||||||
return StepOutcome(error=str(e)) | ||||||
|
||||||
|
||||||
# Note: This is here just for clarity. We could have just imported evaluate_step directly | ||||||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,24 +1,40 @@ | ||
import logging | ||
|
||
from beartype import beartype | ||
from simpleeval import simple_eval | ||
from temporalio import activity | ||
|
||
from ...autogen.openapi_model import ( | ||
IfElseWorkflowStep, | ||
) | ||
from ...autogen.openapi_model import IfElseWorkflowStep | ||
from ...common.protocol.tasks import ( | ||
StepContext, | ||
StepOutcome, | ||
) | ||
from ...env import testing | ||
|
||
|
||
@activity.defn | ||
@beartype | ||
async def if_else_step(context: StepContext[IfElseWorkflowStep]) -> dict: | ||
raise NotImplementedError() | ||
# context_data: dict = context.model_dump() | ||
async def if_else_step(context: StepContext) -> StepOutcome: | ||
# NOTE: This activity is only for logging, so we just evaluate the expression | ||
# Hence, it's a local activity and SHOULD NOT fail | ||
try: | ||
assert isinstance(context.current_step, IfElseWorkflowStep) | ||
|
||
expr: str = context.current_step.if_ | ||
output = simple_eval(expr, names=context.model_dump()) | ||
output: bool = bool(output) | ||
|
||
result = StepOutcome(output=output) | ||
return result | ||
|
||
# next_workflow = ( | ||
# context.definition.then | ||
# if simple_eval(context.definition.if_, names=context_data) | ||
# else context.definition.else_ | ||
# ) | ||
except BaseException as e: | ||
logging.error(f"Error in if_else_step: {e}") | ||
return StepOutcome(error=str(e)) | ||
|
||
# return {"goto_workflow": next_workflow} | ||
|
||
# Note: This is here just for clarity. We could have just imported if_else_step directly | ||
# They do the same thing, so we don't need to mock the if_else_step function | ||
mock_if_else_step = if_else_step | ||
|
||
if_else_step = activity.defn(name="if_else_step")( | ||
if_else_step if not testing else mock_if_else_step | ||
) |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,37 @@ | ||
import logging

from beartype import beartype
from simpleeval import simple_eval
from temporalio import activity

from ...autogen.openapi_model import LogStep
from ...common.protocol.tasks import StepContext, StepOutcome
from ...env import testing


@beartype
async def log_step(context: StepContext) -> StepOutcome:
    """Evaluate the current log step's expression and return it as the outcome.

    NOTE: This activity is only for logging, so we just evaluate the
    expression. Hence, it's a local activity and SHOULD NOT fail: any
    evaluation error is captured and reported via ``StepOutcome.error``.
    """
    try:
        step = context.current_step
        assert isinstance(step, LogStep)

        evaluated = simple_eval(step.log, names=context.model_dump())
        return StepOutcome(output=evaluated)

    except BaseException as e:  # deliberately broad: a log step must not crash the workflow
        logging.error(f"Error in log_step: {e}")
        return StepOutcome(error=str(e))


# Note: This is here just for clarity. We could have just imported log_step directly
# They do the same thing, so we don't need to mock the log_step function
mock_log_step = log_step

log_step = activity.defn(name="log_step")(log_step if not testing else mock_log_step)
Original file line number | Diff line number | Diff line change |
---|---|---|
import logging

from temporalio import activity

from ...activities.task_steps.utils import simple_eval_dict
from ...autogen.openapi_model import ReturnStep
from ...common.protocol.tasks import (
    StepContext,
    StepOutcome,
)
from ...env import testing


async def return_step(context: StepContext) -> StepOutcome:
    """Evaluate the return step's expressions and return them as the outcome.

    NOTE: This activity is only for returning immediately, so we just
    evaluate the expressions. Hence, it's a local activity and SHOULD NOT
    fail: any evaluation error is captured and reported via
    ``StepOutcome.error`` instead of propagating.
    """
    try:
        assert isinstance(context.current_step, ReturnStep)

        exprs: dict[str, str] = context.current_step.return_
        output = simple_eval_dict(exprs, values=context.model_dump())

        return StepOutcome(output=output)

    except BaseException as e:  # deliberately broad: must not crash the workflow
        # Fixed: previously logged "Error in log_step" — a copy-paste slip
        # from the log_step activity that made error logs misleading.
        logging.error(f"Error in return_step: {e}")
        return StepOutcome(error=str(e))


# Note: This is here just for clarity. We could have just imported return_step directly
# They do the same thing, so we don't need to mock the return_step function
mock_return_step = return_step

return_step = activity.defn(name="return_step")(
    return_step if not testing else mock_return_step
)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
import logging

from beartype import beartype
from simpleeval import simple_eval
from temporalio import activity

from ...autogen.openapi_model import SwitchStep
from ...common.protocol.tasks import StepContext, StepOutcome
from ...env import testing


@beartype
async def switch_step(context: StepContext) -> StepOutcome:
    """Return the index of the first switch case whose condition is truthy.

    The outcome's output is the matching case index, or ``-1`` when no case
    evaluates to truthy. This is a local activity and SHOULD NOT fail:
    evaluation errors are reported via ``StepOutcome.error``.
    """
    try:
        step = context.current_step
        assert isinstance(step, SwitchStep)

        names = context.model_dump()

        # -1 encodes "none of the cases evaluated to truthy"
        matched_index: int = -1

        for index, condition in enumerate(case.case for case in step.switch):
            # NOTE(review): simple_eval evaluates arbitrary expressions —
            # confirm that case conditions come from trusted task definitions.
            if simple_eval(condition, names=names):
                matched_index = index
                break

        return StepOutcome(output=matched_index)

    except BaseException as e:  # deliberately broad: must not crash the workflow
        logging.error(f"Error in switch_step: {e}")
        return StepOutcome(error=str(e))


# Note: This is here just for clarity. We could have just imported switch_step directly
# They do the same thing, so we don't need to mock the switch_step function
mock_switch_step = switch_step

switch_step = activity.defn(name="switch_step")(
    switch_step if not testing else mock_switch_step
)
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
from temporalio import activity

from ...activities.task_steps.utils import simple_eval_dict
from ...autogen.openapi_model import WaitForInputStep
from ...common.protocol.tasks import StepContext, StepOutcome
from ...env import testing


async def wait_for_input_step(context: StepContext) -> StepOutcome:
    """Evaluate the wait_for_input step's expressions and return them.

    NOTE(review): unlike the sibling step activities, this one has no
    try/except wrapper, so evaluation errors propagate to the caller —
    confirm whether that is intentional.
    """
    step = context.current_step
    assert isinstance(step, WaitForInputStep)

    evaluated = simple_eval_dict(step.wait_for_input, values=context.model_dump())
    return StepOutcome(output=evaluated)


# Note: This is here just for clarity. We could have just imported wait_for_input_step directly
# They do the same thing, so we don't need to mock the wait_for_input_step function
mock_wait_for_input_step = wait_for_input_step

wait_for_input_step = activity.defn(name="wait_for_input_step")(
    wait_for_input_step if not testing else mock_wait_for_input_step
)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The
evaluate_step
function signature should include the specific step typeEvaluateStep
for thecontext
parameter to ensure type safety and clarity in the function's usage.