Skip to content

Commit

Permalink
Issue #332 Use log level to print less verbose logs on failed batch job
Browse files Browse the repository at this point in the history
  • Loading branch information
JohanKJSchreurs committed Feb 1, 2023
1 parent 024fe65 commit 29a1601
Show file tree
Hide file tree
Showing 2 changed files with 158 additions and 4 deletions.
50 changes: 46 additions & 4 deletions openeo/rest/job.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def list_results(self) -> dict:
def download_result(self, target: Union[str, Path] = None) -> Path:
"""
Download single job result to the target file path or into folder (current working dir by default).
Fails if there are multiple result files.
:param target: String or path where the file should be downloaded to.
Expand Down Expand Up @@ -136,14 +136,56 @@ def get_results(self) -> "JobResults":
"""
return JobResults(self)

def logs(
    self, offset=None, log_level: Optional[int] = logging.ERROR
) -> List[LogEntry]:
    """Retrieve job logs.

    :param offset: value passed through as the ``offset`` query parameter
        of the backend's log listing endpoint (pagination cursor as defined
        by the openEO API), defaults to None.
    :param log_level:
        Only keep log entries of this severity or higher. Accepts a
        ``logging`` constant (e.g. ``logging.INFO``), a level name string
        (e.g. ``"info"``, case-insensitive) or None;
        defaults to ``logging.ERROR``.
    :return: a list of log entries
    """
    url = "/jobs/{}/logs".format(self.job_id)
    logs = self.connection.get(url, params={'offset': offset}, expected_status=200).json()["logs"]
    # Normalize the threshold once (handles None and string input),
    # then keep only entries at or above that severity.
    log_level = BatchJob._normalize_log_level(log_level)
    entries = [
        LogEntry(log)
        for log in logs
        if BatchJob._string_to_log_level(log["level"]) >= log_level
    ]
    return VisualList('logs', data=entries)

@classmethod
def _normalize_log_level(cls, log_level):
if log_level is None:
return logging.ERROR

if isinstance(log_level, str):
return cls._string_to_log_level(log_level)

return log_level

@staticmethod
def _string_to_log_level(internal_log_level: Optional[str]) -> Optional[str]:
if not internal_log_level:
return logging.ERROR

internal_log_level = internal_log_level.upper()
if internal_log_level in ["CRITICAL", "ERROR"]:
return logging.ERROR
elif internal_log_level == "WARNING":
return logging.WARNING
elif internal_log_level == "INFO":
return logging.INFO
elif internal_log_level == "DEBUG":
return logging.DEBUG

return logging.ERROR

def run_synchronous(
self, outputfile: Union[str, Path, None] = None,
print=print, max_poll_interval=60, connection_retry_interval=30
Expand Down
112 changes: 112 additions & 0 deletions tests/rest/test_job.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json
import logging
import re
from pathlib import Path
from unittest import mock
Expand Down Expand Up @@ -264,6 +265,117 @@ def test_get_job_logs(session040, requests_mock):
assert log_entries[0].message == "error processing batch job"


# TODO: do we keep testing on session040, or should we switch to con100?
def test_get_job_logs_returns_only_error_loglevel_by_default(session040, requests_mock):
    """By default, ``logs()`` should only return entries at ERROR severity."""
    job_logs = {
        "logs": [
            {"id": "123abc", "level": "error", "message": "error processing batch job"},
            {"id": "234abc", "level": "debug", "message": "Some debug info we want to filter out"},
            {"id": "345abc", "level": "info", "message": "Some general info we want to filter out"},
            {"id": "345abc", "level": "warning", "message": "Some warning we want to filter out"},
        ]
    }
    requests_mock.get(API_URL + "/jobs/f00ba5/logs", json=job_logs)

    log_entries = session040.job("f00ba5").logs()

    assert len(log_entries) == 1
    assert log_entries[0].message == "error processing batch job"


@pytest.mark.parametrize(
    ["log_level", "exp_num_messages"],
    [
        (None, 1),
        (logging.ERROR, 1),
        ("error", 1),
        ("ERROR", 1),
        (logging.WARNING, 2),
        ("warning", 2),
        ("WARNING", 2),
        (logging.INFO, 3),
        ("INFO", 3),
        ("info", 3),
        (logging.DEBUG, 4),
        ("DEBUG", 4),
        ("debug", 4),
    ],
)
def test_get_job_logs_keeps_loglevel_that_is_higher_or_equal(
    session040, requests_mock, log_level, exp_num_messages
):
    """``logs(log_level=...)`` keeps entries at the given severity and above."""
    job_logs = {
        "logs": [
            {"id": "123abc", "level": "error", "message": "error processing batch job"},
            {"id": "234abc", "level": "debug", "message": "Some debug info we want to filter out"},
            {"id": "345abc", "level": "info", "message": "Some general info we want to filter out"},
            {"id": "345abc", "level": "warning", "message": "Some warning we want to filter out"},
        ]
    }
    requests_mock.get(API_URL + "/jobs/f00ba5/logs", json=job_logs)

    log_entries = session040.job("f00ba5").logs(log_level=log_level)
    assert len(log_entries) == exp_num_messages


@pytest.mark.parametrize(
    ["log_level_in", "expected_log_level"],
    [
        (None, logging.ERROR),
        (logging.ERROR, logging.ERROR),
        ("error", logging.ERROR),
        ("ERROR", logging.ERROR),
        (logging.WARNING, logging.WARNING),
        ("warning", logging.WARNING),
        ("WARNING", logging.WARNING),
        (logging.INFO, logging.INFO),
        ("INFO", logging.INFO),
        ("info", logging.INFO),
        (logging.DEBUG, logging.DEBUG),
        ("DEBUG", logging.DEBUG),
        ("debug", logging.DEBUG),
    ],
)
def test_normalize_log_level(log_level_in, expected_log_level):
    """``_normalize_log_level`` accepts ints, level name strings and None."""
    assert BatchJob._normalize_log_level(log_level_in) == expected_log_level


def test_create_job_100(con100, requests_mock):
def check_request(request):
assert request.json() == {
Expand Down

0 comments on commit 29a1601

Please sign in to comment.