#26 Refine collecting logging outputs #89

Merged · 5 commits · Dec 3, 2024

Changes from all commits
2 changes: 1 addition & 1 deletion src/sdk/python/rtdip_sdk/pipelines/logging/interfaces.py
@@ -20,5 +20,5 @@

class LoggingBaseInterface(PipelineComponentBaseInterface):
    @abstractmethod
-    def get_logs_as_df(self) -> DataFrame:
+    def get_logs_as_df(self, logger_name: str) -> DataFrame:
        pass
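The interface now takes the logger name, so a conforming component returns the logs for a single named logger rather than one shared DataFrame. A rough, hypothetical illustration of the updated signature (PerLoggerLogStore is not part of this PR, a PySpark DataFrame is assumed, and the other PipelineComponentBaseInterface hooks are omitted for brevity):

import logging

from pyspark.sql import DataFrame
from pyspark.sql import functions as F

from src.sdk.python.rtdip_sdk.pipelines.logging.interfaces import LoggingBaseInterface


class PerLoggerLogStore(LoggingBaseInterface):
    """Hypothetical example: filters a combined log DataFrame down to one logger."""

    def __init__(self, logs_df: DataFrame):
        self._logs_df = logs_df

    def get_logs_as_df(self, logger_name: str) -> DataFrame:
        # "name" matches the column layout used by DataFrameLogHandler below
        return self._logs_df.filter(F.col("name") == logger_name)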
src/sdk/python/rtdip_sdk/pipelines/logging/spark/dataframe/dataframe_log_handler.py
@@ -13,14 +13,17 @@
# limitations under the License.
import logging

-import pandas
-from pandas import DataFrame
+from pyspark.sql import DataFrame as PySparkDataFrame, SparkSession
from datetime import datetime


+from pyspark.sql.types import StructField, TimestampType, StringType, StructType, Row


class DataFrameLogHandler(logging.Handler):
"""
    Handles logs from an attached logger and stores them in a DataFrame at runtime.
    Uses the following format: {Timestamp, Logger Name, Logging Level, Log Message}

Args:
logging.Handler: Inherits from logging.Handler
@@ -37,25 +40,33 @@ class DataFrameLogHandler(logging.Handler):

"""

-    logs_df: DataFrame = None
+    logs_df: PySparkDataFrame = None
+    spark: SparkSession

-    def __init__(self):
-        self.logs_df = DataFrame(columns=["timestamp", "name", "level", "message"])
+    def __init__(self, spark: SparkSession):
+        self.spark = spark
+        schema = StructType(
+            [
+                StructField("timestamp", TimestampType(), True),
+                StructField("name", StringType(), True),
+                StructField("level", StringType(), True),
+                StructField("message", StringType(), True),
+            ]
+        )
+
+        self.logs_df = self.spark.createDataFrame([], schema)
        super().__init__()

    def emit(self, record: logging.LogRecord) -> None:
        """Process and store a log record"""
-        log_entry = {
-            "timestamp": datetime.fromtimestamp(record.created),
-            "name": record.name,
-            "level": record.levelname,
-            "message": record.msg,
-        }
-
-        new_log_df_row = pandas.DataFrame(
-            log_entry, columns=["timestamp", "name", "level", "message"], index=[0]
-        )
-        self.logs_df = pandas.concat([self.logs_df, new_log_df_row], ignore_index=True)
+        new_log_entry = Row(
+            timestamp=datetime.fromtimestamp(record.created),
+            name=record.name,
+            level=record.levelname,
+            message=record.msg,
+        )
+        self.logs_df = self.logs_df.union(self.spark.createDataFrame([new_log_entry]))

-    def get_logs_as_df(self) -> DataFrame:
+    def get_logs_as_df(self) -> PySparkDataFrame:
        return self.logs_df
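Taken together, the reworked handler buffers log records in a Spark DataFrame instead of a pandas one. A minimal usage sketch, assuming a local SparkSession and the import path used later in this diff (logger name and app name are illustrative):

import logging

from pyspark.sql import SparkSession

from src.sdk.python.rtdip_sdk.pipelines.logging.spark.dataframe.dataframe_log_handler import (
    DataFrameLogHandler,
)

spark = SparkSession.builder.master("local[1]").appName("log-capture").getOrCreate()

handler = DataFrameLogHandler(spark)  # starts with an empty DataFrame in the fixed schema
logger = logging.getLogger("IdentifyMissingDataInterval")
logger.setLevel(logging.INFO)
logger.addHandler(handler)

logger.info("interval check started")  # each record is unioned in as a single Row
logger.warning("missing interval detected")

handler.get_logs_as_df().show(truncate=False)  # columns: timestamp, name, level, message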
@@ -13,7 +13,7 @@
# limitations under the License.
import logging

-import pandas

from pandas import DataFrame
from datetime import datetime

src/sdk/python/rtdip_sdk/pipelines/logging/spark/runtime_log_collector.py
@@ -1,13 +1,12 @@
import os

-from pandas import DataFrame
-from pandas.io.common import file_path_to_url
+from pyspark.sql import SparkSession

from src.sdk.python.rtdip_sdk.pipelines._pipeline_utils.models import (
Libraries,
SystemType,
)
-from src.sdk.python.rtdip_sdk.pipelines.logging.interfaces import LoggingBaseInterface

from src.sdk.python.rtdip_sdk.pipelines.logging.logger_manager import LoggerManager
from src.sdk.python.rtdip_sdk.pipelines.logging.spark.dataframe.dataframe_log_handler import (
DataFrameLogHandler,
@@ -17,19 +16,15 @@
)


-class RuntimeLogCollector(LoggingBaseInterface):
+class RuntimeLogCollector:
    """Collects logs from all loggers in the LoggerManager at runtime."""

    logger_manager: LoggerManager = LoggerManager()
-    df_handler: DataFrameLogHandler = DataFrameLogHandler()
-
-    def __init__(self):
-        pass
+    spark: SparkSession

-    @classmethod
-    def get_logs_as_df(cls) -> DataFrame:
-        """Return the DataFrame containing the logs"""
-        return cls.df_handler.get_logs_as_df()
+    def __init__(self, spark: SparkSession):
+        self.spark = spark

@staticmethod
def libraries():
@@ -44,24 +39,23 @@ def settings() -> dict:
def system_type() -> SystemType:
pass

-    @classmethod
-    def _attach_dataframe_handler_to_loggers(cls) -> None:
-        """Attaches the DataFrameLogHandler to the logger."""
-
-        loggers = cls.logger_manager.get_all_loggers()
-
-        for logger in loggers.values():
-            # avoid duplicate handlers
-            if cls.df_handler not in logger.handlers:
-                logger.addHandler(cls.df_handler)
+    def _attach_dataframe_handler_to_logger(
+        self, logger_name: str
+    ) -> DataFrameLogHandler:
+        """Attaches a DataFrameLogHandler to the named logger and returns the handler."""
+        logger = self.logger_manager.get_logger(logger_name)
+        df_log_handler = DataFrameLogHandler(self.spark)
+        if logger is not None:
+            if df_log_handler not in logger.handlers:
+                logger.addHandler(df_log_handler)
+            return df_log_handler

-    @classmethod
    def _attach_file_handler_to_loggers(
-        cls, filename: str, path: str = ".", mode: str = "a"
+        self, filename: str, path: str = ".", mode: str = "a"
    ) -> None:
        """Attaches the FileLogHandler to the logger."""

-        loggers = cls.logger_manager.get_all_loggers()
+        loggers = self.logger_manager.get_all_loggers()
file_path = os.path.join(path, filename)
file_handler = FileLogHandler(file_path, mode)
for logger in loggers.values():
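With the collector now instance-based, each call to _attach_dataframe_handler_to_logger creates a dedicated DataFrameLogHandler for the named logger, so different loggers accumulate into separate DataFrames. A rough usage sketch, assuming a local SparkSession and mirroring how the tests below drive it (logger and app names are illustrative):

import logging

from pyspark.sql import SparkSession

from src.sdk.python.rtdip_sdk.pipelines.logging.logger_manager import LoggerManager
from src.sdk.python.rtdip_sdk.pipelines.logging.spark.runtime_log_collector import (
    RuntimeLogCollector,
)

spark = SparkSession.builder.master("local[1]").appName("rtdip-logging").getOrCreate()

logger = LoggerManager().create_logger("IdentifyMissingDataInterval")
logger.setLevel(logging.INFO)
collector = RuntimeLogCollector(spark)

# The method returns the handler it attached; keep it to read that logger's DataFrame.
handler = collector._attach_dataframe_handler_to_logger("IdentifyMissingDataInterval")

logger.info("check started")
logger.info("check finished")
print(handler.get_logs_as_df().count())  # 2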
@@ -14,9 +14,11 @@
import os

import pytest

from pandas import DataFrame
from pyspark.sql import SparkSession

from src.sdk.python.rtdip_sdk.pipelines.logging.logger_manager import LoggerManager
from src.sdk.python.rtdip_sdk.pipelines.logging.spark.runtime_log_collector import (
RuntimeLogCollector,
)
@@ -38,20 +40,20 @@ def spark():
spark.stop()


-def test_logger_manager_basic_function():
+def test_logger_manager_basic_function(spark):
df = DataFrame()
monitor = IdentifyMissingDataInterval(
df=df,
interval="10s",
tolerance="500ms",
)
-    log_collector = RuntimeLogCollector()
+    log_collector = RuntimeLogCollector(spark)

assert monitor.logger_manager is log_collector.logger_manager


def test_df_output(spark, caplog):
-    log_collector = RuntimeLogCollector()
+    log_collector = RuntimeLogCollector(spark)
data = [
(1, "2024-02-11 00:00:00.000"),
(2, "2024-02-11 00:00:10.000"),
@@ -66,24 +68,64 @@ def test_df_output(spark, caplog):
]
columns = ["Index", "EventTime"]
df = spark.createDataFrame(data, schema=columns)

monitor = IdentifyMissingDataInterval(
df=df,
interval="10s",
tolerance="500ms",
)
-    log_collector._attach_dataframe_handler_to_loggers()
+    log_handler = log_collector._attach_dataframe_handler_to_logger(
+        "IdentifyMissingDataInterval"
+    )

with caplog.at_level(logging.INFO, logger="IdentifyMissingDataInterval"):
monitor.check()

result_df = log_handler.get_logs_as_df()

assert result_df.count() == 6


def test_unique_dataframes(spark, caplog):
log_collector = RuntimeLogCollector(spark)
data = [
(1, "2024-02-11 00:00:00.000"),
(2, "2024-02-11 00:00:10.000"),
(3, "2024-02-11 00:00:20.000"),
(4, "2024-02-11 00:00:36.000"), # Missing interval (20s to 36s)
(5, "2024-02-11 00:00:45.000"),
(6, "2024-02-11 00:00:55.000"),
(7, "2024-02-11 00:01:05.000"),
(8, "2024-02-11 00:01:15.000"),
(9, "2024-02-11 00:01:25.000"),
(10, "2024-02-11 00:01:41.000"), # Missing interval (25s to 41s)
]
columns = ["Index", "EventTime"]
df = spark.createDataFrame(data, schema=columns)
logger = LoggerManager().create_logger("Test_Logger")
monitor = IdentifyMissingDataInterval(
df=df,
interval="10s",
tolerance="500ms",
)
log_handler_identify_missing_data_interval = (
log_collector._attach_dataframe_handler_to_logger("IdentifyMissingDataInterval")
)

log_handler_test = log_collector._attach_dataframe_handler_to_logger("Test_Logger")

with caplog.at_level(logging.INFO, logger="IdentifyMissingDataInterval"):
monitor.check()

-    result_df = log_collector.get_logs_as_df()
+    result_df = log_handler_identify_missing_data_interval.get_logs_as_df()
+    result_df_test = log_handler_test.get_logs_as_df()

-    assert result_df.shape[0] == 6
+    assert result_df.count() != result_df_test.count()


def test_file_logging(spark, caplog):

-    log_collector = RuntimeLogCollector()
+    log_collector = RuntimeLogCollector(spark)
data = [
(1, "2024-02-11 00:00:00.000"),
(2, "2024-02-11 00:00:10.000"),