add support for custom metrics
brimoor committed Dec 20, 2024
1 parent d458e19 commit ade1290
Showing 3 changed files with 147 additions and 0 deletions.
68 changes: 68 additions & 0 deletions plugins/evaluation/__init__.py
@@ -6,6 +6,7 @@
|
"""
import json
from packaging.version import Version

from bson import json_util

@@ -43,6 +44,7 @@ def execute(self, ctx):
gt_field = kwargs.pop("gt_field")
eval_key = kwargs.pop("eval_key")
method = kwargs.pop("method")
metrics = kwargs.pop("metrics", None)

target_view = _get_target_view(ctx, target)
_, eval_type, _ = _get_evaluation_type(target_view, pred_field)
@@ -52,6 +54,18 @@
# Remove None values
kwargs = {k: v for k, v in kwargs.items() if v is not None}

# Parse custom metrics
if metrics:
custom_metrics = {}
for metric in metrics:
operator = foo.get_operator(metric)
kwargs.pop(f"header|{metric}", None)
params = kwargs.pop(f"parameters|{metric}", None)
operator.parse_parameters(ctx, params)
custom_metrics[metric] = params

kwargs["custom_metrics"] = custom_metrics

if eval_type == "regression":
eval_fcn = target_view.evaluate_regressions
elif eval_type == "classification":
@@ -174,6 +188,10 @@ def evaluate_model(ctx, inputs):

_get_evaluation_method(eval_type, method).get_parameters(ctx, inputs)

# @todo can remove this if we require `fiftyone>=1.2.0`
if Version(fo.__version__) >= Version("1.2.0"):
_add_custom_metrics(ctx, inputs, eval_type, method)

return True


@@ -210,6 +228,56 @@ def _get_evaluation_type(view, pred_field):
return label_type, eval_type, methods


def _add_custom_metrics(ctx, inputs, eval_type, method):
supported_metrics = []
for operator in foo.list_operators(type="operator"):
if (
"metric" in operator.config.kwargs.get("tags", [])
and operator.config.kwargs.get("type", None) in (eval_type, None)
and operator.config.kwargs.get("method", None) in (method, None)
):
supported_metrics.append(operator)

if not supported_metrics:
return

metric_choices = types.Dropdown(multiple=True)
for operator in supported_metrics:
metric_choices.add_choice(
operator.uri,
label=operator.config.label,
description=operator.config.description,
)

inputs.list(
"metrics",
types.String(),
required=False,
default=None,
label="Custom metrics",
description="Optional custom metric(s) to compute",
view=metric_choices,
)

metrics = ctx.params.get("metrics", None)
if not metrics:
return

for metric in metrics:
operator = foo.get_operator(metric)
obj = types.Object()
obj.view(
f"header|{metric}",
types.Header(
label=f"{operator.config.label} parameters",
divider=True,
),
)
operator.get_parameters(ctx, obj)
if len(obj.properties) > 1:
inputs.define_property(f"parameters|{metric}", obj)


def _get_evaluation_method(eval_type, method):
if eval_type == "regression":
if method == "simple":
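For context, the `_add_custom_metrics()` helper above only surfaces operators that declare the `metric` tag and whose optional `type` and `method` config kwargs match the current evaluation. A minimal sketch of a config that would qualify for classification-only runs (the operator name and the `type` value are illustrative, not part of this commit):

import fiftyone.operators as foo

class MyClassificationMetric(foo.Operator):
    @property
    def config(self):
        # Extra kwargs such as `tags` and `type` are stored on
        # `config.kwargs`, which is what `_add_custom_metrics()` inspects
        return foo.OperatorConfig(
            name="my_classification_metric",
            label="My classification metric",
            tags=["metric"],  # required in order to be discovered
            type="classification",  # omit to support all evaluation types
        )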
70 changes: 70 additions & 0 deletions plugins/metric-examples/__init__.py
@@ -0,0 +1,70 @@
"""
Example metrics.

| Copyright 2017-2024, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import fiftyone as fo
import fiftyone.operators as foo
import fiftyone.operators.types as types


class EvaluationMetric(foo.Operator):
def get_parameters(self, ctx, inputs):
pass

def parse_parameters(self, ctx, params):
pass

def compute(self, samples, eval_key, results, **kwargs):
raise NotImplementedError("Subclass must implement compute()")

def get_fields(self, samples, eval_key):
return []

def rename(self, samples, eval_key, new_eval_key):
pass

def cleanup(self, samples, eval_key):
pass


class ExampleMetric(EvaluationMetric):
@property
def config(self):
return foo.OperatorConfig(
name="example_metric",
label="Example metric",
description="This is an example metric",
tags=["metric"],
)

def get_parameters(self, ctx, inputs):
inputs.str("value", default="foo", required=True)

def compute(self, samples, eval_key, results, value="foo"):
dataset = samples._dataset
metric_field = f"{eval_key}_example_metric"
dataset.add_sample_field(metric_field, fo.StringField)
samples.set_field(metric_field, value).save()
return value

def get_fields(self, samples, eval_key):
metric_field = f"{eval_key}_example_metric"
return [metric_field]

def rename(self, samples, eval_key, new_eval_key):
dataset = samples._dataset
metric_field = f"{eval_key}_example_metric"
new_metric_field = f"{new_eval_key}_example_metric"
dataset.rename_sample_field(metric_field, new_metric_field)

def cleanup(self, samples, eval_key):
dataset = samples._dataset
metric_field = f"{eval_key}_example_metric"
dataset.delete_sample_field(metric_field, error_level=1)


def register(p):
p.register(ExampleMetric)
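A minimal usage sketch, assuming `fiftyone>=1.2.0` (where the evaluation methods accept the `custom_metrics` kwarg that the operator above assembles) and a quickstart-style dataset with `predictions` and `ground_truth` fields:

import fiftyone.zoo as foz

dataset = foz.load_zoo_dataset("quickstart")

results = dataset.evaluate_detections(
    "predictions",
    gt_field="ground_truth",
    eval_key="eval",
    custom_metrics={
        "@voxel51/metric-examples/example_metric": {"value": "spam"},
    },
)

# compute() stored the parameter value on every sample
print(dataset.first()["eval_example_metric"])  # "spam"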
9 changes: 9 additions & 0 deletions plugins/metric-examples/fiftyone.yml
@@ -0,0 +1,9 @@
name: "@voxel51/metric-examples"
description: Example metrics
version: 1.0.0
fiftyone:
version: ">=1.2.0"
url: https://github.com/voxel51/fiftyone-plugins/tree/main/plugins/metric-examples
license: Apache 2.0
operators:
  - example_metric
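Continuing the sketch above, the `rename()` and `cleanup()` hooks are intended to keep the per-sample field in sync with run management; assuming they are wired into evaluation runs as their names suggest:

# Renaming the run should migrate the per-sample field via rename()
dataset.rename_evaluation("eval", "eval2")
assert "eval2_example_metric" in dataset.get_field_schema()

# Deleting the run should remove the field via cleanup()
dataset.delete_evaluation("eval2")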
