Add a tool to help conduct experiments (#2651)
* implement run and experiment

* implement experiment result aggregator

* refactor experiment.py

* refactor run.py

* get export model speed

* add var column

* refactor experiment.py

* refine a way to update argument in cmd

* refine resource tracker

* support anomaly in the research framework

* refine code aggregating exp result

* bugfix

* make other tasks available

* eval task saves avg_time_per_image as a result

* Add a new argument to track CPU & GPU utilization and memory usage (#2500); a resource-tracker sketch follows this list

* add argument to track resource usage

* fix bug

* fix a bug in a multi gpu case

* use total cpu usage

* add unit test

* add mark to unit test

* cover edge case

* add pynvml in requirement

* align with pre-commit

* add license comment

* update changelog

* refine argument help

* align with pre-commit

* add version to requirement and raise an error if unsupported values are given

* apply new resource tracker format

* refactor run.py

* support optimize in the research framework

* cover edge case

* Handle the case where failed cases exist

* make argparse raise an error rather than exit if a problem exists

* revert tensorboard aggregator

* bugfix

* save failed cases as yaml file

* deal with integer in variables

* add epoch to metric

* use latest log.json file

* align with otx logging method

* move experiment.py from cli to tools

* refactor experiment.py

* merge otx run feature into experiment.py

* move set_arguments_to_cmd definition into experiment.py

* refactor experiment.py

* bugfix

* minor bugfix

* use otx.cli instead of each otx entry

* add feature to parse single workspace

* add comments

* fix bugs

* align with pre-commit

* revert parser argument

* align with pre-commit
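
Several of the commits above add tracking of CPU & GPU utilization and memory usage, and pynvml is added to the requirements. The following is a minimal sketch of how such a tracker can be built with psutil and pynvml; the ResourceTracker class and its names are illustrative assumptions, not the actual OTX implementation.

import threading
import time
from typing import Dict, List

import psutil
import pynvml


class ResourceTracker:
    """Illustrative sketch: sample total CPU usage plus per-GPU utilization and memory."""

    def __init__(self, interval: float = 1.0):
        self._interval = interval
        self._stop = threading.Event()
        self.samples: List[Dict[str, float]] = []

    def _poll(self) -> None:
        pynvml.nvmlInit()
        # One handle per device, so the multi-GPU case is covered.
        handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in range(pynvml.nvmlDeviceGetCount())]
        while not self._stop.is_set():
            sample = {
                "cpu_percent": psutil.cpu_percent(),  # total CPU usage across all cores
                "mem_used_gib": psutil.virtual_memory().used / 1024**3,
            }
            for i, handle in enumerate(handles):
                util = pynvml.nvmlDeviceGetUtilizationRates(handle)
                mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
                sample[f"gpu{i}_util_percent"] = float(util.gpu)
                sample[f"gpu{i}_mem_used_gib"] = mem.used / 1024**3
            self.samples.append(sample)
            time.sleep(self._interval)
        pynvml.nvmlShutdown()

    def __enter__(self) -> "ResourceTracker":
        self._thread = threading.Thread(target=self._poll, daemon=True)
        self._thread.start()
        return self

    def __exit__(self, *exc) -> None:
        self._stop.set()
        self._thread.join()

Used as a context manager around a training or inference run (with ResourceTracker() as tracker: ...), tracker.samples can afterwards be averaged and written alongside the experiment results.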
eunwoosh authored Nov 20, 2023
1 parent 5f62d47 commit 9cf5624
Showing 6 changed files with 827 additions and 8 deletions.
9 changes: 8 additions & 1 deletion src/otx/algorithms/classification/adapters/openvino/task.py
@@ -176,6 +176,12 @@ def __init__(self, task_environment: TaskEnvironment):
         self.inferencer = self.load_inferencer()
         template_file_path = self.task_environment.model_template.model_template_path
         self._base_dir = os.path.abspath(os.path.dirname(template_file_path))
+        self._avg_time_per_image: Optional[float] = None
+
+    @property
+    def avg_time_per_image(self) -> Optional[float]:
+        """Average inference time per image."""
+        return self._avg_time_per_image
 
     def load_inferencer(self) -> ClassificationOpenVINOInferencer:
         """load_inferencer function of ClassificationOpenVINOTask."""
@@ -270,7 +276,8 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu
 
         self.inferencer.await_all()
 
-        logger.info(f"Avg time per image: {total_time/len(dataset)} secs")
+        self._avg_time_per_image = total_time / len(dataset)
+        logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
         logger.info(f"Total time: {total_time} secs")
         logger.info("Classification OpenVINO inference completed")
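
The same pattern, a private field set during infer() plus a read-only property, is applied to the detection, segmentation, and visual prompting tasks below. A hedged usage sketch, assuming a task already constructed from a TaskEnvironment (construction and dataset loading are not shown in this diff):

# Sketch only: task_environment, dataset, and inference_parameters are assumed.
task = ClassificationOpenVINOTask(task_environment)
predictions = task.infer(dataset.with_empty_annotations(), inference_parameters)

# The property stays None until infer() has run once.
if task.avg_time_per_image is not None:
    print(f"Average latency: {task.avg_time_per_image:.4f} s/image")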
9 changes: 8 additions & 1 deletion src/otx/algorithms/detection/adapters/openvino/task.py
@@ -387,13 +387,19 @@ def __init__(self, task_environment: TaskEnvironment):
         self.confidence_threshold: float = 0.0
         self.config = self.load_config()
         self.inferencer = self.load_inferencer()
+        self._avg_time_per_image: Optional[float] = None
         logger.info("OpenVINO task initialization completed")
 
     @property
     def hparams(self):
         """Hparams of OpenVINO Detection Task."""
         return self.task_environment.get_hyper_parameters(DetectionConfig)
 
+    @property
+    def avg_time_per_image(self) -> Optional[float]:
+        """Average inference time per image."""
+        return self._avg_time_per_image
+
     def load_config(self) -> ADDict:
         """Load configurable parameters from model adapter.
@@ -557,7 +563,8 @@ def add_prediction(id: int, predicted_scene: AnnotationSceneEntity, aux_data: tu
 
         self.inferencer.await_all()
 
-        logger.info(f"Avg time per image: {total_time/len(dataset)} secs")
+        self._avg_time_per_image = total_time / len(dataset)
+        logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
         logger.info(f"Total time: {total_time} secs")
         logger.info("OpenVINO inference completed")
         return dataset
9 changes: 8 additions & 1 deletion src/otx/algorithms/segmentation/adapters/openvino/task.py
@@ -162,6 +162,7 @@ def __init__(self, task_environment: TaskEnvironment):
         self.model = self.task_environment.model
         self.model_name = self.task_environment.model_template.model_template_id
         self.inferencer = self.load_inferencer()
+        self._avg_time_per_image: Optional[float] = None
 
         labels = task_environment.get_labels(include_empty=False)
         self._label_dictionary = dict(enumerate(labels, 1))
@@ -173,6 +174,11 @@ def hparams(self):
         """Hparams of OpenVINO Segmentation Task."""
         return self.task_environment.get_hyper_parameters(SegmentationConfig)
 
+    @property
+    def avg_time_per_image(self) -> Optional[float]:
+        """Average inference time per image."""
+        return self._avg_time_per_image
+
     def load_inferencer(self) -> OpenVINOSegmentationInferencer:
         """load_inferencer function of OpenVINO Segmentation Task."""
         if self.model is None:
@@ -248,7 +254,8 @@ def add_prediction(
 
         self.inferencer.await_all()
 
-        logger.info(f"Avg time per image: {total_time/len(dataset)} secs")
+        self._avg_time_per_image = total_time / len(dataset)
+        logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
         logger.info(f"Total time: {total_time} secs")
         logger.info("Segmentation OpenVINO inference completed")
9 changes: 8 additions & 1 deletion src/otx/algorithms/visual_prompting/tasks/openvino.py
@@ -258,6 +258,7 @@ def __init__(self, task_environment: TaskEnvironment) -> None:
         self.model = self.task_environment.model
         self.model_name = self.task_environment.model_template.model_template_id
         self.inferencer = self.load_inferencer()
+        self._avg_time_per_image: Optional[float] = None
 
         labels = task_environment.get_labels(include_empty=False)
         self._label_dictionary = dict(enumerate(labels, 1))
@@ -270,6 +271,11 @@ def hparams(self):
         """Hparams of OpenVINO Visual Prompting Task."""
         return self.task_environment.get_hyper_parameters(VisualPromptingBaseConfig)
 
+    @property
+    def avg_time_per_image(self) -> Optional[float]:
+        """Average inference time per image."""
+        return self._avg_time_per_image
+
     def load_inferencer(self) -> OpenVINOVisualPromptingInferencer:
         """Load OpenVINO Visual Prompting Inferencer."""
         if self.model is None:
@@ -328,7 +334,8 @@ def add_prediction(id: int, annotations: List[Annotation]):
 
         self.inferencer.await_all()
 
-        logger.info(f"Avg time per image: {total_time/len(dataset)} secs")
+        self._avg_time_per_image = total_time / len(dataset)
+        logger.info(f"Avg time per image: {self._avg_time_per_image} secs")
         logger.info(f"Total time: {total_time} secs")
         logger.info("Visual Prompting OpenVINO inference completed")
8 changes: 4 additions & 4 deletions src/otx/cli/tools/eval.py
@@ -156,11 +156,11 @@ def main():
     print(resultset.performance)
 
     output_path = Path(args.output) if args.output else config_manager.output_path
+    performance = {resultset.performance.score.name: resultset.performance.score.value}
+    if hasattr(task, "avg_time_per_image"):
+        performance["avg_time_per_image"] = task.avg_time_per_image
     with open(output_path / "performance.json", "w", encoding="UTF-8") as write_file:
-        json.dump(
-            {resultset.performance.score.name: resultset.performance.score.value},
-            write_file,
-        )
+        json.dump(performance, write_file)
 
     return dict(retcode=0, template=template.name)
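
With this change, performance.json carries the latency measurement next to the score whenever the task exposes it; the hasattr guard keeps eval working for tasks without the new property. A small sketch of reading the file back (the output directory, metric name, and values are invented for illustration):

import json
from pathlib import Path

# Hypothetical output directory; the real path comes from --output or the workspace.
with open(Path("outputs/performance.json"), encoding="UTF-8") as f:
    performance = json.load(f)

print(performance)  # e.g. {"Accuracy": 0.91, "avg_time_per_image": 0.0123}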