From 67c20085a1353acad6a78127dee3957f1cb8fce8 Mon Sep 17 00:00:00 2001
From: Eugene Khvedchenya
Date: Tue, 7 May 2024 16:39:43 +0300
Subject: [PATCH] Cleanup

---
 src/super_gradients/common/object_names.py      | 1 -
 src/super_gradients/common/registry/registry.py | 5 -----
 .../default_yolo_nas_r_train_params.yaml        | 9 ++-------
 3 files changed, 2 insertions(+), 13 deletions(-)

diff --git a/src/super_gradients/common/object_names.py b/src/super_gradients/common/object_names.py
index ec12ff8e04..a66e6787db 100644
--- a/src/super_gradients/common/object_names.py
+++ b/src/super_gradients/common/object_names.py
@@ -150,7 +150,6 @@ class Optimizers:
     RMS_PROP_TF = "RMSpropTF"
     LAMB = "Lamb"
     LION = "Lion"
-    RADAM = "RAdam"
 
 
 class Callbacks:
diff --git a/src/super_gradients/common/registry/registry.py b/src/super_gradients/common/registry/registry.py
index 2a9eb3dce1..e303f3766f 100644
--- a/src/super_gradients/common/registry/registry.py
+++ b/src/super_gradients/common/registry/registry.py
@@ -177,11 +177,6 @@ def warn_if_deprecated(name: str, registry: dict):
     Optimizers.RMS_PROP: optim.RMSprop,
 }
 
-try:
-    OPTIMIZERS[Optimizers.RADAM] = optim.RAdam
-except (ImportError, AttributeError):
-    pass
-
 TORCH_LR_SCHEDULERS = {
     "StepLR": torch.optim.lr_scheduler.StepLR,
     "LambdaLR": torch.optim.lr_scheduler.LambdaLR,
diff --git a/src/super_gradients/recipes/training_hyperparams/default_yolo_nas_r_train_params.yaml b/src/super_gradients/recipes/training_hyperparams/default_yolo_nas_r_train_params.yaml
index 0b77216655..d56f504527 100644
--- a/src/super_gradients/recipes/training_hyperparams/default_yolo_nas_r_train_params.yaml
+++ b/src/super_gradients/recipes/training_hyperparams/default_yolo_nas_r_train_params.yaml
@@ -31,8 +31,6 @@ optimizer: AdamW
 optimizer_params:
   weight_decay: 0.000001
 
-#clip_grad_norm: 1.0
-
 ema: True
 ema_params:
   decay: 0.9997
@@ -46,6 +44,7 @@ sync_bn: False
 # A batch with the largest loss will be visualized for train and valid loaders
 # Visualization images will be logged using configured logger
 phase_callbacks: []
+#phase_callbacks:
 #  - ExtremeBatchOBBVisualizationCallback:
 #      loss_to_monitor: "YoloNASRLoss/loss"
 #      max: True
@@ -56,7 +55,6 @@ phase_callbacks: []
 #
 #      post_prediction_callback:
 #        _target_: super_gradients.training.models.detection_models.yolo_nas_r.yolo_nas_r_post_prediction_callback.YoloNASRPostPredictionCallback
-#        #output_device: cpu
 #        score_threshold: 0.25
 #        pre_nms_max_predictions: 4096
 #        post_nms_max_predictions: 512
@@ -70,12 +68,12 @@ valid_metrics_list:
       include_classwise_ap: True
       post_prediction_callback:
         _target_: super_gradients.training.models.detection_models.yolo_nas_r.yolo_nas_r_post_prediction_callback.YoloNASRPostPredictionCallback
-        #output_device: cpu
         score_threshold: 0.1
         pre_nms_max_predictions: 4096
         post_nms_max_predictions: 512
         nms_iou_threshold: 0.6
 
+# One can use a COCO-style mAP implementation that sweeps IoU thresholds over 0.5..0.95 and uses 101-point recall interpolation
 #  - OBBDetectionMetrics_050_095:
 #      num_cls: ${dataset_params.num_classes}
 #      class_names: ${dataset_params.class_names}
@@ -90,9 +88,6 @@ valid_metrics_list:
 
 pre_prediction_callback:
 
-#metric_to_watch: 'YoloNASRLoss/loss'
-#greater_metric_to_watch_is_better: False
-
 metric_to_watch: 'mAP@0.50'
 greater_metric_to_watch_is_better: True
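
Note for users of this recipe: the cleanup drops the built-in RAdam registration, so
configs that still set "optimizer: RAdam" will no longer resolve the name. Below is a
minimal sketch, not part of this patch, of how one could restore it from user code. It
assumes the plain OPTIMIZERS dict shown in registry.py above stays importable; the guard
mirrors the try/except block the patch removes.

    # Sketch only: manually re-register RAdam after this cleanup.
    from torch import optim

    from super_gradients.common.registry.registry import OPTIMIZERS

    # torch.optim.RAdam is available in PyTorch >= 1.10; guard older
    # versions the same way the removed try/except block did.
    if hasattr(optim, "RAdam"):
        OPTIMIZERS["RAdam"] = optim.RAdam  # same key the removed Optimizers.RADAM used

Running this once at startup, before the Trainer builds the optimizer, makes the string
"RAdam" resolvable again without reverting this patch.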