diff --git a/fbgemm_gpu/codegen/training/python/__init__.template b/fbgemm_gpu/codegen/training/python/__init__.template
index 66c7742829..f35092379f 100644
--- a/fbgemm_gpu/codegen/training/python/__init__.template
+++ b/fbgemm_gpu/codegen/training/python/__init__.template
@@ -6,22 +6,34 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-# All optimizers
-import fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_args as lookup_args  # noqa: F401
-{%- for optim in all_optimizers %}
-import fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_{{ optim }} as lookup_{{optim}}  # noqa: F401
-{%- endfor %}
+import warnings
 
-# SSD optimizers (putting them under try-except for BC as they are
-# experimental ops which can be removed/updated in the future)
+{%- macro force_import(name) %}
+import fbgemm_gpu.split_embedding_codegen_lookup_invokers.{{ name }} as {{ name }}  # noqa: F401
+{%- endmacro %}
+
+{%- macro try_import(name) %}
 try:
-    import fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_args_ssd as lookup_args_ssd
-    {%- for optim in ssd_optimizers %}
-    import fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_{{ optim }}_ssd as lookup_{{ optim }}_ssd
-    {%- endfor %}
+    # Import is placed under a try-except bc the op is experimental and can be
+    # removed/updated in the future
+    import fbgemm_gpu.split_embedding_codegen_lookup_invokers.{{ name }} as {{ name }}  # noqa: F401
 except:
-    import logging
-    logging.warn("fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_args_ssd import failed")
-    {%- for optim in ssd_optims %}
-    logging.warn("fbgemm_gpu.split_embedding_codegen_lookup_invokers.lookup_{{ optim }}_ssd import failed")
-    {%- endfor %}
+    warnings.warn(
+        f"""\033[93m
+        Failed to import: fbgemm_gpu.split_embedding_codegen_lookup_invokers.{{ name }}
+        \033[0m""",
+        DeprecationWarning,
+    )
+{%- endmacro %}
+
+# TBE optimizers
+{{- force_import("lookup_args") }}
+{%- for optim in all_optimizers %}
+{{ try_import("lookup_" + optim) }}
+{%- endfor %}
+
+# SSD TBE optimizers
+{{- try_import("lookup_args_ssd") }}
+{%- for optim in ssd_optimizers %}
+{{ try_import("lookup_" + optim + "_ssd") }}
+{%- endfor %}
diff --git a/fbgemm_gpu/fbgemm_gpu/config/__init__.py b/fbgemm_gpu/fbgemm_gpu/config/__init__.py
index 5dcee0ed65..10d892c7fa 100644
--- a/fbgemm_gpu/fbgemm_gpu/config/__init__.py
+++ b/fbgemm_gpu/fbgemm_gpu/config/__init__.py
@@ -6,4 +6,4 @@
 
 # pyre-strict
 
-from .feature_list import FeatureGateName  # noqa F401
+from .feature_list import FeatureGate, FeatureGateName  # noqa F401
diff --git a/fbgemm_gpu/fbgemm_gpu/config/feature_list.py b/fbgemm_gpu/fbgemm_gpu/config/feature_list.py
index 011446a376..3fab8c47d1 100644
--- a/fbgemm_gpu/fbgemm_gpu/config/feature_list.py
+++ b/fbgemm_gpu/fbgemm_gpu/config/feature_list.py
@@ -45,7 +45,32 @@ def foo():
 
     """
 
+    # Enable TBE V2 APIs
     TBE_V2 = auto()
 
+    # Enable Ensemble Rowwise Adagrad (D60189486 stack)
+    TBE_ENSEMBLE_ROWWISE_ADAGRAD = auto()
+
     def is_enabled(self) -> bool:
-        return torch.ops.fbgemm.check_feature_gate_key(self.name)
+        return FeatureGate.is_enabled(self)
+
+
+class FeatureGate:
+    """
+    FBGEMM_GPU feature gate.
+
+    This class exists because methods defined on enums cannot be invoked when
+    the enum is packaged into a model (the mechanism is unclear).
+
+    **Code Example:**
+
+    .. code-block:: python
+
+        from deeplearning.fbgemm.fbgemm_gpu.config import FeatureGate, FeatureGateName
+
+        FeatureGate.is_enabled(FeatureGateName.TBE_V2)
+    """
+
+    @classmethod
+    def is_enabled(cls, feature: FeatureGateName) -> bool:
+        return torch.ops.fbgemm.check_feature_gate_key(feature.name)
diff --git a/fbgemm_gpu/include/fbgemm_gpu/config/feature_gates.h b/fbgemm_gpu/include/fbgemm_gpu/config/feature_gates.h
index aedb3c5896..551e83d7ce 100644
--- a/fbgemm_gpu/include/fbgemm_gpu/config/feature_gates.h
+++ b/fbgemm_gpu/include/fbgemm_gpu/config/feature_gates.h
@@ -55,7 +55,9 @@ namespace fbgemm_gpu::config {
 /// UI.
 ///
 /// For OSS: The environment variable will be evaluated as f"FBGEMM_{ENUM}"
-#define ENUMERATE_ALL_FEATURE_FLAGS X(TBE_V2)
+#define ENUMERATE_ALL_FEATURE_FLAGS \
+  X(TBE_V2)                         \
+  X(TBE_ENSEMBLE_ROWWISE_ADAGRAD)
 // X(EXAMPLE_FEATURE_FLAG)
 
 /// @ingroup fbgemm-gpu-config
diff --git a/fbgemm_gpu/test/config/feature_gate_test.py b/fbgemm_gpu/test/config/feature_gate_test.py
index 07d7ab7b68..2fe1409d28 100644
--- a/fbgemm_gpu/test/config/feature_gate_test.py
+++ b/fbgemm_gpu/test/config/feature_gate_test.py
@@ -12,7 +12,7 @@
 
 # pyre-fixme[21]
 import fbgemm_gpu
-from fbgemm_gpu.config import FeatureGateName
+from fbgemm_gpu.config import FeatureGate, FeatureGateName
 
 # pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
 open_source: bool = getattr(fbgemm_gpu, "open_source", False)
@@ -38,6 +38,11 @@ def test_feature_gates(self) -> None:
             with self.assertNotRaised(Exception):
                 print(f"\n[OSS] Feature {feature.name} enabled: {feature.is_enabled()}")
 
+            with self.assertNotRaised(Exception):
+                print(
+                    f"\n[OSS] Feature {feature.name} enabled: {FeatureGate.is_enabled(feature)}"
+                )
+
     @unittest.skipIf(open_source, "Not supported in open source")
     def test_feature_gates_fb(self) -> None:
         # pyre-fixme[16]
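
Note on the two call paths introduced here: `FeatureGateName.is_enabled()` now delegates to `FeatureGate.is_enabled()`, and both bottom out in `torch.ops.fbgemm.check_feature_gate_key`. Below is a minimal sketch of exercising the new gate in an OSS build, assuming the gate is read from the `FBGEMM_{ENUM}` environment variable described in `feature_gates.h`; setting the variable before import, and using `"1"` as the enabling value, are guesses at the simplest way to flip the flag, not something this diff specifies.

```python
import os

# Assumption: OSS builds evaluate the gate from f"FBGEMM_{ENUM}" per the
# comment in feature_gates.h; "1" as the enabling value is a guess.
os.environ["FBGEMM_TBE_ENSEMBLE_ROWWISE_ADAGRAD"] = "1"

import fbgemm_gpu  # noqa: F401  # registers the torch.ops.fbgemm operators
from fbgemm_gpu.config import FeatureGate, FeatureGateName

# Classmethod form: survives model packaging, where enum methods do not.
print(FeatureGate.is_enabled(FeatureGateName.TBE_ENSEMBLE_ROWWISE_ADAGRAD))

# Enum-method form: still works for regular (non-packaged) callers, and now
# routes through FeatureGate.is_enabled() internally.
print(FeatureGateName.TBE_ENSEMBLE_ROWWISE_ADAGRAD.is_enabled())
```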