From 47a59ec5fe036ebec6efccbd98b0baf0515b9e72 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 1 Feb 2024 17:28:16 -0500
Subject: [PATCH] tests/learning: Do not apply marks to a fixture (#2901)

It doesn't transfer to tests using the fixture (and does nothing).
Apply marks to parameters instead.

Simplify model construction.
Instead of a fixture that returns a constructor function, just use the
constructor function directly.

Signed-off-by: Jan Vesely
---
 tests/composition/test_learning.py | 60 ++++++++++++++----------------
 1 file changed, 28 insertions(+), 32 deletions(-)

diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py
index 2cd856be390..69c48556e3b 100644
--- a/tests/composition/test_learning.py
+++ b/tests/composition/test_learning.py
@@ -12,10 +12,7 @@
 from psyneulink.core.globals.keywords import Loss
 # from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import SSE, MSE, L0
 
-@pytest.mark.pytorch
-@pytest.mark.composition
-@pytest.fixture
-def xor_network():
+def xor_network(comp_type, comp_learning_rate, pathway_learning_rate):
     """Create simple sample network for testing learning specifications
     Returns a function that takes a Composition type and learning_rate specifications
     and returns an instantiated Composition and its components
@@ -39,39 +36,38 @@ def xor_network():
     inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
     targets = np.array([[0], [1], [1], [0]])
 
-    def _get_comp_type(comp_type, comp_learning_rate, pathway_learning_rate):
-        if comp_type == 'composition':
-            xor = Composition(learning_rate=comp_learning_rate)
-            # Note: uses Projections specified above by inference
-            pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer],
-                                                               learning_rate=pathway_learning_rate)
-            target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM]
-        elif comp_type == 'autodiff':
-            # FIX: the format commented out below doesn't work for LLVM:
-            # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer])
-            # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts])
-            xor = pnl.AutodiffComposition()
-            xor.add_node(input_layer)
-            xor.add_node(hidden_layer)
-            xor.add_node(output_layer)
-            xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer)
-            xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer)
-            target_mechanism = None
-        else:
-            assert False, f"Bad composition type parameter passed to xor_net fixture"
-        return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets,
-    return _get_comp_type
+    if comp_type == 'composition':
+        xor = Composition(learning_rate=comp_learning_rate)
+        # Note: uses Projections specified above by inference
+        pathway = xor.add_backpropagation_learning_pathway(pathway=[input_layer,hidden_layer,output_layer],
+                                                           learning_rate=pathway_learning_rate)
+        target_mechanism = pathway.learning_components[pnl.TARGET_MECHANISM]
+    elif comp_type == 'autodiff':
+        # FIX: the format commented out below doesn't work for LLVM:
+        # xor = pnl.AutodiffComposition(nodes=[input_layer,hidden_layer,output_layer])
+        # xor.add_projections([input_to_hidden_wts, hidden_to_output_wts])
+        xor = pnl.AutodiffComposition()
+        xor.add_node(input_layer)
+        xor.add_node(hidden_layer)
+        xor.add_node(output_layer)
+        xor.add_projection(sender=input_layer, projection=input_to_hidden_wts, receiver=hidden_layer)
+        xor.add_projection(sender=hidden_layer, projection=hidden_to_output_wts, receiver=output_layer)
+        target_mechanism = None
+    else:
+        assert False, f"Bad composition type parameter passed to xor_net fixture"
+    return xor, input_layer, hidden_layer, output_layer, target_mechanism, inputs, targets,
 
 
 class TestInputAndTargetSpecs:
 
-    @pytest.mark.pytorch
+    @pytest.mark.composition
     @pytest.mark.parametrize('input_type', ['dict', 'func', 'gen', 'gen_func'])
-    @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.PyTorch,
-                                           pnl.ExecutionMode.LLVMRun,
+    @pytest.mark.parametrize('exec_mode', [pytest.param(pnl.ExecutionMode.PyTorch, marks=pytest.mark.pytorch),
+                                           pytest.param(pnl.ExecutionMode.LLVMRun, marks=pytest.mark.llvm),
                                            pnl.ExecutionMode.Python])
-    @pytest.mark.parametrize('comp_type', ['composition', 'autodiff'])
-    def test_node_spec_types(self, xor_network, comp_type, input_type, exec_mode):
+    @pytest.mark.parametrize('comp_type', ['composition',
+                                           pytest.param('autodiff', marks=pytest.mark.pytorch)])
+    def test_node_spec_types(self, comp_type, input_type, exec_mode):
 
         if comp_type == 'composition' and exec_mode != pnl.ExecutionMode.Python:
             pytest.skip(f"Execution mode {exec_mode} not relevant for Composition learn")
@@ -1898,7 +1894,7 @@ def test_matrix_spec_and_learning_rate(self):
                   ('learning_mech',  .01,   .02,     .03,   .04,   [[0.63458688]]),
     ]
     @pytest.mark.parametrize('spec_types', spec_types, ids=[x[0] for x in spec_types])
-    def test_different_learning_rate_specs_for_comp(self, xor_network, spec_types):
+    def test_different_learning_rate_specs_for_comp(self, spec_types):
         learning_mech_learning_rate = spec_types[1]
         learning_pathway_learning_rate = spec_types[2]
         composition_learning_rate = spec_types[3]
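
Note (not part of the patch): a minimal, self-contained sketch of the pytest
behavior this change relies on. The test name below is hypothetical and used
only for illustration; only pytest.mark.pytorch and the 'composition'/'autodiff'
parameter values are taken from the patch itself.

    import pytest

    # Anti-pattern removed by the patch: marks stacked on a fixture are not
    # transferred to the tests that request the fixture, so they do nothing.
    #
    #     @pytest.mark.pytorch
    #     @pytest.fixture
    #     def xor_network():
    #         ...
    #
    # Supported pattern added by the patch: attach marks to individual
    # parametrize entries with pytest.param(..., marks=...), so only the
    # relevant cases carry the mark.
    @pytest.mark.parametrize('comp_type', [
        'composition',
        pytest.param('autodiff', marks=pytest.mark.pytorch),
    ])
    def test_example(comp_type):
        assert comp_type in ('composition', 'autodiff')

With this layout, running `pytest -m "not pytorch"` deselects only the
'autodiff' case, which is the effect the fixture-level marks were presumably
intended to have.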