From 7ab4b0d2490dad0064dd23efcee647aa444f64d6 Mon Sep 17 00:00:00 2001
From: "jdc@princeton.edu"
Date: Fri, 2 Mar 2018 09:11:53 -0500
Subject: [PATCH 1/5] =?UTF-8?q?=E2=80=A2=20Scripts/Examples=20=20=20Rumelh?=
 =?UTF-8?q?art=20Semantic=20Network:=20updated=20with=20step=20function?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Scripts/Examples/Reinforcement-Learning.py | 27 ++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/Scripts/Examples/Reinforcement-Learning.py b/Scripts/Examples/Reinforcement-Learning.py
index 8b935ba0a9a..b1c85f43a6b 100644
--- a/Scripts/Examples/Reinforcement-Learning.py
+++ b/Scripts/Examples/Reinforcement-Learning.py
@@ -44,12 +44,31 @@ def print_header(system):
 
 
 def show_weights():
-    print('Reward prediction weights: \n', action_selection.input_state.path_afferents[0].matrix)
-    print(
-        '\nAction selected: {}; predicted reward: {}'.format(
+    # print('Reward prediction weights: \n', action_selection.input_state.path_afferents[0].matrix)
+    # print(
+    #     '\nAction selected: {}; predicted reward: {}'.format(
+    #         np.nonzero(action_selection.output_state.value)[0][0],
+    #         action_selection.output_state.value[np.nonzero(action_selection.output_state.value)][0]
+    #     )
+
+    print('\n'
+          'action_selection output: {} '
+          'comparator sample: {} '
+          'comparator target: {} '
+          'comparator target: {} '
+          'learning mech act in: {} '
+          'learning mech act out: {} '
+          'learning mech error in: {} '
+          'learning mech error out: {} '
+          'learning mech learning_sig: {} '
+          'predicted reward: {} '.format(
+        action_selection.output_state.value,
+        action_selection.output_state.efferents[1].receiver.owner.input_states[pnl.SAMPLE].value,
+        action_selection.output_state.efferents[1].receiver.owner.input_states[pnl.TARGET].value,
+        action_selection.afferents.value,
             np.nonzero(action_selection.output_state.value)[0][0],
             action_selection.output_state.value[np.nonzero(action_selection.output_state.value)][0]
-        )
+    )
     )

From 9c1cb68f8b60009fcd4c2838b21ce32388bbf4e2 Mon Sep 17 00:00:00 2001
From: "jdc@princeton.edu"
Date: Fri, 2 Mar 2018 09:24:55 -0500
Subject: [PATCH 2/5] =?UTF-8?q?=E2=80=A2=20Scripts/Examples=20=20=20Rumelh?=
 =?UTF-8?q?art=20Semantic=20Network:=20updated=20with=20step=20function?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Scripts/Examples/Reinforcement-Learning.py | 39 ++++++++++++----------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/Scripts/Examples/Reinforcement-Learning.py b/Scripts/Examples/Reinforcement-Learning.py
index b1c85f43a6b..b929ad09714 100644
--- a/Scripts/Examples/Reinforcement-Learning.py
+++ b/Scripts/Examples/Reinforcement-Learning.py
@@ -50,28 +50,31 @@ def show_weights():
     #         np.nonzero(action_selection.output_state.value)[0][0],
     #         action_selection.output_state.value[np.nonzero(action_selection.output_state.value)][0]
     #     )
-
+    assert True
+    comparator = action_selection.output_state.efferents[0].receiver.owner
+    learn_mech = action_selection.output_state.efferents[1].receiver.owner
     print('\n'
-          'action_selection output: {} '
-          'comparator sample: {} '
-          'comparator target: {} '
-          'comparator target: {} '
-          'learning mech act in: {} '
-          'learning mech act out: {} '
-          'learning mech error in: {} '
-          'learning mech error out: {} '
-          'learning mech learning_sig: {} '
-          'predicted reward: {} '.format(
+          '\naction_selection output: {} '
+          '\ncomparator sample: {} '
+          '\ncomparator target: {} '
+          '\nlearning mech act in: {} '
+          '\nlearning mech act out: {} '
+          '\nlearning mech error in: {} '
+          '\nlearning mech error out: {} '
+          '\nlearning mech learning_sig: {} '
+          '\npredicted reward: {} '.
+          format(
         action_selection.output_state.value,
-        action_selection.output_state.efferents[1].receiver.owner.input_states[pnl.SAMPLE].value,
-        action_selection.output_state.efferents[1].receiver.owner.input_states[pnl.TARGET].value,
-        action_selection.afferents.value,
-            np.nonzero(action_selection.output_state.value)[0][0],
-            action_selection.output_state.value[np.nonzero(action_selection.output_state.value)][0]
-
+        comparator.input_states[pnl.SAMPLE].value,
+        comparator.input_states[pnl.TARGET].value,
+        learn_mech.input_states[pnl.ACTIVATION_INPUT].value,
+        learn_mech.input_states[pnl.ACTIVATION_OUTPUT].value,
+        learn_mech.input_states[pnl.ERROR_SIGNAL].value,
+        learn_mech.output_states[pnl.ERROR_SIGNAL].value,
+        learn_mech.output_states[pnl.LEARNING_SIGNAL].value,
+        action_selection.output_state.value[np.nonzero(action_selection.output_state.value)][0])
     )
-
 p.run(
     num_trials=10,
     inputs=[[[1, 1, 1]]],

From 92ea382de18823efcb016d9ba079ceff919abe2a Mon Sep 17 00:00:00 2001
From: "jdc@princeton.edu"
Date: Fri, 2 Mar 2018 13:42:21 -0500
Subject: [PATCH 3/5] -

---
 psyneulink/components/system.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/psyneulink/components/system.py b/psyneulink/components/system.py
index 360b87dc05b..210e18c4c94 100644
--- a/psyneulink/components/system.py
+++ b/psyneulink/components/system.py
@@ -1627,6 +1627,8 @@ def build_dependency_sets_by_traversing_projections(sender_mech, process):
             # for now, however, assume this is not desired (i.e., only TERMINAL mechanisms
             # should project to ObjectiveMechanisms) and always replace internal
             # ObjectiveMechanism with projection from a LearningMechanism (if it is available)
+            # Otherwise:
+            # - include it in the graph
 
             obj_mech_replaced = False

From 0142e9515f32a2a60dffa8538cf67f48fa675309 Mon Sep 17 00:00:00 2001
From: "jdc@princeton.edu"
Date: Fri, 2 Mar 2018 13:56:44 -0500
Subject: [PATCH 4/5] =?UTF-8?q?=E2=80=A2=C2=A0Examples=20=20=20Rumelhart?=
 =?UTF-8?q?=20Semantic=20Network=20script?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../Examples/Rumelhart Semantic Network.py | 39 ++++++++++++++++++-
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/Scripts/Examples/Rumelhart Semantic Network.py b/Scripts/Examples/Rumelhart Semantic Network.py
index 05e1cc5d04a..e40b4b57bf0 100644
--- a/Scripts/Examples/Rumelhart Semantic Network.py
+++ b/Scripts/Examples/Rumelhart Semantic Network.py
@@ -9,7 +9,9 @@
 # At present, it implements only the structure of the network, as shown below:
 
 # Semantic Network:
-#                              _
+#                              _
+#  R_STEP P_STEP Q_STEP A_STEP  |  Readout Processes
+#      |      |      /      /  _______|
 #  REP PROP QUAL ACT            |
 #    \___\__/____/              |
 #        |       _              |  Output Processes
@@ -21,6 +23,16 @@
 # It does not yet implement learning or testing.
+
+def step(variable,params,context):
+    if np.sum(variable)<.5:
+        out=0
+    else:
+        out=1
+    return(out)
+Step=pnl.UserDefinedFunction(custom_function=step,
+                             default_variable=np.zeros(4))
+
 #Processing Units:
 rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
 rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
@@ -30,6 +42,10 @@
 prop_out = pnl.TransferMechanism(size=12, function=pnl.Logistic, name='PROP_OUT')
 qual_out = pnl.TransferMechanism(size=13, function=pnl.Logistic, name='QUAL_OUT')
 act_out = pnl.TransferMechanism(size=14, function=pnl.Logistic, name='ACT_OUT')
+r_step = pnl.ProcessingMechanism(size=10, function=Step, name='REP_STEP')
+p_step = pnl.ProcessingMechanism(size=12, function=Step, name='PROP_STEP')
+q_step = pnl.ProcessingMechanism(size=13, function=Step, name='QUAL_STEP')
+a_step = pnl.ProcessingMechanism(size=14, function=Step, name='ACT_STEP')
 
 #Processes that comprise the System:
 # NOTE: this is one of several configuration of processes that can be used to construct the full network
@@ -53,8 +69,27 @@
                      learning=pnl.LEARNING,
                      name='REL_ACT_PROC')
 
+rep_step_proc = pnl.Process(pathway=[rep_out, r_step],
+                            name='REP_STEP_PROC')
+act_step_proc = pnl.Process(pathway=[act_out, a_step],
+                            name='ACT_STEP_PROC')
+qual_step_proc = pnl.Process(pathway=[qual_out, q_step],
+                             name='QUAL_STEP_PROC')
+prop_step_proc = pnl.Process(pathway=[prop_out, p_step],
+                             name='PROP_STEP_PROC')
+
+
 # The System:
-S = pnl.System(processes=[rep_hidden_proc, rel_hidden_proc, rel_rep_proc, rel_prop_proc, rel_qual_proc, rel_act_proc])
+S = pnl.System(processes=[rep_hidden_proc,
+                          rel_hidden_proc,
+                          rel_rep_proc,
+                          rel_prop_proc,
+                          rel_qual_proc,
+                          rel_act_proc,
+                          rep_step_proc,
+                          act_step_proc,
+                          qual_step_proc,
+                          prop_step_proc])
 
 # Shows just the processing network:
 # S.show_graph(show_dimensions=True)

From f2b842252b3554368e708eceea5ff058275ff2e1 Mon Sep 17 00:00:00 2001
From: "jdc@princeton.edu"
Date: Fri, 2 Mar 2018 14:02:12 -0500
Subject: [PATCH 5/5] =?UTF-8?q?=E2=80=A2=C2=A0Examples=20=20=20Rumelhart?=
 =?UTF-8?q?=20Semantic=20Network=20script:=20=20=20=20=20=20=20eliminated?=
 =?UTF-8?q?=20explicit=20implementation=20of=20Step=20function;=20=20=20?=
 =?UTF-8?q?=20=20=20=20now=20assigns=20Python-defined=20step=20function=20?=
 =?UTF-8?q?directly?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Scripts/Examples/Rumelhart Semantic Network.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/Scripts/Examples/Rumelhart Semantic Network.py b/Scripts/Examples/Rumelhart Semantic Network.py
index e40b4b57bf0..383302d4933 100644
--- a/Scripts/Examples/Rumelhart Semantic Network.py
+++ b/Scripts/Examples/Rumelhart Semantic Network.py
@@ -24,14 +24,12 @@
 # It does not yet implement learning or testing.
 
-def step(variable,params,context):
+def step(variable):
     if np.sum(variable)<.5:
         out=0
     else:
         out=1
     return(out)
-Step=pnl.UserDefinedFunction(custom_function=step,
-                             default_variable=np.zeros(4))
 
 #Processing Units:
 rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
 rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
@@ -42,10 +40,10 @@ def step(variable,params,context):
 prop_out = pnl.TransferMechanism(size=12, function=pnl.Logistic, name='PROP_OUT')
 qual_out = pnl.TransferMechanism(size=13, function=pnl.Logistic, name='QUAL_OUT')
 act_out = pnl.TransferMechanism(size=14, function=pnl.Logistic, name='ACT_OUT')
-r_step = pnl.ProcessingMechanism(size=10, function=Step, name='REP_STEP')
-p_step = pnl.ProcessingMechanism(size=12, function=Step, name='PROP_STEP')
-q_step = pnl.ProcessingMechanism(size=13, function=Step, name='QUAL_STEP')
-a_step = pnl.ProcessingMechanism(size=14, function=Step, name='ACT_STEP')
+r_step = pnl.ProcessingMechanism(size=10, function=step, name='REP_STEP')
+p_step = pnl.ProcessingMechanism(size=12, function=step, name='PROP_STEP')
+q_step = pnl.ProcessingMechanism(size=13, function=step, name='QUAL_STEP')
+a_step = pnl.ProcessingMechanism(size=14, function=step, name='ACT_STEP')
 
 #Processes that comprise the System:
 # NOTE: this is one of several configuration of processes that can be used to construct the full network
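For reference, the net effect of patches 4 and 5 on the readout (*_STEP) mechanisms can be sketched as follows. This is a minimal sketch, not part of the patches themselves: it assumes the PsyNeuLink API as exercised in the script above, and the names and sizes mirror those in Rumelhart Semantic Network.py (rep_out's size of 10 is inferred from the matching REP layers).

import numpy as np
import psyneulink as pnl

# Thresholding readout used by the *_STEP mechanisms: returns 0 if the summed
# activation of the attached output layer is below 0.5, otherwise 1
# (same logic as the script's step function).
def step(variable):
    return 0 if np.sum(variable) < .5 else 1

# Patch 4 wrapped the function in a UserDefinedFunction before assigning it
# (default_variable=np.zeros(4) is taken verbatim from that patch):
Step = pnl.UserDefinedFunction(custom_function=step, default_variable=np.zeros(4))

# Patch 5 drops the wrapper and assigns the Python function directly:
r_step = pnl.ProcessingMechanism(size=10, function=step, name='REP_STEP')

# The readout mechanism is then placed downstream of the corresponding *_OUT
# layer in its own Process, as in the script:
rep_out = pnl.TransferMechanism(size=10, function=pnl.Logistic, name='REP_OUT')
rep_step_proc = pnl.Process(pathway=[rep_out, r_step], name='REP_STEP_PROC')

Assigning the plain function presumably relies on PsyNeuLink wrapping the callable in a UserDefinedFunction internally, which is what makes patch 5's simplification possible.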