Feat/ddm/selected input array (#697)
* • DDM
  __init__:
    Modified to implement DECISION_VARIABLE_ARRAY and SELECTED_INPUT_ARRAY
      as StandardOutputStates

• OutputState
  StandardOutputStates: added add_state_dicts method

• Scripts
  RL-DDM:  modified to use SELECTED_INPUT_ARRAY (see the sketch below)
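
For context, a minimal sketch of how the two new standard OutputStates might be requested on a DDM; the import path and constructor pattern are assumptions based on the package layout of the time, not code from this commit:

    import psyneulink as pnl
    # Import path assumed; the keywords are the ones this commit adds to the
    # DDM's StandardOutputStates.
    from psyneulink.library.mechanisms.processing.integrator.ddm import (
        DECISION_VARIABLE_ARRAY, SELECTED_INPUT_ARRAY)

    decision = pnl.DDM(
        output_states=[DECISION_VARIABLE_ARRAY, SELECTED_INPUT_ARRAY],
        name='Decision')
    # SELECTED_INPUT_ARRAY: one-hot array marking the input element
    # corresponding to the selected action (what RL-DDM.py now consumes);
    # DECISION_VARIABLE_ARRAY: the decision variable in array form.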

* • DDM
  docstring revisions for DECISION_VARIABLE_ARRAY and SELECTED_INPUT_ARRAY

• Mechanism
  _update_params_dicts:  new method, called by _update_parameter_states
                         and _instantiate_output_states (call pattern
                         sketched below)
  - !!!HACK ALERT!!! had to add an exclusion for noise (it causes a crash) --
    this needs to be fixed
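
A hedged sketch of the call pattern described above (signatures simplified; the caller's body is paraphrased, not quoted from the repo):

    # Assumed shape of _update_parameter_states: update each ParameterState,
    # then sync the Mechanism's params dicts via the new method
    # (the method itself appears in the mechanism.py diff below).
    def _update_parameter_states(self, runtime_params=None, context=None):
        for state in self._parameter_states:
            state.update(params=runtime_params, context=context)
        self._update_params_dicts(context=context)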

* • Mechanism
  - _update_params_dicts:  restricted skipping of noise to INIT

• Tests
  - test_parameter_states:  reinstated tests for setting noise
jdcpni authored Mar 3, 2018
1 parent e12ac06 commit 892e74d
Showing 3 changed files with 7 additions and 15 deletions.
Scripts/Examples/RL-DDM.py (12 changes: 2 additions & 10 deletions)

@@ -16,14 +16,6 @@
     name='Input Layer'
 )
 
-# def decision_variable_to_one_hot(x):
-#     """Generate "one-hot" 1d array designating selected action from DDM's scalar decision variable
-#     (used to generate value of OutputState for action_selection Mechanism)"""
-#     if x > 0:
-#         return [1,0]
-#     else:
-#         return [0,-1]
-
 # Takes sum of input layer elements as external component of drift rate
 # Notes:
 #  - drift_rate parameter in constructor for DDM is the "internally modulated" component of the drift_rate;
@@ -111,8 +103,8 @@ def show_weights():
 
 
 # Specify reward values associated with each action (corresponding to elements of action_selection.output_state.value)
-reward_values = [10, 0]
-# reward_values = [0, 10]
+# reward_values = [10, 0]
+reward_values = [0, 10]
 
 # Used by System to generate a reward on each trial based on the outcome of the action_selection (DDM) Mechanism
 def reward():
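
The helper deleted in the first hunk is what SELECTED_INPUT_ARRAY now provides as a standard OutputState. For reference, its logic reconstructed from the commented-out lines:

    def decision_variable_to_one_hot(x):
        """Generate "one-hot" 1d array designating the selected action
        from the DDM's scalar decision variable."""
        if x > 0:
            return [1, 0]   # positive decision variable selects action 1
        else:
            return [0, -1]  # otherwise action 2, with the sign preserved

RL-DDM.py now reads this array directly from the DDM's SELECTED_INPUT_ARRAY OutputState instead of post-processing the scalar decision variable.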
psyneulink/components/mechanisms/mechanism.py (2 changes: 1 addition & 1 deletion)

@@ -2243,7 +2243,7 @@ def _update_parameter_states(self, runtime_params=None, context=None):
     def _update_params_dicts(self, context=None):
         from psyneulink.globals.keywords import NOISE
         for state in self._parameter_states:
-            if NOISE in state.name:
+            if NOISE in state.name and INITIALIZING in context:
                 continue
             if state.name in self.user_params:
                 self.user_params.__additem__(state.name, state.value)
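
At this point in the codebase, context is a plain string, so the added guard is a substring test: the noise ParameterState is skipped only while the Mechanism is initializing and is synced into user_params normally afterwards (INITIALIZING is presumably imported at module level in mechanism.py). A minimal illustration; the helper is hypothetical, written only to isolate the condition:

    from psyneulink.globals.keywords import INITIALIZING, NOISE

    def should_skip_sync(state_name, context):
        # True only for noise-related states during initialization;
        # after INIT, noise changes propagate into user_params as usual.
        return NOISE in state_name and INITIALIZING in context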
tests/states/test_parameter_states.py (8 changes: 4 additions & 4 deletions)

@@ -167,10 +167,10 @@ def test_configurable_params(self):
         assert np.allclose(T._smoothing_factor, new_value)
         assert np.allclose(T.mod_smoothing_factor, new_value)
 
-        # assert np.allclose(T.user_params["noise"], new_value)
-        # assert np.allclose(T.noise, new_value)
-        # assert np.allclose(T._noise, new_value)
-        # assert np.allclose(T.mod_noise, new_value)
+        assert np.allclose(T.user_params["noise"], new_value)
+        assert np.allclose(T.noise, new_value)
+        assert np.allclose(T._noise, new_value)
+        assert np.allclose(T.mod_noise, new_value)
 
 class TestModParams:
     def test_mod_param_error(self):
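
The four reinstated assertions exercise the same parameter through its different access paths; roughly (semantics inferred from the attribute names, not stated in the diff):

    T.user_params["noise"]  # params-dict entry, kept in sync by _update_params_dicts
    T.noise                 # public parameter attribute
    T._noise                # underlying base value
    T.mod_noise             # value as modulated through the ParameterState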
