Skip to content

Commit

Permalink
Merge pull request #321 from jdcpni/devel
Browse files Browse the repository at this point in the history
Gating restored
  • Loading branch information
kmantel authored Jun 15, 2017
2 parents 089a2e1 + ef14aab commit d5216c3
Show file tree
Hide file tree
Showing 10 changed files with 254 additions and 24 deletions.
22 changes: 22 additions & 0 deletions .idea/PsyNeuLink.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/encodings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

21 changes: 21 additions & 0 deletions .idea/runConfigurations/_Scratch_Pad.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

16 changes: 7 additions & 9 deletions PsyNeuLink/Components/States/State.py
Original file line number Diff line number Diff line change
Expand Up @@ -1209,8 +1209,9 @@ def update(self, params=None, time_scale=TimeScale.TRIAL, context=None):
"""

# GET STATE-SPECIFIC PARAM_SPECS
# SET UP -------------------------------------------------------------------------------------------------------

# Get state-specific param_specs
try:
# Get State params
self.stateParams = params[self.paramsType]
Expand All @@ -1220,16 +1221,15 @@ def update(self, params=None, time_scale=TimeScale.TRIAL, context=None):
raise StateError("PROGRAM ERROR: paramsType not specified for {}".format(self.name))
#endregion

# FLAG FORMAT OF INPUT

# Flag format of input
if isinstance(self.value, numbers.Number):
# Treat as single real value
value_is_number = True
else:
# Treat as vector (list or np.array)
value_is_number = False

# AGGREGATE INPUT FROM PROJECTIONS
# AGGREGATE INPUT FROM PROJECTIONS -----------------------------------------------------------------------------

# Get type-specific params from PROJECTION_PARAMS
mapping_params = merge_param_dicts(self.stateParams, MAPPING_PROJECTION_PARAMS, PROJECTION_PARAMS)
Expand Down Expand Up @@ -1280,11 +1280,9 @@ def update(self, params=None, time_scale=TimeScale.TRIAL, context=None):
self.owner.name))
continue

# MODIFIED 6/10/17 OLD: [COMMENTED OUT TO TEST Gating]
sender_id = sender.owner._execution_id
if sender_id != self_id:
continue
# MODIFIED 6/10/17 END

# Only accept projections from a Process to which the owner Mechanism belongs
if isinstance(sender, ProcessInputState):
Expand Down Expand Up @@ -1328,7 +1326,7 @@ def update(self, params=None, time_scale=TimeScale.TRIAL, context=None):
mod_meta_param, mod_param_name, mod_param_value = _get_modulated_param(self, projection)
self._mod_proj_values[mod_meta_param].append(type_match(projection_value, type(mod_param_value)))

# AGGREGATE ModulatoryProjection VALUES
# AGGREGATE ModulatoryProjection VALUES -----------------------------------------------------------------------

# For each modulated parameter of the state's function,
# combine any values received from the relevant projections into a single modulation value
Expand All @@ -1342,15 +1340,15 @@ def update(self, params=None, time_scale=TimeScale.TRIAL, context=None):
else:
self.stateParams[FUNCTION_PARAMS].update({function_param: aggregated_mod_val})

# CALL STATE'S function TO GET ITS VALUE
# CALL STATE'S function TO GET ITS VALUE ----------------------------------------------------------------------
try:
# pass only function params (which implement the effects of any modulatory projections)
function_params = self.stateParams[FUNCTION_PARAMS]
except (KeyError, TypeError):
function_params = None
state_value = self._execute(function_params=function_params, context=context)

# ASSIGN VALUE
# ASSIGN VALUE ------------------------------------------------------------------------------------------------

# MODIFIED 6/11/17 OLD:
# # If self.value is a number, convert combined_values back to number
Expand Down
7 changes: 5 additions & 2 deletions PsyNeuLink/Components/System.py
Original file line number Diff line number Diff line change
Expand Up @@ -2759,8 +2759,11 @@ def show_graph(self,
for proj in projs:
if proj.receiver.owner == rcvr:
edge_name = proj.name
edge_shape = proj.matrix.shape
has_learning = proj.has_learning_projection
# edge_shape = proj.matrix.shape
try:
has_learning = proj.has_learning_projection
except AttributeError:
has_learning = None
edge_label = edge_name
#### CHANGE MADE HERE ###
# if rcvr is learning mechanism, draw arrow with learning color
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@

random_weight_matrix = lambda sender, receiver : random_matrix(sender, receiver, .2, -.1)

Gating_Mechanism = GatingMechanism(default_gating_policy=0.0,
Gating_Mechanism = GatingMechanism(default_gating_policy=1.0,
gating_signals=[
Hidden_Layer_1,
Hidden_Layer_2,
Expand Down Expand Up @@ -66,17 +66,17 @@
# matrix=FULL_CONNECTIVITY_MATRIX
# matrix=RANDOM_CONNECTIVITY_MATRIX
# # MODIFIED 6/11/17 OLD:
# matrix=Middle_Weights_matrix
# MODIFIED 6/11/17 NEW:
matrix={VALUE:Middle_Weights_matrix,
# FUNCTION:Linear,
FUNCTION:ConstantIntegrator,
FUNCTION_PARAMS:{
INITIALIZER:Middle_Weights_matrix,
RATE:Middle_Weights_matrix},
# FUNCTION:ConstantIntegrator(rate=Middle_Weights_matrix)
# MODULATION:ADDITIVE_PARAM
}
matrix=Middle_Weights_matrix
# # MODIFIED 6/11/17 NEW:
# matrix={VALUE:Middle_Weights_matrix,
# # FUNCTION:Linear,
# FUNCTION:ConstantIntegrator,
# FUNCTION_PARAMS:{
# INITIALIZER:Middle_Weights_matrix,
# RATE:Middle_Weights_matrix},
# # FUNCTION:ConstantIntegrator(rate=Middle_Weights_matrix)
# # MODULATION:ADDITIVE_PARAM
# }
# MODIFIED 6/11/17 END:
)

Expand Down Expand Up @@ -130,7 +130,7 @@
# stim_list = {Input_Layer:[[-1, 30]]}
# stim_list = {Input_Layer:[[-1, 30]]}
stim_list = {Input_Layer:[[-1, 30]],
Gating_Mechanism:[1.0]}
Gating_Mechanism:[0.0]}
target_list = {Output_Layer:[[0, 0, 1]]}


Expand Down Expand Up @@ -185,6 +185,7 @@ def show_target():
x.reportOutputPref = True
composition = x

# x.show_graph()
# x.show_graph(show_learning=True)

# from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.GatingMechanisms.GatingMechanism \
Expand Down
161 changes: 161 additions & 0 deletions tests/mechanisms/test_gating_mechanism.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
import numpy as np
from PsyNeuLink.Components.Projections.TransmissiveProjections.MappingProjection import MappingProjection

from PsyNeuLink.Components.Functions.Function import ConstantIntegrator, Logistic
from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.GatingMechanisms.GatingMechanism import GatingMechanism
from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.TransferMechanism import TransferMechanism
from PsyNeuLink.Components.Process import process
from PsyNeuLink.Components.System import system
from PsyNeuLink.Globals.Keywords import FUNCTION, FUNCTION_PARAMS, INITIALIZER, LEARNING, RATE, SOFT_CLAMP, VALUE
from PsyNeuLink.Globals.Preferences.ComponentPreferenceSet import REPORT_OUTPUT_PREF, VERBOSE_PREF
from PsyNeuLink.Globals.TimeScale import CentralClock


def test_gating():
    """Smoke-test GatingMechanism inside a learning System.

    Builds a 2-5-4-3 feedforward Logistic network with learning enabled,
    attaches a GatingMechanism whose gating signals target the two hidden
    layers and the output layer, then runs the composed System for ten
    trials. No output values are asserted; the test passes if construction
    and execution complete without raising.
    """
    # --- Processing layers -------------------------------------------------
    Input_Layer = TransferMechanism(
        name='Input Layer',
        function=Logistic,
        default_input_value=np.zeros((2,)),
    )

    Hidden_Layer_1 = TransferMechanism(
        name='Hidden Layer_1',
        function=Logistic(),
        default_input_value=np.zeros((5,)),
    )

    Hidden_Layer_2 = TransferMechanism(
        name='Hidden Layer_2',
        function=Logistic(),
        default_input_value=[0, 0, 0, 0],
    )

    Output_Layer = TransferMechanism(
        name='Output Layer',
        function=Logistic,
        default_input_value=[0, 0, 0],
    )

    # --- Gating: one gating signal per gated layer -------------------------
    Gating_Mechanism = GatingMechanism(
        default_gating_policy=0.0,
        gating_signals=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer],
    )

    # --- Fixed initial weight matrices (ramps of values scaled into (0, 1]) -
    Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
    Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
    Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)

    # Free-standing projection: no sender/receiver here, so it must be
    # placed explicitly in the process pathway below.
    Input_Weights = MappingProjection(
        name='Input Weights',
        matrix=Input_Weights_matrix,
    )

    # Sender and receiver are named here, so this projection is picked up
    # automatically and need not appear in the pathway. The matrix spec is
    # a dict giving a ConstantIntegrator function with the same matrix as
    # both its initializer and its rate.
    Middle_Weights = MappingProjection(
        name='Middle Weights',
        sender=Hidden_Layer_1,
        receiver=Hidden_Layer_2,
        matrix={
            VALUE: Middle_Weights_matrix,
            FUNCTION: ConstantIntegrator,
            FUNCTION_PARAMS: {
                INITIALIZER: Middle_Weights_matrix,
                RATE: Middle_Weights_matrix,
            },
        },
    )

    # Also fully specified; omitted from the pathway for the same reason.
    Output_Weights = MappingProjection(
        name='Output Weights',
        sender=Hidden_Layer_2,
        receiver=Output_Layer,
        matrix=Output_Weights_matrix,
    )

    # Main learning process: only Input_Weights needs listing, since the
    # other two projections already declare their sender and receiver.
    training_process = process(
        default_input_value=[0, 0],
        pathway=[
            Input_Layer,
            Input_Weights,
            Hidden_Layer_1,
            Hidden_Layer_2,
            Output_Layer,
        ],
        clamp_input=SOFT_CLAMP,
        learning=LEARNING,
        learning_rate=1.0,
        target=[0, 0, 1],
        prefs={
            VERBOSE_PREF: False,
            REPORT_OUTPUT_PREF: True,
        },
    )

    # Single-mechanism process carrying the gating mechanism into the system.
    gating_process = process(
        default_input_value=[1.0],
        pathway=[Gating_Mechanism],
    )

    stim_list = {
        Input_Layer: [[-1, 30]],
        Gating_Mechanism: [1.0],
    }
    target_list = {
        Output_Layer: [[0, 0, 1]],
    }

    def print_header():
        # Per-trial banner (called before each trial).
        print("\n\n**** TRIAL: ", CentralClock.trial)

    def show_target():
        # Dump weights, stimuli, and layer activities after each trial.
        # Note: resolves `net_system` at call time, after it is built below.
        i = net_system.input
        t = net_system.targetInputStates[0].value
        print('\nOLD WEIGHTS: \n')
        print('- Input Weights: \n', Input_Weights.matrix)
        print('- Middle Weights: \n', Middle_Weights.matrix)
        print('- Output Weights: \n', Output_Weights.matrix)
        print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
        print('ACTIVITY FROM OLD WEIGHTS: \n')
        print('- Middle 1: \n', Hidden_Layer_1.value)
        print('- Middle 2: \n', Hidden_Layer_2.value)
        print('- Output:\n', Output_Layer.value)

    net_system = system(
        processes=[training_process, gating_process],
        targets=[0, 0, 1],
        learning_rate=1.0,
    )

    net_system.reportOutputPref = True

    results = net_system.run(
        num_executions=10,
        inputs=stim_list,
        targets=target_list,
        call_before_trial=print_header,
        call_after_trial=show_target,
    )

0 comments on commit d5216c3

Please sign in to comment.