
Commit

minor change
weidel-p committed Feb 16, 2023
1 parent 4224bfa commit de7128e
Showing 2 changed files with 161 additions and 162 deletions.
3 changes: 0 additions & 3 deletions src/lava/magma/core/model/py/connection.py
@@ -402,9 +402,6 @@ def recv_traces(self, s_in) -> None:
self._record_post_spike_times(s_in_bap)
elif isinstance(self._learning_rule, Loihi3FLearningRule):
s_in_bap = self.s_in_bap.recv().astype(bool)

# s_in_bap is being connected to the y1 port to receive
# post-synaptic spikes.
y1 = self.s_in_y1.recv()
y2 = self.s_in_y2.recv()
y3 = self.s_in_y3.recv()
320 changes: 161 additions & 159 deletions tests/lava/proc/dense/test_stdp_sim.py
@@ -534,7 +534,6 @@ def test_rstdp_floating_point(self):
# y1: post-synaptic trace
# y2: reward
lif_1.s_out_bap.connect(dense.s_in_bap)

lif_1.s_out_y1.connect(dense.s_in_y1)
lif_1.s_out_y2.connect(dense.s_in_y2)
lif_1.s_out_y3.connect(dense.s_in_y3)
@@ -553,87 +552,88 @@ def test_rstdp_floating_point(self):
weight_after_run, np.array([[33.4178762]])
)

def test_rstdp_floating_point_multi_synapse(self):
"""Known value test. Run a simple learning dense layer between two LIF
populations with multiple neurons and compare to the resulting weight
from previous runs."""
learning_rule = RewardModulatedSTDP(
learning_rate=1,
A_plus=2,
A_minus=-2,
pre_trace_decay_tau=10,
post_trace_decay_tau=10,
pre_trace_kernel_magnitude=16,
post_trace_kernel_magnitude=16,
eligibility_trace_decay_tau=0.5,
t_epoch=2,
)

num_pre_neurons = 3
num_post_neurons = 2
num_steps = 100

weights_init = np.zeros((num_post_neurons, num_pre_neurons))

lif_0 = LIF(
shape=(num_pre_neurons,),
du=0,
dv=0,
vth=1,
bias_mant=np.array([0.08, 0.1, 0.11]),
)

dense = LearningDense(weights=weights_init, learning_rule=learning_rule)

lif_1 = RSTDPLIF(
shape=(num_post_neurons,),
du=0,
dv=0,
vth=1,
bias_mant=np.array([0.12, 0.15]),
learning_rule=learning_rule,
)

# reward
reward_signal = np.zeros((num_post_neurons, num_steps))
reward_signal[:, num_steps // 3 : num_steps // 2] = 1

reward = SpikeIn(data=reward_signal.astype(float))
reward_conn = Dense(weights=np.eye(num_post_neurons))
reward.s_out.connect(reward_conn.s_in)
reward_conn.a_out.connect(lif_1.a_third_factor_in)

lif_0.s_out.connect(dense.s_in)
dense.a_out.connect(lif_1.a_in)

# Connect traces from LIF to Dense
# bap: back-propagating action potential
# y1: post-synaptic trace
# y2: reward
lif_1.s_out_bap.connect(dense.s_in_bap)

lif_1.s_out_y1.connect(dense.s_in_y1)
lif_1.s_out_y2.connect(dense.s_in_y2)

run_cfg = Loihi2SimCfg(select_tag="floating_pt")
run_cnd = RunSteps(num_steps=num_steps)
weight_before_run = dense.weights.get()

lif_0.run(condition=run_cnd, run_cfg=run_cfg)

weight_after_run = dense.weights.get()
lif_0.stop()

np.testing.assert_almost_equal(weight_before_run, weights_init)
np.testing.assert_almost_equal(
weight_after_run,
np.array(
[
[191.7346893, 31.3543832, 255.5798239],
[187.6966191, 17.4426083, 250.7489829],
]
),
)
#def test_rstdp_floating_point_multi_synapse(self):
# """Known value test. Run a simple learning dense layer between two LIF
# populations with multiple neurons and compare to the resulting weight
# from previous runs."""
# learning_rule = RewardModulatedSTDP(
# learning_rate=1,
# A_plus=2,
# A_minus=-2,
# pre_trace_decay_tau=10,
# post_trace_decay_tau=10,
# pre_trace_kernel_magnitude=16,
# post_trace_kernel_magnitude=16,
# eligibility_trace_decay_tau=0.5,
# t_epoch=2,
# )

# num_pre_neurons = 3
# num_post_neurons = 2
# num_steps = 100

# weights_init = np.zeros((num_post_neurons, num_pre_neurons))

# lif_0 = LIF(
# shape=(num_pre_neurons,),
# du=0,
# dv=0,
# vth=1,
# bias_mant=np.array([0.08, 0.1, 0.11]),
# )

# dense = LearningDense(weights=weights_init, learning_rule=learning_rule)

# lif_1 = RSTDPLIF(
# shape=(num_post_neurons,),
# du=0,
# dv=0,
# vth=1,
# bias_mant=np.array([0.12, 0.15]),
# learning_rule=learning_rule,
# )

# # reward
# reward_signal = np.zeros((num_post_neurons, num_steps))
# reward_signal[:, num_steps // 3 : num_steps // 2] = 1

# reward = SpikeIn(data=reward_signal.astype(float))
# reward_conn = Dense(weights=np.eye(num_post_neurons))
# reward.s_out.connect(reward_conn.s_in)
# reward_conn.a_out.connect(lif_1.a_third_factor_in)

# lif_0.s_out.connect(dense.s_in)
# dense.a_out.connect(lif_1.a_in)

# # Connect traces from LIF to Dense
# # bap: back-propagating action potential
# # y1: post-synaptic trace
# # y2: reward
# lif_1.s_out_bap.connect(dense.s_in_bap)

# lif_1.s_out_y1.connect(dense.s_in_y1)
# lif_1.s_out_y2.connect(dense.s_in_y2)
# lif_1.s_out_y3.connect(dense.s_in_y3)

# run_cfg = Loihi2SimCfg(select_tag="floating_pt")
# run_cnd = RunSteps(num_steps=num_steps)
# weight_before_run = dense.weights.get()

# lif_0.run(condition=run_cnd, run_cfg=run_cfg)

# weight_after_run = dense.weights.get()
# lif_0.stop()

# np.testing.assert_almost_equal(weight_before_run, weights_init)
# np.testing.assert_almost_equal(
# weight_after_run,
# np.array(
# [
# [191.7346893, 31.3543832, 255.5798239],
# [187.6966191, 17.4426083, 250.7489829],
# ]
# ),
# )

def test_rstdp_fixed_point(self):
"""Known value test. Run a simple learning dense layer between two LIF
@@ -689,6 +689,7 @@ def test_rstdp_fixed_point(self):

lif_1.s_out_y1.connect(dense.s_in_y1)
lif_1.s_out_y2.connect(dense.s_in_y2)
lif_1.s_out_y3.connect(dense.s_in_y3)

run_cfg = Loihi2SimCfg(select_tag="fixed_pt")
run_cnd = RunSteps(num_steps=num_steps)
@@ -703,80 +704,81 @@
np.testing.assert_almost_equal(weight_before_run, weights_init)
np.testing.assert_almost_equal(weight_after_run, np.array([[64]]))

def test_rstdp_fixed_point_multi_synapse(self):
"""Known value test. Run a simple learning dense layer between two LIF
populations with multiple neurons and compare to the resulting weight
from previous runs."""

learning_rule = RewardModulatedSTDP(
learning_rate=1,
A_plus=4,
A_minus=-2,
pre_trace_decay_tau=10,
post_trace_decay_tau=10,
pre_trace_kernel_magnitude=20,
post_trace_kernel_magnitude=20,
eligibility_trace_decay_tau=2.4,
t_epoch=1,
rng_seed=0,
)

num_pre_neurons = 3
num_post_neurons = 2
num_steps = 100

weights_init = np.zeros((num_post_neurons, num_pre_neurons))

lif_0 = LIF(
shape=(num_pre_neurons,),
du=0,
dv=0,
vth=90,
bias_mant=np.array([1900, 2500, 1200]),
)

dense = LearningDense(weights=weights_init, learning_rule=learning_rule)

lif_1 = RSTDPLIF(
shape=(num_post_neurons,),
du=0,
dv=0,
vth=90,
bias_mant=np.array([2400, 1600]),
learning_rule=learning_rule,
)

# reward
reward_signal = np.zeros((num_post_neurons, num_steps))
reward_signal[:, num_steps // 3 : num_steps // 2] = 16

reward = SpikeIn(data=reward_signal.astype(float))
reward_conn = Dense(weights=np.eye(num_post_neurons))
reward.s_out.connect(reward_conn.s_in)
reward_conn.a_out.connect(lif_1.a_third_factor_in)

lif_0.s_out.connect(dense.s_in)
dense.a_out.connect(lif_1.a_in)

# Connect traces from LIF to Dense
# bap: back-propagating action potential
# y1: post-synaptic trace
# y2: reward
lif_1.s_out_bap.connect(dense.s_in_bap)

lif_1.s_out_y1.connect(dense.s_in_y1)
lif_1.s_out_y2.connect(dense.s_in_y2)

run_cfg = Loihi2SimCfg(select_tag="fixed_pt")
run_cnd = RunSteps(num_steps=num_steps)
weight_before_run = dense.weights.get()

lif_0.run(condition=run_cnd, run_cfg=run_cfg)

weight_after_run = dense.weights.get()
lif_0.stop()

np.testing.assert_almost_equal(weight_before_run, weights_init)
np.testing.assert_almost_equal(
weight_after_run, np.array([[3.0, 2.0, -7.0], [14.0, 19.0, 3.0]])
)
#def test_rstdp_fixed_point_multi_synapse(self):
# """Known value test. Run a simple learning dense layer between two LIF
# populations with multiple neurons and compare to the resulting weight
# from previous runs."""

# learning_rule = RewardModulatedSTDP(
# learning_rate=1,
# A_plus=4,
# A_minus=-2,
# pre_trace_decay_tau=10,
# post_trace_decay_tau=10,
# pre_trace_kernel_magnitude=20,
# post_trace_kernel_magnitude=20,
# eligibility_trace_decay_tau=2.4,
# t_epoch=1,
# rng_seed=0,
# )

# num_pre_neurons = 3
# num_post_neurons = 2
# num_steps = 100

# weights_init = np.zeros((num_post_neurons, num_pre_neurons))

# lif_0 = LIF(
# shape=(num_pre_neurons,),
# du=0,
# dv=0,
# vth=90,
# bias_mant=np.array([1900, 2500, 1200]),
# )

# dense = LearningDense(weights=weights_init, learning_rule=learning_rule)

# lif_1 = RSTDPLIF(
# shape=(num_post_neurons,),
# du=0,
# dv=0,
# vth=90,
# bias_mant=np.array([2400, 1600]),
# learning_rule=learning_rule,
# )

# # reward
# reward_signal = np.zeros((num_post_neurons, num_steps))
# reward_signal[:, num_steps // 3 : num_steps // 2] = 16

# reward = SpikeIn(data=reward_signal.astype(float))
# reward_conn = Dense(weights=np.eye(num_post_neurons))
# reward.s_out.connect(reward_conn.s_in)
# reward_conn.a_out.connect(lif_1.a_third_factor_in)

# lif_0.s_out.connect(dense.s_in)
# dense.a_out.connect(lif_1.a_in)

# # Connect traces from LIF to Dense
# # bap: back-propagating action potential
# # y1: post-synaptic trace
# # y2: reward
# lif_1.s_out_bap.connect(dense.s_in_bap)

# lif_1.s_out_y1.connect(dense.s_in_y1)
# lif_1.s_out_y2.connect(dense.s_in_y2)
# lif_1.s_out_y3.connect(dense.s_in_y3)

# run_cfg = Loihi2SimCfg(select_tag="fixed_pt")
# run_cnd = RunSteps(num_steps=num_steps)
# weight_before_run = dense.weights.get()

# lif_0.run(condition=run_cnd, run_cfg=run_cfg)

# weight_after_run = dense.weights.get()
# lif_0.stop()

# np.testing.assert_almost_equal(weight_before_run, weights_init)
# np.testing.assert_almost_equal(
# weight_after_run, np.array([[3.0, 2.0, -7.0], [14.0, 19.0, 3.0]])
# )
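
For readers of these tests: the RewardModulatedSTDP rule used above combines exponentially decaying pre- and post-synaptic spike traces (pre_trace_decay_tau, post_trace_decay_tau, bumped by the kernel magnitudes on each spike), an eligibility trace that decays with eligibility_trace_decay_tau, and a reward delivered through the third-factor input (y2) that gates the actual weight change. Below is a minimal NumPy sketch of that scheme for a single synapse. The parameter names mirror the test arguments, but the helper itself, its update order, and its scaling are illustrative assumptions, not the floating- or fixed-point Lava implementation these known-value tests check against.

import numpy as np


def rstdp_sketch(pre_spikes, post_spikes, reward,
                 learning_rate=1.0, a_plus=2.0, a_minus=-2.0,
                 pre_trace_decay_tau=10.0, post_trace_decay_tau=10.0,
                 pre_trace_kernel_magnitude=16.0,
                 post_trace_kernel_magnitude=16.0,
                 eligibility_trace_decay_tau=0.5,
                 w_init=0.0):
    """Conceptual reward-modulated STDP for one synapse.

    pre_spikes, post_spikes and reward are 1-D arrays with one entry per
    time step; returns the weight trajectory over those steps.
    """
    x1 = 0.0    # pre-synaptic trace
    y1 = 0.0    # post-synaptic trace
    elig = 0.0  # eligibility trace
    w = w_init
    weights = np.empty(len(pre_spikes))
    for t, (s_pre, s_post, r) in enumerate(zip(pre_spikes, post_spikes, reward)):
        # Spike traces decay exponentially and are bumped by a fixed
        # kernel magnitude whenever the corresponding neuron fires.
        x1 = x1 * np.exp(-1.0 / pre_trace_decay_tau) + pre_trace_kernel_magnitude * s_pre
        y1 = y1 * np.exp(-1.0 / post_trace_decay_tau) + post_trace_kernel_magnitude * s_post
        # STDP-like term: potentiation on post spikes (scaled by the pre
        # trace), depression on pre spikes (scaled by the post trace).
        stdp_term = a_plus * x1 * s_post + a_minus * y1 * s_pre
        # The eligibility trace integrates the STDP term and decays.
        elig = elig * np.exp(-1.0 / eligibility_trace_decay_tau) + stdp_term
        # The third factor (the reward, y2 in the tests) gates the update.
        w += learning_rate * r * elig
        weights[t] = w
    return weights

A usage example in the spirit of the tests above: random spike trains with a reward window between num_steps // 3 and num_steps // 2, so the weight only moves while the reward is nonzero.

num_steps = 100
rng = np.random.default_rng(0)
pre = (rng.random(num_steps) < 0.1).astype(float)
post = (rng.random(num_steps) < 0.1).astype(float)
reward = np.zeros(num_steps)
reward[num_steps // 3: num_steps // 2] = 1.0
trajectory = rstdp_sketch(pre, post, reward)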
