diff --git a/Scripts/Models (Under Development)/N-back/N-back MODULARIZED.py b/Scripts/Models (Under Development)/N-back/N-back MODULARIZED.py
deleted file mode 100644
index 74dcf24e51d..00000000000
--- a/Scripts/Models (Under Development)/N-back/N-back MODULARIZED.py
+++ /dev/null
@@ -1,231 +0,0 @@
-import numpy as np
-from psyneulink import *
-# from psyneulink.core.scheduling.condition import When
-from graph_scheduler import *
-
-# TODO:
-#   - from nback-paper:
-#     - get ffn weights
-#     - import stimulus generation code
-#   - retrain on full set of 1,2,3,4,5 back
-#   - validate against nback-paper results
-#   - DriftOnASphereIntegrator: fix for noise=0
-#   - write test that compares DriftOnASphereIntegrator with spherical_drift code in nback-paper
-
-# FROM nback-paper:
-# 'smtemp':8,
-# 'stim_weight':0.05,
-# 'hrate':0.04
-# SDIM = 20
-# indim = 2 * (CDIM + SDIM)
-# hiddim = SDIM * 4
-
-# TEST:
-# Structural parameters:
-NUM_TASKS=3
-# Test:
-STIM_SIZE=1
-# Replicate model:
-# STIM_SIZE=20
-# ----------
-CONTEXT_SIZE=25
-HIDDEN_SIZE=STIM_SIZE*4
-
-# Execution parameters
-# Test:
-CONTEXT_DRIFT_RATE=.1
-CONTEXT_DRIFT_NOISE=.00000000001
-# Replicate model:
-# CONTEXT_DRIFT_RATE=.25
-# CONTEXT_DRIFT_NOISE=.075
-# ----
-NUM_TRIALS=20
-NBACK=2
-TOLERANCE=.5
-STIM_WEIGHT=.05
-HAZARD_RATE=0.04
-SOFT_MAX_TEMP=1/8
-
-# # MODEL:
-# STIM_SIZE=25
-# CONTEXT_SIZE=20
-# CONTEXT_DRIFT_RATE=.25
-# CONTEXT_DRIFT_NOISE=.075
-# NUM_TRIALS = 25
-
-def control_function(outcome):
-    """Evaluate response and set ControlSignal for EM[store_prob] accordingly.
-
-    outcome[0] = ffn output
-    If ffn_output signifies a MATCH:
-        set EM[store_prob]=1 (in preparation for encoding the stimulus in EM on the next trial)
-        terminate trial
-    If ffn_output signifies a NON-MATCH:
-        set EM[store_prob]=0 (in preparation for another retrieval from EM without storage)
-        continue trial
-
-    Notes:
-    - outcome is passed as a 2d array with a single 1d entry of length 2, such that outcome[0] = ffn output
-    - ffn output: [1,0]=MATCH, [0,1]=NON-MATCH
-    - the return value is used by:
-        - the control Mechanism, to set the ControlSignal for EM[store_prob] (per above)
-        - terminate_trial(), which is used by the Condition specified as termination_processing for comp.run()
-          to determine whether to end or continue the trial
-    """
-    ffn_output = outcome[0]
-    if ffn_output[1] > ffn_output[0]:
-        return 1
-    else:  # NON-MATCH:
-        return 0
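As a quick check of how the decision layer's one-hot output maps onto the value used for EM[store_prob], control_function can be exercised on its own (a usage sketch with hypothetical output values, assuming the function is in scope as defined above):

    import numpy as np

    # outcome is a 2d array whose single length-2 entry is the ffn's output
    assert control_function(np.array([[0.9, 0.1]])) == 0  # first element dominates
    assert control_function(np.array([[0.1, 0.9]])) == 1  # second element dominates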
-
-
-def terminate_trial(ctl_mech):
-    """Determine whether to continue or terminate trial.
-
-    Determination is based on the value computed by control_function (assigned as function of the control Mechanism):
-    - terminate if the decision was a match, or (with probability 1-HAZARD_RATE) if it was a non-match
-    - continue (re-sample EM) with probability HAZARD_RATE following a non-match
-    """
-    if ctl_mech.value==1 or np.random.random() > HAZARD_RATE:
-        return 1  # terminate
-    else:
-        return 0  # continue
-
-
-def construct_model(num_tasks=NUM_TASKS, stim_size=STIM_SIZE, context_size=CONTEXT_SIZE, hidden_size=HIDDEN_SIZE,
-                    display=False):
-    # NOTE: the body below still reads the module-level constants; the parameters (with matching defaults)
-    #       keep the construct_model(display=True) call at the bottom of the script valid.
-
-    # Mechanisms:
-    stim = TransferMechanism(name='STIM', size=STIM_SIZE)
-    context = ProcessingMechanism(name='CONTEXT',
-                                  function=DriftOnASphereIntegrator(
-                                      initializer=np.random.random(CONTEXT_SIZE-1),
-                                      noise=CONTEXT_DRIFT_NOISE,
-                                      dimension=CONTEXT_SIZE))
-    task = ProcessingMechanism(name="TASK", size=NUM_TASKS)
-    em = EpisodicMemoryMechanism(name='EPISODIC MEMORY (dict)',
-                                 # default_variable=[[0]*STIM_SIZE, [0]*CONTEXT_SIZE],
-                                 input_ports=[{NAME:"STIMULUS_FIELD",
-                                               SIZE:STIM_SIZE},
-                                              {NAME:"CONTEXT_FIELD",
-                                               SIZE:CONTEXT_SIZE}],
-                                 function=ContentAddressableMemory(
-                                     initializer=[[[0]*STIM_SIZE, [0]*CONTEXT_SIZE]],
-                                     distance_field_weights=[STIM_WEIGHT, 1-STIM_WEIGHT],
-                                     equidistant_entries_select=NEWEST,
-                                     selection_function=SoftMax(output=MAX_INDICATOR,
-                                                                gain=SOFT_MAX_TEMP)),
-                                 )
-    stim_comparator = ComparatorMechanism(name='STIM COMPARATOR',
-                                          # sample=STIM_SIZE, target=STIM_SIZE
-                                          input_ports=[{NAME:"CURRENT_STIMULUS", SIZE:STIM_SIZE},
-                                                       {NAME:"RETRIEVED_STIMULUS", SIZE:STIM_SIZE}],
-                                          )
-    context_comparator = ComparatorMechanism(name='CONTEXT COMPARATOR',
-                                             # sample=np.zeros(STIM_SIZE),
-                                             # target=np.zeros(CONTEXT_SIZE)
-                                             input_ports=[{NAME:"CURRENT_CONTEXT", SIZE:CONTEXT_SIZE},
-                                                          {NAME:"RETRIEVED_CONTEXT", SIZE:CONTEXT_SIZE}],
-                                             function=Distance(metric=COSINE))
-
-    # QUESTION: GET INFO ABOUT INPUT FUNCTIONS FROM ANDRE:
-    input_current_stim = TransferMechanism(size=STIM_SIZE, function=Linear, name="CURRENT STIMULUS")          # function=Logistic)
-    input_current_context = TransferMechanism(size=CONTEXT_SIZE, function=Linear, name="CURRENT CONTEXT")     # function=Logistic)
-    input_retrieved_stim = TransferMechanism(size=STIM_SIZE, function=Linear, name="RETRIEVED STIMULUS")      # function=Logistic)
-    input_retrieved_context = TransferMechanism(size=CONTEXT_SIZE, function=Linear, name="RETRIEVED CONTEXT") # function=Logistic)
-    input_task = TransferMechanism(size=NUM_TASKS, function=Linear, name="CURRENT TASK")                      # function=Logistic)
-    hidden = TransferMechanism(size=HIDDEN_SIZE, function=Logistic, name="HIDDEN LAYER")
-    decision = ProcessingMechanism(size=2, name="DECISION LAYER")
-
-    control = ControlMechanism(name="READ/WRITE CONTROLLER",
-                               monitor_for_control=decision,
-                               function=control_function,
-                               control=(STORAGE_PROB, em),)
-
-    # Compositions:
-    ffn = Composition([{input_current_stim,
-                        input_current_context,
-                        input_retrieved_stim,
-                        input_retrieved_context,
-                        input_task},
-                       hidden, decision],
-                      name="WORKING MEMORY (fnn)")
-    comp = Composition(nodes=[stim, context, task, em, ffn, control],
-                       name="N-back Model")
-    comp.add_projection(MappingProjection(), stim, input_current_stim)
-    comp.add_projection(MappingProjection(), context, input_current_context)
-    comp.add_projection(MappingProjection(), task, input_task)
-    comp.add_projection(MappingProjection(), em.output_ports["RETRIEVED_STIMULUS_FIELD"], input_retrieved_stim)
-    comp.add_projection(MappingProjection(), em.output_ports["RETRIEVED_CONTEXT_FIELD"], input_retrieved_context)
-    comp.add_projection(MappingProjection(), stim, em.input_ports["STIMULUS_FIELD"])
-    comp.add_projection(MappingProjection(), context, em.input_ports["CONTEXT_FIELD"])
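The retrieval step performed by EM above (a content-addressable match over the stimulus and context fields, weighted by distance_field_weights and resolved by a high-gain SoftMax) can be summarized outside of PsyNeuLink. A minimal numpy sketch, assuming cosine distance per field and a softmax over negative weighted distances; PsyNeuLink's ContentAddressableMemory handles details such as duplicate entries (equidistant_entries_select) differently:

    import numpy as np

    def retrieve(memory, stim_probe, ctx_probe, stim_weight=0.05, gain=8):
        """Return the (stim, ctx) entry of memory that best matches the probes."""
        def cos_dist(a, b):
            return 1 - (a @ b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12)
        # Weighted sum of per-field distances (cf. distance_field_weights=[STIM_WEIGHT, 1-STIM_WEIGHT])
        d = np.array([stim_weight * cos_dist(stim_probe, s) + (1 - stim_weight) * cos_dist(ctx_probe, c)
                      for s, c in memory])
        p = np.exp(-gain * d)
        p /= p.sum()                      # softmax; gain acts as an inverse temperature
        return memory[int(np.argmax(p))]  # MAX_INDICATOR: select the single best entry

With STIM_WEIGHT=.05, retrieval is dominated by the context field, so temporal proximity rather than stimulus identity drives which entry is recalled; and since terminate_trial() continues a trial only when the controller's value is 0 and a uniform draw falls at or below HAZARD_RATE (0.04), extra retrieval passes within a trial are rare.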
em.input_ports["CONTEXT_FIELD"]) - comp.add_projection(MappingProjection(), decision, control) - - if display: - comp.show_graph() - # comp.show_graph(show_cim=True, - # show_node_structure=ALL, - # show_dimensions=True) - - # Execution: - - # Define a function that detects when the a Mechanism's value has converged, such that the change in all of the - # elements of its value attribute from the last execution (given by its delta attribute) falls below ``epsilon`` - # - # def converge(mech, thresh): - # return all(abs(v) <= thresh for v in mech.delta) - # - # # Add Conditions to the ``color_hidden`` and ``word_hidden`` Mechanisms that depend on the converge function: - # epsilon = 0.01 - # Stroop_model.scheduler.add_condition(color_hidden, When(converge, task, epsilon))) - # Stroop_model.scheduler.add_condition(word_hidden, When(converge, task, epsilon))) - return comp - - -def execute_model(model): - input_dict = {model.nodes['STIM']: np.array(list(range(NUM_TRIALS))).reshape(NUM_TRIALS,1)+1, - model.nodes['CONTEXT']:[[CONTEXT_DRIFT_RATE]]*NUM_TRIALS, - model.nodes['TASK']: np.array([[0,0,1]]*NUM_TRIALS)} - model.run(inputs=input_dict, - termination_processing={TimeScale.TRIAL: - Condition(terminate_trial, # termination function - model.nodes["READ/WRITE CONTROLLER"])}, # function arg - report_output=ReportOutput.ON - ) - -nback_model = construct_model(display=True) -execute_model(nback_model) - - -# TEST OF SPHERICAL DRIFT: -# stims = np.array([x[0] for x in em.memory]) -# contexts = np.array([x[1] for x in em.memory]) -# cos = Distance(metric=COSINE) -# dist = Distance(metric=EUCLIDEAN) -# diffs = [np.sum([contexts[i+1] - contexts[1]]) for i in range(NUM_TRIALS)] -# diffs_1 = [np.sum([contexts[i+1] - contexts[i]]) for i in range(NUM_TRIALS)] -# diffs_2 = [np.sum([contexts[i+2] - contexts[i]]) for i in range(NUM_TRIALS-1)] -# dots = [[contexts[i+1] @ contexts[1]] for i in range(NUM_TRIALS)] -# dot_diffs_1 = [[contexts[i+1] @ contexts[i]] for i in range(NUM_TRIALS)] -# dot_diffs_2 = [[contexts[i+2] @ contexts[i]] for i in range(NUM_TRIALS-1)] -# angle = [cos([contexts[i+1], contexts[1]]) for i in range(NUM_TRIALS)] -# angle_1 = [cos([contexts[i+1], contexts[i]]) for i in range(NUM_TRIALS)] -# angle_2 = [cos([contexts[i+2], contexts[i]]) for i in range(NUM_TRIALS-1)] -# euclidean = [dist([contexts[i+1], contexts[1]]) for i in range(NUM_TRIALS)] -# euclidean_1 = [dist([contexts[i+1], contexts[i]]) for i in range(NUM_TRIALS)] -# euclidean_2 = [dist([contexts[i+2], contexts[i]]) for i in range(NUM_TRIALS-1)] -# print("STIMS:", stims, "\n") -# print("DIFFS:", diffs, "\n") -# print("DIFFS 1:", diffs_1, "\n") -# print("DIFFS 2:", diffs_2, "\n") -# print("DOT PRODUCTS:", dots, "\n") -# print("DOT DIFFS 1:", dot_diffs_1, "\n") -# print("DOT DIFFS 2:", dot_diffs_2, "\n") -# print("ANGLE: ", angle, "\n") -# print("ANGLE_1: ", angle_1, "\n") -# print("ANGLE_2: ", angle_2, "\n") -# print("EUCILDEAN: ", euclidean, "\n") -# print("EUCILDEAN 1: ", euclidean_1, "\n") -# print("EUCILDEAN 2: ", euclidean_2, "\n") - -# n_back_model() diff --git a/Scripts/Models (Under Development)/N-back/N-back.py b/Scripts/Models (Under Development)/N-back/N-back.py deleted file mode 100644 index 6504493494a..00000000000 --- a/Scripts/Models (Under Development)/N-back/N-back.py +++ /dev/null @@ -1,537 +0,0 @@ -""" -This implements a model of the `N-back task `_ -described in `Beukers et al. (2022) `_. 
diff --git a/Scripts/Models (Under Development)/N-back/N-back.py b/Scripts/Models (Under Development)/N-back/N-back.py
deleted file mode 100644
index 6504493494a..00000000000
--- a/Scripts/Models (Under Development)/N-back/N-back.py
+++ /dev/null
@@ -1,537 +0,0 @@
-"""
-This implements a model of the `N-back task `_
-described in `Beukers et al. (2022) `_.  The model uses a simple implementation of episodic
-(content-addressable) memory to store previous stimuli and the temporal context in which they occurred,
-and a feedforward neural network to evaluate whether the current stimulus is a match to the n'th preceding stimulus
-(n-back level).  This model is an example of proposed interactions between working memory (e.g., in neocortex) and
-episodic memory (e.g., in hippocampus and/or cerebellum) in the performance of tasks that demand sequential processing
-and control, along the lines of models emerging in machine learning that augment the use of recurrent neural networks
-(e.g., long short-term memory mechanisms; LSTMs) for active memory and control with an external memory capable of
-rapid storage and content-based retrieval, such as the Neural Turing Machine (NTM; `Graves et al., 2016
-`_), Episodic Planning Networks (EPN; `Ritter et al., 2020
-`_), and Emergent Symbols through Binding Networks (ESBN; `Webb et al., 2021
-`_).
-
-There are three primary methods in the script:
-
-* construct_model(args):
-  takes as arguments the parameters used to construct the model; for convenience, defaults are defined below
-  (under "Construction parameters").
-
-* train_network(args):
-  takes as arguments the feedforward neural network Composition (FFN_COMPOSITION) and the number of epochs to train.
-  Note: the learning_rate is set at construction (it can be specified using LEARNING_RATE under "Training parameters" below).
-
-* run_model():
-  takes as arguments the context drift rate to be applied on each trial and the number of trials to execute, as well as
-  reporting and animation specifications (see "Execution parameters" below).
-
-See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run,
-and whether a graphic display of the network is generated when it is constructed.
-
-TODO:
-  - from Andre:
-    - network architecture; in particular, size of hidden layer and projection patterns to and from it
-      (a sketch follows this docstring):
-      - the stim+context input vector (length 90) projects to a hidden layer (length 80);
-      - the task input vector (length 2) projects to a different hidden layer (length 80);
-      - those two hidden layers project (over fixed, non-learnable, one-to-one projections?) to a third
-        hidden layer (length 80) that simply sums them;
-      - the third hidden layer projects to the length 2 output layer;
-      - a softmax is taken over the output layer to determine the response.
-    - fix: were biases trained?
-  - training:
-    - learning rate: 0.001; epoch: 1 trial per epoch of training
-  - fix: state_dict with weights (still needed)
-  - get empirical stimulus sequences (still needed)
-  - put N-back script (with pointer to latest version on PNL) in nback-paper repo
-  - fix: get rid of objective_mechanism (see "VERSION *WITHOUT* ObjectiveMechanism" under control(...))
-  - fix: warnings on run
-  - complete documentation in BeukersNbackModel.rst
-  - validate against nback-paper results
-  - after validation:
-    - try with STIM_SIZE = NUM_STIMS rather than 20 (as in nback-paper)
-    - refactor generate_stim_sequence() to use actual empirical stimulus sequences
-    - replace get_input_sequence and get_training_inputs with generators passed to nback_model.run() and ffn.learn()
-
-"""
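The architecture described in the TODO notes above can be written down compactly. A numpy sketch of that forward pass (dimensions 90/2/80/80/80/2 follow the notes; the weight values and the ReLU choice here are placeholders, not the trained nback-paper parameters):

    import numpy as np

    rng = np.random.default_rng(0)
    relu = lambda z: np.maximum(z, 0)

    W_sc   = rng.normal(0, 0.1, (80, 90))  # stim+context (90) -> hidden 1 (80), learnable
    W_task = rng.normal(0, 0.1, (80, 2))   # task (2)          -> hidden 2 (80), learnable
    W_out  = rng.normal(0, 0.1, (2, 80))   # summed hidden     -> output (2),    learnable

    def forward(stim_context, task):
        h1 = relu(W_sc @ stim_context)
        h2 = relu(W_task @ task)
        h3 = h1 + h2                       # fixed one-to-one projections that simply sum the two hidden layers
        logits = W_out @ h3
        p = np.exp(logits - logits.max())
        return p / p.sum()                 # softmax over the length-2 output determines the response

    p_match, p_non_match = forward(rng.random(90), np.array([1.0, 0.0]))

Whether biases were included in training is the open question flagged in the notes.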
-
-from graph_scheduler import *
-
-from psyneulink import *
-import numpy as np
-
-# Settings for running script:
-TRAIN = True
-RUN = True
-DISPLAY_MODEL = False  # show visual graphic of model
-
-# PARAMETERS -------------------------------------------------------------------------------------------------------
-
-# Fixed (structural) parameters:
-MAX_NBACK_LEVELS = 3
-NUM_STIM = 8  # number of different stimuli in stimulus set - QUESTION: WHY ISN'T THIS EQUAL TO STIM_SIZE OR VICE VERSA?
-FFN_TRANSFER_FUNCTION = ReLU
-
-# Constructor parameters: (values are from nback-paper)
-STIM_SIZE=8                              # length of stimulus vector
-CONTEXT_SIZE=25                          # length of context vector
-HIDDEN_SIZE=STIM_SIZE*4                  # dimension of hidden units in ff
-NBACK_LEVELS = [2,3]                     # Currently restricted to these
-NUM_NBACK_LEVELS = len(NBACK_LEVELS)
-CONTEXT_DRIFT_NOISE=0.0                  # noise used by DriftOnASphereIntegrator (function of Context mech)
-RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1)  # Matrix spec used to initialize all Projections
-RETRIEVAL_SOFTMAX_TEMP=1/8               # express as gain  # precision of retrieval process
-RETRIEVAL_HAZARD_RATE=0.04               # rate of re-sampling of em following non-match determination in a pass through ffn
-RETRIEVAL_STIM_WEIGHT=.05                # weighting of stimulus field in retrieval from em
-RETRIEVAL_CONTEXT_WEIGHT = 1-RETRIEVAL_STIM_WEIGHT  # weighting of context field in retrieval from em
-DECISION_SOFTMAX_TEMP=1
-
-# Training parameters:
-NUM_EPOCHS= 6250    # nback-paper: 400,000 @ one trial per epoch = 6,250 @ 64 trials per epoch
-LEARNING_RATE=0.01  # nback-paper: .001
-
-# Execution parameters:
-CONTEXT_DRIFT_RATE=.1                 # drift rate used for DriftOnASphereIntegrator (function of Context mech) on each trial
-NUM_TRIALS = 48                       # number of stimuli presented in a trial sequence
-REPORT_OUTPUT = ReportOutput.OFF      # Sets console output during run
-REPORT_PROGRESS = ReportProgress.OFF  # Sets console progress bar during run
-REPORT_LEARNING = ReportLearning.OFF  # Sets console progress bar during training
-ANIMATE = False  # {UNIT:EXECUTION_SET}  # Specifies whether to generate animation of execution
-
-# Names of Compositions and Mechanisms:
-NBACK_MODEL = "N-back Model"
-FFN_COMPOSITION = "WORKING MEMORY (fnn)"
-FFN_STIMULUS_INPUT = "CURRENT STIMULUS"
-FFN_CONTEXT_INPUT = "CURRENT CONTEXT"
-FFN_STIMULUS_RETRIEVED = "RETRIEVED STIMULUS"
-FFN_CONTEXT_RETRIEVED = "RETRIEVED CONTEXT"
-FFN_TASK = "CURRENT TASK"
-FFN_HIDDEN = "HIDDEN LAYER"
-FFN_OUTPUT = "DECISION LAYER"
-MODEL_STIMULUS_INPUT ='STIM'
-MODEL_CONTEXT_INPUT = 'CONTEXT'
-MODEL_TASK_INPUT = "TASK"
-EM = "EPISODIC MEMORY (dict)"
-CONTROLLER = "READ/WRITE CONTROLLER"
-
-# ======================================== MODEL CONSTRUCTION =========================================================
-
-def construct_model(stim_size = STIM_SIZE,
-                    context_size = CONTEXT_SIZE,
-                    hidden_size = HIDDEN_SIZE,
-                    num_nback_levels = NUM_NBACK_LEVELS,
-                    context_drift_noise = CONTEXT_DRIFT_NOISE,
-                    retrievel_softmax_temp = RETRIEVAL_SOFTMAX_TEMP,
-                    retrieval_hazard_rate = RETRIEVAL_HAZARD_RATE,
-                    retrieval_stimulus_weight = RETRIEVAL_STIM_WEIGHT,
-                    retrieval_context_weight =
RETRIEVAL_CONTEXT_WEIGHT, - decision_softmax_temp = DECISION_SOFTMAX_TEMP): - """Construct nback_model""" - - print(f"constructing '{FFN_COMPOSITION}'...") - - # FEED FORWARD NETWORK ----------------------------------------- - - # inputs: encoding of current stimulus and context, retrieved stimulus and retrieved context, - # output: decision: match [1,0] or non-match [0,1] - # Must be trained to detect match for specified task (1-back, 2-back, etc.) - input_current_stim = TransferMechanism(name=FFN_STIMULUS_INPUT, - size=stim_size, - function=FFN_TRANSFER_FUNCTION) - input_current_context = TransferMechanism(name=FFN_CONTEXT_INPUT, - size=context_size, - function=FFN_TRANSFER_FUNCTION) - input_retrieved_stim = TransferMechanism(name=FFN_STIMULUS_RETRIEVED, - size=stim_size, - function=FFN_TRANSFER_FUNCTION) - input_retrieved_context = TransferMechanism(name=FFN_CONTEXT_RETRIEVED, - size=context_size, - function=FFN_TRANSFER_FUNCTION) - input_task = TransferMechanism(name=FFN_TASK, - size=num_nback_levels, - function=FFN_TRANSFER_FUNCTION) - hidden = TransferMechanism(name=FFN_HIDDEN, - size=hidden_size, - function=FFN_TRANSFER_FUNCTION) - decision = ProcessingMechanism(name=FFN_OUTPUT, - size=2, function=SoftMax(output=MAX_INDICATOR, - gain=decision_softmax_temp)) - ffn = AutodiffComposition(([{input_current_stim, - input_current_context, - input_retrieved_stim, - input_retrieved_context, - input_task}, - hidden, decision], - RANDOM_WEIGHTS_INITIALIZATION, - ), - name=FFN_COMPOSITION, - learning_rate=LEARNING_RATE - ) - - # FULL MODEL (Outer Composition, including input, EM and control Mechanisms) ------------------------ - - print(f"'constructing {NBACK_MODEL}'...") - - # Stimulus Encoding: takes STIM_SIZE vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) - - # Context Encoding: takes scalar as drift step for current trial - context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, - function=DriftOnASphereIntegrator( - initializer=np.random.random(context_size-1), - noise=context_drift_noise, - dimension=context_size)) - - # Task: task one-hot indicating n-back (1, 2, 3 etc.) 
- must correspond to what ffn has been trained to do - task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=num_nback_levels) - - # Episodic Memory: - # - entries: stimulus (field[0]) and context (field[1]); randomly initialized - # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT - em = EpisodicMemoryMechanism(name=EM, - input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, - {NAME:"CONTEXT_FIELD", - SIZE:context_size}], - function=ContentAddressableMemory( - initializer=[[[0]*stim_size, [0]*context_size]], - distance_field_weights=[retrieval_stimulus_weight, - retrieval_context_weight], - # equidistant_entries_select=NEWEST, - selection_function=SoftMax(output=MAX_INDICATOR, - gain=retrievel_softmax_temp)), - ) - - # Control Mechanism - # Ensures current stimulus and context are only encoded in EM once (at beginning of trial) - # by controlling the storage_prob parameter of em: - # - if outcome of decision signifies a match or hazard rate is realized: - # - set EM[store_prob]=1 (as prep encoding stimulus in EM on next trial) - # - this also serves to terminate trial (see nback_model.termination_processing condition) - # - if outcome of decision signifies a non-match - # - set EM[store_prob]=0 (as prep for another retrieval from EM without storage) - # - continue trial - control = ControlMechanism(name=CONTROLLER, - default_variable=[[1]], # Ensure EM[store_prob]=1 at beginning of first trial - # --------- - # VERSION *WITH* ObjectiveMechanism: - objective_mechanism=ObjectiveMechanism(name="OBJECTIVE MECHANISM", - monitor=decision, - # Outcome=1 if match, else 0 - function=lambda x: int(x[0][1]>x[0][0])), - # Set ControlSignal for EM[store_prob] - function=lambda outcome: int(bool(outcome) - or (np.random.random() > retrieval_hazard_rate)), - # --------- - # # VERSION *WITHOUT* ObjectiveMechanism: - # monitor_for_control=decision, - # # Set Evaluate outcome and set ControlSignal for EM[store_prob] - # # - outcome is received from decision as one hot in the form: [[match, no-match]] - # function=lambda outcome: int(int(outcome[0][1]>outcome[0][0]) - # or (np.random.random() > retrieval_hazard_rate)), - # --------- - control=(STORAGE_PROB, em)) - - nback_model = Composition(name=NBACK_MODEL, - nodes=[stim, context, task, ffn, em, control], - # Terminate trial if value of control is still 1 after first pass through execution - termination_processing={TimeScale.TRIAL: And(Condition(lambda: control.value), - AfterPass(0, TimeScale.TRIAL))}, - ) - # # Terminate trial if value of control is still 1 after first pass through execution - # # FIX: ALL OF THE FOLLOWING STOP AFTER ~ NUMBER OF TRIALS (?90+); SHOULD BE: NUM_TRIALS*NUM_NBACK_LEVELS + 1 - # nback_model.scheduler.add_condition(nback_model, And(Condition(lambda: control.value), AfterPass(0, TimeScale.TRIAL))) - # nback_model.scheduler.termination_conds = ({TimeScale.TRIAL: And(Condition(lambda: control.value), - # AfterPass(0, TimeScale.TRIAL))}) - # nback_model.scheduler.termination_conds.update({TimeScale.TRIAL: And(Condition(lambda: control.value), - # AfterPass(0, TimeScale.TRIAL))}) - nback_model.add_projection(MappingProjection(), stim, input_current_stim) - nback_model.add_projection(MappingProjection(), context, input_current_context) - nback_model.add_projection(MappingProjection(), task, input_task) - nback_model.add_projection(MappingProjection(), em.output_ports["RETRIEVED_STIMULUS_FIELD"], input_retrieved_stim) - nback_model.add_projection(MappingProjection(), 
em.output_ports["RETRIEVED_CONTEXT_FIELD"], input_retrieved_context) - nback_model.add_projection(MappingProjection(), stim, em.input_ports["STIMULUS_FIELD"]) - nback_model.add_projection(MappingProjection(), context, em.input_ports["CONTEXT_FIELD"]) - - if DISPLAY_MODEL: - nback_model.show_graph( - # show_cim=True, - # show_node_structure=ALL, - # show_dimensions=True - ) - - print(f'full model constructed') - return nback_model - -# ==========================================STIMULUS GENERATION ======================================================= -# Based on nback-paper - -def get_stim_set(num_stim=STIM_SIZE): - """Construct an array of stimuli for use an experiment""" - # For now, use one-hots - return np.eye(num_stim) - -def get_task_input(nback_level): - """Construct input to task Mechanism for a given nback_level, used by run_model() and train_network()""" - task_input = list(np.zeros_like(NBACK_LEVELS)) - task_input[nback_level-NBACK_LEVELS[0]] = 1 - return task_input - -def get_run_inputs(model, nback_level, context_drift_rate, num_trials): - """Construct set of stimulus inputs for run_model()""" - - def generate_stim_sequence(nback_level, trial_num, trial_type=0, num_stim=NUM_STIM, num_trials=NUM_TRIALS): - assert nback_level in {2,3} # At present, only 2- and 3-back levels are supported - - def gen_subseq_stim(): - A = np.random.randint(0,num_stim) - B = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A]) - ) - C = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A,B]) - ) - X = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A,B]) - ) - return A,B,C,X - - def generate_match_no_foils_sequence(nback_level,trial_num): - # AXA (2-back) or ABXA (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,X,A] - elif nback_level==3: - subseq = [A,B,X,A] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_non_match_no_foils_sequence(nback_level,trial_num): - # AXB (2-back) or ABXC (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,X,B] - elif nback_level==3: - subseq = [A,B,X,C] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_match_with_foil_sequence(nback_level,trial_num): - # AAA (2-back) or AAXA (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,A,A] - elif nback_level==3: - subseq = [A,A,X,A] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_non_match_with_foil_sequence(nback_level,trial_num): - # XAA (2-back) or ABXB (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [X,A,A] - elif nback_level==3: - subseq = [A,B,X,B] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - trial_types = [generate_match_no_foils_sequence, - generate_match_with_foil_sequence, - generate_non_match_no_foils_sequence, - generate_non_match_with_foil_sequence] - stim_seq = trial_types[trial_type](nback_level,trial_num) - # ytarget = [1,1,0,0][trial_type] - # ctxt = spherical_drift(trial_num) - # return stim,ctxt,ytarget - return stim_seq - - # def stim_set_generation(nback_level, num_trials): - # stim_sequence = [] - # # for seq_int, trial in itertools.product(range(4),np.arange(5,trials)): # This generates all length sequences - # for 
trial_type, trial_num in itertools.product(range(4),[num_trials]):  # This generates only longest seq (
-    #                                                                 # num_trials)
-    # return stim_sequence.append(generate_stim_sequence(nback_level, trial_num, trial_type=trial_type, trials=num_trials))
-
-    def get_input_sequence(nback_level, num_trials=NUM_TRIALS):
-        """Get sequence of inputs for a run"""
-        input_set = get_stim_set()
-        # Construct sequence of stimulus indices
-        trial_seq = generate_stim_sequence(nback_level, num_trials)
-        # Return list of corresponding stimulus input vectors
-        return [input_set[trial_seq[i]] for i in range(num_trials)]
-
-    return {model.nodes[MODEL_STIMULUS_INPUT]: get_input_sequence(nback_level, num_trials),
-            model.nodes[MODEL_CONTEXT_INPUT]: [[context_drift_rate]]*num_trials,
-            model.nodes[MODEL_TASK_INPUT]: [get_task_input(nback_level)]*num_trials}
-
-def get_training_inputs(network, num_epochs, nback_levels):
-    """Construct set of training stimuli used by ffn.learn() in train_network()
-    Construct one example of each condition:
-    match:        stim_current = stim_retrieved  and context_current = context_retrieved
-    stim_lure:    stim_current = stim_retrieved  and context_current != context_retrieved
-    context_lure: stim_current != stim_retrieved and context_current == context_retrieved
-    non_lure:     stim_current != stim_retrieved and context_current != context_retrieved
-    """
-    assert is_iterable(nback_levels) and all([0", "image/svg+xml": "[SVG rendering of the N-back Model graph: STIM, CONTEXT and TASK project to the WORKING MEMORY (fnn) cluster (CURRENT STIMULUS, CURRENT CONTEXT, CURRENT TASK, RETRIEVED STIMULUS and RETRIEVED CONTEXT -> HIDDEN LAYER -> DECISION LAYER); STIM and CONTEXT also project to EPISODIC MEMORY (dict), which feeds the RETRIEVED fields; DECISION LAYER -> OBJECTIVE MECHANISM -> READ/WRITE CONTROLLER -> EPISODIC MEMORY (dict)]"
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "nback_model.show_graph(output_fmt='jupyter')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "source": [
-    "### Train the model:"
-   ],
-   "metadata": {
-    "collapsed": false,
-    "pycharm": {
-     "name": "#%% md\n"
-    }
-   }
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "ffn = nback_model.nodes['WORKING MEMORY (fnn)']\n",
-    "train_network(ffn, num_epochs=100)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Run the model:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
-   "outputs": [],
"source": [ - "run_model(nback_model)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file diff --git a/Scripts/Models (Under Development)/N-back/Nback.py b/Scripts/Models (Under Development)/N-back/Nback.py deleted file mode 100644 index abd8173c02a..00000000000 --- a/Scripts/Models (Under Development)/N-back/Nback.py +++ /dev/null @@ -1,507 +0,0 @@ -""" -This implements a model of the `N-back task `_ -described in `Beukers et al. (2022) `_. The model uses a simple implementation of episodic -(content-addressable) memory to store previous stimuli and the temporal context in which they occured, -and a feedforward neural network to evaluate whether the current stimulus is a match to the n'th preceding stimulus -(n-back level). This model is an example of proposed interactions between working memory (e.g., in neocortex) and -episodic memory e.g., in hippocampus and/or cerebellum) in the performance of tasks demanding of sequential processing -and control, and along the lines of models emerging machine learning that augment the use of recurrent neural networks -(e.g., long short-term memory mechanisms; LSTMs) for active memory and control with an external memory capable of -rapid storage and content-based retrieval, such as the Neural Turing Machine (NTN; `Graves et al., 2016 -`_), Episodic Planning Networks (EPN; `Ritter et al., 2020 -`_), and Emergent Symbols through Binding Networks (ESBN; `Webb et al., 2021 -`_). - -There are three primary methods in the script: - -* construct_model(args): - takes as arguments parameters used to construct the model; for convenience, defaults are defined below, - (under "Construction parameters") - -* train_network(args) - takes as arguments the feedforward neural network Composition (FFN_COMPOSITION) and number of epochs to train. - Note: learning_rate is set at construction (can specify using LEARNING_RATE under "Training parameters" below). - -* run_model() - takes the context drift rate to be applied on each trial and the number of trials to execute as args, as well as - reporting and animation specifications (see "Execution parameters" below). - -See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, -and whether a graphic display of the network is generated when it is constructed. - -TODO: - - from Andre - - network architecture; in particular, size of hidden layer and projection patterns to and from it - - the stim+context input vector (length 90) projects to a hidden layer (length 80); - - the task input vector (length 2) projects to a different hidden layer (length 80); - - those two hidden layers project (over fixed, nonlearnable, one-one-projections?) to a third hidden layer (length 80) that simply sums them; - - the third hidden layer projects to the length 2 output layer; - - a softmax is taken over the output layer to determine the response. - - fix: were biases trained? 
- - training: - - learning rate: 0.001; epoch: 1 trial per epoch of training - - fix: state_dict with weights (still needed) - - get empirical stimulus sequences (still needed) - - put N-back script (with pointer to latest version on PNL) in nback-paper repo - - fix: get rid of objective_mechanism (see "VERSION *WITHOUT* ObjectiveMechanism" under control(...) - - fix: warnings on run - - complete documentation in BeukersNbackModel.rst - - validate against nback-paper results - - after validation: - - try with STIM_SIZE = NUM_STIMS rather than 20 (as in nback-paper) - - refactor generate_stim_sequence() to use actual empirical stimulus sequences - - replace get_input_sequence and get_training_inputs with generators passed to nback_model.run() and ffn.learn - -""" - -from graph_scheduler import * - -from psyneulink import * -import numpy as np - -# Settings for running script: -DISPLAY_MODEL = False # show visual graphic of model - -# PARAMETERS ------------------------------------------------------------------------------------------------------- - -# Fixed (structural) parameters: -MAX_NBACK_LEVELS = 3 -NUM_STIM = 8 # number of different stimuli in stimulus set - QUESTION: WHY ISN"T THIS EQUAL TO STIM_SIZE OR VICE VERSA? -FFN_TRANSFER_FUNCTION = ReLU - -# Constructor parameters: (values are from nback-paper) -STIM_SIZE=20 # length of stimulus vector -CONTEXT_SIZE=25 # length of context vector -HIDDEN_SIZE=STIM_SIZE*4 # dimension of hidden units in ff -NBACK_LEVELS = [2,3] # Currently restricted to these -NUM_NBACK_LEVELS = len(NBACK_LEVELS) -CONTEXT_DRIFT_NOISE=0.0 # noise used by DriftOnASphereIntegrator (function of Context mech) -RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections -RETRIEVAL_SOFTMAX_TEMP=1/8 # express as gain # precision of retrieval process -RETRIEVAL_HAZARD_RATE=0.04 # rate of re=sampling of em following non-match determination in a pass through ffn -RETRIEVAL_STIM_WEIGHT=.05 # weighting of stimulus field in retrieval from em -RETRIEVAL_CONTEXT_WEIGHT = 1-RETRIEVAL_STIM_WEIGHT # weighting of context field in retrieval from em -DECISION_SOFTMAX_TEMP=1 - -# Training parameters: -NUM_EPOCHS=3 # nback-paper: 400,000 @ one trial per epoch = 2,500 @ 160 trials per epoch -LEARNING_RATE=0.01 # nback-paper: .001 - -# Execution parameters: -CONTEXT_DRIFT_RATE=.1 # drift rate used for DriftOnASphereIntegrator (function of Context mech) on each trial -NUM_TRIALS = 48 # number of stimuli presented in a trial sequence for a given nback_level during run -REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -REPORT_LEARNING = ReportLearning.OFF # Sets console progress bar during training -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution - -# Names of Compositions and Mechanisms: -NBACK_MODEL = "N-back Model" -FFN_COMPOSITION = "WORKING MEMORY (fnn)" -FFN_STIMULUS_INPUT = "CURRENT STIMULUS" -FFN_CONTEXT_INPUT = "CURRENT CONTEXT" -FFN_STIMULUS_RETRIEVED = "RETRIEVED STIMULUS" -FFN_CONTEXT_RETRIEVED = "RETRIEVED CONTEXT" -FFN_TASK = "CURRENT TASK" -FFN_HIDDEN = "HIDDEN LAYER" -FFN_OUTPUT = "DECISION LAYER" -MODEL_STIMULUS_INPUT ='STIM' -MODEL_CONTEXT_INPUT = 'CONTEXT' -MODEL_TASK_INPUT = "TASK" -EM = "EPISODIC MEMORY (dict)" -CONTROLLER = "READ/WRITE CONTROLLER" - -# ======================================== MODEL CONSTRUCTION ========================================================= - -def 
construct_model(stim_size = STIM_SIZE, - context_size = CONTEXT_SIZE, - hidden_size = HIDDEN_SIZE, - num_nback_levels = NUM_NBACK_LEVELS, - context_drift_noise = CONTEXT_DRIFT_NOISE, - retrievel_softmax_temp = RETRIEVAL_SOFTMAX_TEMP, - retrieval_hazard_rate = RETRIEVAL_HAZARD_RATE, - retrieval_stimulus_weight = RETRIEVAL_STIM_WEIGHT, - retrieval_context_weight = RETRIEVAL_CONTEXT_WEIGHT, - decision_softmax_temp = DECISION_SOFTMAX_TEMP): - """Construct nback_model""" - - print(f'constructing {FFN_COMPOSITION}...') - - # FEED FORWARD NETWORK ----------------------------------------- - - # inputs: encoding of current stimulus and context, retrieved stimulus and retrieved context, - # output: decIsion: match [1,0] or non-match [0,1] - # Must be trained to detect match for specified task (1-back, 2-back, etc.) - input_current_stim = TransferMechanism(name=FFN_STIMULUS_INPUT, - size=stim_size, - function=FFN_TRANSFER_FUNCTION) - input_current_context = TransferMechanism(name=FFN_CONTEXT_INPUT, - size=context_size, - function=FFN_TRANSFER_FUNCTION) - input_retrieved_stim = TransferMechanism(name=FFN_STIMULUS_RETRIEVED, - size=stim_size, - function=FFN_TRANSFER_FUNCTION) - input_retrieved_context = TransferMechanism(name=FFN_CONTEXT_RETRIEVED, - size=context_size, - function=FFN_TRANSFER_FUNCTION) - input_task = TransferMechanism(name=FFN_TASK, - size=num_nback_levels, - function=FFN_TRANSFER_FUNCTION) - hidden = TransferMechanism(name=FFN_HIDDEN, - size=hidden_size, - function=FFN_TRANSFER_FUNCTION) - decision = ProcessingMechanism(name=FFN_OUTPUT, - size=2, function=SoftMax(output=MAX_INDICATOR, - gain=decision_softmax_temp)) - ffn = AutodiffComposition(([{input_current_stim, - input_current_context, - input_retrieved_stim, - input_retrieved_context, - input_task}, - hidden, decision], - RANDOM_WEIGHTS_INITIALIZATION, - ), - name=FFN_COMPOSITION, - learning_rate=LEARNING_RATE - ) - - # FULL MODEL (Outer Composition, including input, EM and control Mechanisms) ------------------------ - - print(f'constructing {NBACK_MODEL}...') - - # Stimulus Encoding: takes STIM_SIZE vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) - - # Context Encoding: takes scalar as drift step for current trial - context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, - function=DriftOnASphereIntegrator( - initializer=np.random.random(context_size-1), - noise=context_drift_noise, - dimension=context_size)) - - # Task: task one-hot indicating n-back (1, 2, 3 etc.) 
- must correspond to what ffn has been trained to do - task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=NUM_NBACK_LEVELS) - - # Episodic Memory: - # - entries: stimulus (field[0]) and context (field[1]); randomly initialized - # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT - em = EpisodicMemoryMechanism(name=EM, - input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, - {NAME:"CONTEXT_FIELD", - SIZE:context_size}], - function=ContentAddressableMemory( - initializer=[[[0]*stim_size, [0]*context_size]], - distance_field_weights=[retrieval_stimulus_weight, - retrieval_context_weight], - # equidistant_entries_select=NEWEST, - selection_function=SoftMax(output=MAX_INDICATOR, - gain=retrievel_softmax_temp)), - ) - - # Control Mechanism - # Ensures current stimulus and context are only encoded in EM once (at beginning of trial) - # by controlling the storage_prob parameter of em: - # - if outcome of decision signifies a match or hazard rate is realized: - # - set EM[store_prob]=1 (as prep encoding stimulus in EM on next trial) - # - this also serves to terminate trial (see nback_model.termination_processing condition) - # - if outcome of decision signifies a non-match - # - set EM[store_prob]=0 (as prep for another retrieval from EM without storage) - # - continue trial - control = ControlMechanism(name=CONTROLLER, - default_variable=[[1]], # Ensure EM[store_prob]=1 at beginning of first trial - # --------- - # VERSION *WITH* ObjectiveMechanism: - objective_mechanism=ObjectiveMechanism(name="OBJECTIVE MECHANISM", - monitor=decision, - # Outcome=1 if match, else 0 - function=lambda x: int(x[0][1]>x[0][0])), - # Set ControlSignal for EM[store_prob] - function=lambda outcome: int(bool(outcome) - or (np.random.random() > retrieval_hazard_rate)), - # --------- - # # VERSION *WITHOUT* ObjectiveMechanism: - # monitor_for_control=decision, - # # Set Evaluate outcome and set ControlSignal for EM[store_prob] - # # - outcome is received from decision as one hot in the form: [[match, no-match]] - # function=lambda outcome: int(int(outcome[0][1]>outcome[0][0]) - # or (np.random.random() > retrieval_hazard_rate)), - # --------- - control=(STORAGE_PROB, em)) - - nback_model = Composition(name=NBACK_MODEL, - nodes=[stim, context, task, ffn, em, control], - # Terminate trial if value of control is still 1 after first pass through execution - termination_processing={TimeScale.TRIAL: And(Condition(lambda: control.value), - AfterPass(0, TimeScale.TRIAL))}, - ) - # # Terminate trial if value of control is still 1 after first pass through execution - # # FIX: ALL OF THE FOLLOWING STOP AFTER ~ NUMBER OF TRIALS (?90+); SHOULD BE: NUM_TRIALS*NUM_NBACK_LEVELS + 1 - # nback_model.scheduler.add_condition(nback_model, And(Condition(lambda: control.value), AfterPass(0, TimeScale.TRIAL))) - # nback_model.scheduler.termination_conds = ({TimeScale.TRIAL: And(Condition(lambda: control.value), - # AfterPass(0, TimeScale.TRIAL))}) - # nback_model.scheduler.termination_conds.update({TimeScale.TRIAL: And(Condition(lambda: control.value), - # AfterPass(0, TimeScale.TRIAL))}) - nback_model.add_projection(MappingProjection(), stim, input_current_stim) - nback_model.add_projection(MappingProjection(), context, input_current_context) - nback_model.add_projection(MappingProjection(), task, input_task) - nback_model.add_projection(MappingProjection(), em.output_ports["RETRIEVED_STIMULUS_FIELD"], input_retrieved_stim) - nback_model.add_projection(MappingProjection(), 
em.output_ports["RETRIEVED_CONTEXT_FIELD"], input_retrieved_context) - nback_model.add_projection(MappingProjection(), stim, em.input_ports["STIMULUS_FIELD"]) - nback_model.add_projection(MappingProjection(), context, em.input_ports["CONTEXT_FIELD"]) - - if DISPLAY_MODEL: - nback_model.show_graph( - # show_cim=True, - # show_node_structure=ALL, - # show_dimensions=True - ) - - print(f'full model constructed') - return nback_model - -# ==========================================STIMULUS GENERATION ======================================================= -# Based on nback-paper - -def get_stim_set(num_stim=STIM_SIZE): - """Construct an array of stimuli for use an experiment""" - # For now, use one-hots - return np.eye(num_stim) - -def get_task_input(nback_level): - """Construct input to task Mechanism for a given nback_level, used by run_model() and train_network()""" - task_input = list(np.zeros_like(NBACK_LEVELS)) - task_input[nback_level-NBACK_LEVELS[0]] = 1 - return task_input - -def get_run_inputs(model, nback_level, context_drift_rate, num_trials): - """Construct set of stimulus inputs for run_model()""" - - def generate_stim_sequence(nback_level, trial_num, trial_type=0, num_stim=NUM_STIM, num_trials=NUM_TRIALS): - assert nback_level in {2,3} # At present, only 2- and 3-back levels are supported - - def gen_subseq_stim(): - A = np.random.randint(0,num_stim) - B = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A]) - ) - C = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A,B]) - ) - X = np.random.choice( - np.setdiff1d(np.arange(num_stim),[A,B]) - ) - return A,B,C,X - - def generate_match_no_foils_sequence(nback_level,trial_num): - # AXA (2-back) or ABXA (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,X,A] - elif nback_level==3: - subseq = [A,B,X,A] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_non_match_no_foils_sequence(nback_level,trial_num): - # AXB (2-back) or ABXC (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,X,B] - elif nback_level==3: - subseq = [A,B,X,C] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_match_with_foil_sequence(nback_level,trial_num): - # AAA (2-back) or AAXA (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [A,A,A] - elif nback_level==3: - subseq = [A,A,X,A] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - def generate_non_match_with_foil_sequence(nback_level,trial_num): - # XAA (2-back) or ABXB (3-back) - seq = np.random.randint(0,num_stim,num_trials) - A,B,C,X = gen_subseq_stim() - # - if nback_level==2: - subseq = [X,A,A] - elif nback_level==3: - subseq = [A,B,X,B] - seq[trial_num-(nback_level+1):trial_num] = subseq - return seq[:trial_num] - - trial_types = [generate_match_no_foils_sequence, - generate_match_with_foil_sequence, - generate_non_match_no_foils_sequence, - generate_non_match_with_foil_sequence] - stim_seq = trial_types[trial_type](nback_level,trial_num) - # ytarget = [1,1,0,0][trial_type] - # ctxt = spherical_drift(trial_num) - # return stim,ctxt,ytarget - return stim_seq - - # def stim_set_generation(nback_level, num_trials): - # stim_sequence = [] - # # for seq_int, trial in itertools.product(range(4),np.arange(5,trials)): # This generates all length sequences - # for 
trial_type, trial_num in itertools.product(range(4),[num_trials]): # This generates only longest seq ( - # # num_trials) - # return stim_sequence.append(generate_stim_sequence(nback_level, trial_num, trial_type=trial_type, trials=num_trials)) - - def get_input_sequence(nback_level, num_trials=NUM_TRIALS): - """Get sequence of inputs for a run""" - input_set = get_stim_set() - # Construct sequence of stimulus indices - trial_seq = generate_stim_sequence(nback_level, num_trials) - # Return list of corresponding stimulus input vectors - return [input_set[trial_seq[i]] for i in range(num_trials)] - - return {model.nodes[MODEL_STIMULUS_INPUT]: get_input_sequence(nback_level, num_trials), - model.nodes[MODEL_CONTEXT_INPUT]: [[context_drift_rate]]*num_trials, - model.nodes[MODEL_TASK_INPUT]: [get_task_input(nback_level)]*num_trials} - -def get_training_inputs(network, num_epochs, nback_levels): - """Construct set of training stimuli used by ffn.learn() in train_network() - Construct one example of each condition: - match: stim_current = stim_retrieved and context_current = context_retrieved - stim_lure: stim_current = stim_retrieved and context_current != context_retrieved - context_lure: stim_current != stim_retrieved and context_current == context_retrieved - non_lure: stim_current != stim_retrieved and context_current != context_retrieved - """ - assert is_iterable(nback_levels) and all([0
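For reference, the four training conditions enumerated in get_training_inputs()'s docstring can be instantiated directly. A minimal sketch (one-hot stimuli and random unit-norm contexts are stand-ins for the script's get_stim_set() and DriftOnASphereIntegrator output; targets follow the match=[1,0] / non-match=[0,1] convention used by the decision layer):

    import numpy as np

    def training_examples(num_stim=8, context_size=25):
        """One (stim_current, stim_retrieved, context_current, context_retrieved, target) per condition."""
        stims = np.eye(num_stim)
        s1, s2 = stims[0], stims[1]                  # two distinct stimuli
        c1 = np.random.randn(context_size); c1 /= np.linalg.norm(c1)
        c2 = np.random.randn(context_size); c2 /= np.linalg.norm(c2)
        return {
            'match':        (s1, s1, c1, c1, [1, 0]),  # same stim,      same context
            'stim_lure':    (s1, s1, c1, c2, [0, 1]),  # same stim,      different context
            'context_lure': (s1, s2, c1, c1, [0, 1]),  # different stim, same context
            'non_lure':     (s1, s2, c1, c2, [0, 1]),  # different stim, different context
        }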