diff --git a/psyneulink/core/components/functions/optimizationfunctions.py b/psyneulink/core/components/functions/optimizationfunctions.py
index e4e6a2cf37f..a6729fdb16b 100644
--- a/psyneulink/core/components/functions/optimizationfunctions.py
+++ b/psyneulink/core/components/functions/optimizationfunctions.py
@@ -2236,8 +2236,8 @@ def __init__(self,
         # its crap.
         random_state = np.random.RandomState([seed])

-        # Has the ELFI model been setup yet. Nope!
-        self._is_model_initialized = False
+        # Our instance of elfi model
+        self._elfi_model = None

         # The simulator function we will pass to ELFI, this is really the objective_function
         # with some stuff wrapped around it to massage its return values and arguments.
@@ -2362,7 +2362,7 @@ def _initialize_model(self, context):
         """

         # If the model has not been initialized, try to do it.
-        if not self._is_model_initialized:
+        if self._elfi_model is None:

             elfi = ParamEstimationFunction._import_elfi()

@@ -2374,6 +2374,10 @@ def _initialize_model(self, context):
             if self._sim_func is None:
                 return

+            old_model = elfi.get_default_model()
+
+            my_elfi_model = elfi.new_model(self.name, True)
+
             # FIXME: A lot of checking needs to happen, here. Correct order, valid elfi prior, etc.
             # Construct the ELFI priors from the list of prior specifcations
             elfi_priors = [elfi.Prior(*args, name=param_name) for param_name, args in self._priors.items()]
@@ -2393,8 +2397,11 @@ def _initialize_model(self, context):

             self._sampler = elfi.Rejection(d, batch_size=1, seed=self._seed)

-            # Mark that we are initialized
-            self._is_model_initialized = True
+            # Store our new model
+            self._elfi_model = my_elfi_model
+
+            # Restore the previous default
+            elfi.set_default_model(old_model)

     def function(self,
                  variable=None,
@@ -2429,15 +2436,19 @@ def function(self,
         return_optimal_value= 0.0

         # Try to initialize the model if it hasn't been.
-        if not self._is_model_initialized:
+        if self._elfi_model is None:
             self._initialize_model(context)

         # Intialization can fail for reasons silenty, mainly that PsyNeuLink needs to
         # invoke these functions multiple times during initialization. We only want
         # to proceed if this is the real deal.
-        if not self._is_model_initialized:
+        if self._elfi_model is None:
             return return_optimal_sample, return_optimal_value, return_all_samples, return_all_values

+        elfi = ParamEstimationFunction._import_elfi()
+
+        old_model = elfi.get_default_model()
+        elfi.set_default_model(self._elfi_model)

         # Run the sampler
         result = self._sampler.sample(**self._sampler_args)
@@ -2453,5 +2464,8 @@ def function(self,
         return_all_samples = np.array(result.samples_array)
         return_all_values = np.array(result.discrepancies)

+        # Restore the old default
+        elfi.set_default_model(old_model)
+
         print(result)
         return return_optimal_sample, return_optimal_value, return_all_samples, return_all_values
diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py
index 8a6d356dc56..f3718aab0a0 100644
--- a/psyneulink/core/compositions/composition.py
+++ b/psyneulink/core/compositions/composition.py
@@ -7335,7 +7335,8 @@ def _get_total_cost_of_control_allocation(self, control_allocation, context, run
         )

         # Get control signal costs
-        all_costs = convert_to_np_array(self.controller.parameters.costs._get(context) + [reconfiguration_cost])
+        other_costs = self.controller.parameters.costs._get(context) or []
+        all_costs = convert_to_np_array(other_costs + [reconfiguration_cost])
         # Compute a total for the candidate control signal(s)
         total_cost = self.controller.combine_costs(all_costs)
         return total_cost
diff --git a/tests/control/test_param_estimation.py b/tests/control/test_param_estimation.py
index 95af2bd4217..636f7ce750c 100644
--- a/tests/control/test_param_estimation.py
+++ b/tests/control/test_param_estimation.py
@@ -10,9 +10,10 @@
 from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import OptimizationControlMechanism
 from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal
 from psyneulink.core.globals.keywords import OVERRIDE
-from psyneulink.core.components.functions.optimizationfunctions import ParamEstimationFunction
+from psyneulink.core.components.functions.optimizationfunctions import ParamEstimationFunction, GridSearch, MINIMIZE


-def test_moving_average():
+@pytest.mark.parametrize("mode", ['elfi', 'GridSearch'])
+def test_moving_average(mode):
     # Set an arbitrary seed and a global random state to keep the randomly generated quantities the same between runs
     seed = 20170530 # this will be separately given to ELFI
@@ -43,7 +44,7 @@ def MA2(input=[0], t1=0.5, t2=0.5, n_obs=100, batch_size=1, random_state=None):
         return x

     # Lets make some observed data. This will be the data we try to fit parameters for.
-    y_obs = MA2(t1_true, t2_true)
+    y_obs = MA2(t1=t1_true, t2=t2_true)

     # Make a processing mechanism out of our simulator.
     ma_mech = ProcessingMechanism(function=MA2,
@@ -59,15 +60,13 @@ def MA2(input=[0], t1=0.5, t2=0.5, n_obs=100, batch_size=1, random_state=None):
     signalSearchRange = SampleSpec(start=0.1, stop=2.0, step=0.2)
     t1_control_signal = ControlSignal(projections=[('t1', ma_mech)],
                                       allocation_samples=signalSearchRange,
+                                      cost_options=[],
                                       modulation=OVERRIDE)
     t2_control_signal = ControlSignal(projections=[('t2', ma_mech)],
                                       allocation_samples=signalSearchRange,
+                                      cost_options=[],
                                       modulation=OVERRIDE)
-    # A function to calculate the auto-covariance with specific lag for a
-    # time series. We will use this function to compute the summary statistics
-    # for generated and observed data so that we can compute a metric between the
-    # two. In PsyNeuLink terms, this will be part of an ObjectiveMechanism.
     # A function to calculate the auto-covariance with specific lag for a
     # time series. We will use this function to compute the summary statistics
     # for generated and observed data so that we can compute a metric between the
     # two. In PsyNeuLink terms, this will be part of an ObjectiveMechanism.
@@ -90,18 +89,37 @@ def autocov(agent_rep, x=None, lag=1):
     #                                     monitor=[ma_mech])

     # Setup the controller with the ParamEstimationFunction
-    comp.add_controller(
-        controller=OptimizationControlMechanism(
-            agent_rep=comp,
-            function=ParamEstimationFunction(
-                priors={'t1': (scipy.stats.uniform, 0, 2), 't2': (scipy.stats.uniform, 0, 2)},
-                observed=y_obs,
-                summary=[(autocov, 1), (autocov, 2)],
-                discrepancy='euclidean',
-                n_samples=3, quantile=0.01, # Set very small now cause things are slow.
-                seed=seed),
-            objective_mechanism=False,
-            control_signals=[t1_control_signal, t2_control_signal]))
+    if mode == 'elfi':
+        comp.add_controller(
+            controller=OptimizationControlMechanism(
+                agent_rep=comp,
+                function=ParamEstimationFunction(
+                    priors={'t1': (scipy.stats.uniform, 0, 2),
+                            't2': (scipy.stats.uniform, 0, 2)},
+                    observed=y_obs,
+                    summary=[(autocov, 1), (autocov, 2)],
+                    discrepancy='euclidean',
+                    n_samples=3, quantile=0.01, # Set very small now cause things are slow.
+                    seed=seed),
+                objective_mechanism=False,
+                control_signals=[t1_control_signal, t2_control_signal]))
+    elif mode == 'GridSearch':
+        observed_C = np.array([autocov(None, y_obs, 1), autocov(None, y_obs, 2)])
+        def objective_f(val):
+            C = np.array([autocov(None, val, 1), autocov(None, val, 2)])
+            ret = np.linalg.norm(C - observed_C)
+            return ret
+
+        objective_mech = ObjectiveMechanism(function=objective_f,
+                                            size=len(y_obs[0]),
+                                            monitor=[ma_mech],
+                                            name='autocov - observed autocov')
+        comp.add_controller(
+            controller=OptimizationControlMechanism(
+                agent_rep=comp,
+                function=GridSearch(save_values=True, direction=MINIMIZE),
+                objective_mechanism=objective_mech,
+                control_signals=[t1_control_signal, t2_control_signal]))

     comp.disable_all_history()

@@ -117,7 +135,7 @@ def autocov(agent_rep, x=None, lag=1):

     comp.run(inputs=stim_list_dict)

-    # FIXME: The final test should be to check if the true parameters set above are
-    #        recovered approximately. Not sure how to get all the samples out from
-    #        above yet though so just pass for now.
-    assert True
+    if mode == 'elfi':
+        assert np.allclose(comp.controller.value, [[0.5314349], [0.19140103]])
+    if mode == 'GridSearch':
+        assert np.allclose(comp.controller.value, [[0.5], [0.3]])
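
A minimal standalone sketch (illustration only, not part of the patch) of the ELFI default-model save/restore pattern that _initialize_model() and function() now use, assuming ELFI is installed; the model name 'ma2_demo' and the local variable names are placeholders chosen here for illustration:

    import elfi
    import scipy.stats

    # Remember whichever model is currently ELFI's global default.
    old_model = elfi.get_default_model()

    # Create a private model and make it the default only while nodes are attached,
    # as _initialize_model() does with elfi.new_model(self.name, True).
    private_model = elfi.new_model('ma2_demo', True)
    t1 = elfi.Prior(scipy.stats.uniform, 0, 2, name='t1')  # priors attach to the new default
    t2 = elfi.Prior(scipy.stats.uniform, 0, 2, name='t2')

    # Hand the global default back; the private model can be re-activated with
    # elfi.set_default_model(private_model) just before sampling, as function() does.
    elfi.set_default_model(old_model)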