diff --git a/src/optimize/CheKiPEUQ_from_Frhodo.py b/src/optimize/CheKiPEUQ_from_Frhodo.py
index 126bc43..7521b08 100644
--- a/src/optimize/CheKiPEUQ_from_Frhodo.py
+++ b/src/optimize/CheKiPEUQ_from_Frhodo.py
@@ -161,8 +161,5 @@ def get_consolidated_parameters_arrays(rate_constants_values, rate_constants_low
     return pars_values, pars_lower_bnds, pars_upper_bnds, pars_bnds_exist, unbounded_indices
 
 def remove_unbounded_values(array_to_truncate, unbounded_indices):
-    print("line 164", unbounded_indices, array_to_truncate)
     truncated_array = np.delete(array_to_truncate, unbounded_indices, axis=0)
-    print("line 164", unbounded_indices, truncated_array)
-
     return truncated_array
diff --git a/src/optimize/CheKiPEUQ_local/InverseProblem.py b/src/optimize/CheKiPEUQ_local/InverseProblem.py
index 8307b57..489d26e 100644
--- a/src/optimize/CheKiPEUQ_local/InverseProblem.py
+++ b/src/optimize/CheKiPEUQ_local/InverseProblem.py
@@ -110,7 +110,6 @@ def __init__(self, UserInput = None):
                 #We will also fill the model['InputParameterPriorValues'] to have the mean of the two bounds. This can matter for some of the scaling that occurs later.
                 self.UserInput.mu_prior[parameterIndex] = (UserInput.model['InputParameterPriorValues_upperBounds'][parameterIndex] + UserInput.model['InputParameterPriorValues_lowerBounds'][parameterIndex])/2
-        print("line 112 of CheKiPEUQ", UserInput.InputParametersPriorValuesUncertainties)
         #Now to make covmat. Leaving the original dictionary object intact, but making a new object to make covmat_prior.
         if len(np.shape(UserInput.InputParametersPriorValuesUncertainties)) == 1 and (len(UserInput.InputParametersPriorValuesUncertainties) > 0): #If it's a 1D array/list that is filled, we'll diagonalize it.
             UserInput.std_prior = np.array(UserInput.InputParametersPriorValuesUncertainties, dtype='float') #using 32 since not everyone has 64.
diff --git a/src/optimize/fit_fcn.py b/src/optimize/fit_fcn.py
index 3c090f2..4979d2f 100644
--- a/src/optimize/fit_fcn.py
+++ b/src/optimize/fit_fcn.py
@@ -450,7 +450,7 @@ def get_last_obs_sim_interp(varying_rate_vals): #A. Savara added this. It needs
             SSE = generalized_loss_fcn(loss_resid, mu=loss_min)
             SSE = rescale_loss_fcn(loss_resid, SSE)
             exp_loss_weights = loss_exp/SSE # comparison is between selected loss fcn and SSE (L2 loss)
-            Bayesian_dict['weights_data'] = np.concatenate(aggregate_weights*exp_loss_weights, axis=0)
+            Bayesian_dict['weights_data'] = np.concatenate(aggregate_weights*exp_loss_weights, axis=0).flatten()
             #Bayesian_dict['weights_data'] /= np.max(Bayesian_dict['weights_data']) # if we want to normalize by maximum
@@ -491,7 +491,6 @@ def get_last_obs_sim_interp(varying_rate_vals): #A. Savara added this. It needs
             log_posterior_density = optimize.CheKiPEUQ_from_Frhodo.get_log_posterior_density(CheKiPEUQ_PE_object, Bayesian_dict['pars_current_guess_truncated'])
             #Step 5 of Bayesian: return the objective function and any other metrics desired.
             obj_fcn = -1*log_posterior_density #need neg_logP because minimizing.
-            print("line 481 of fit_fcn, Bayesian obj_fcn", obj_fcn)
             # For updating
             self.i += 1
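
A note on the `remove_unbounded_values` helper now that its debug prints are gone: it relies on `np.delete(array_to_truncate, unbounded_indices, axis=0)`, which returns a copy with the rows at `unbounded_indices` removed, so entries for parameters without real bounds can be dropped from the consolidated bounds arrays. A minimal sketch with made-up values:

```python
import numpy as np

# Made-up stand-ins: four parameters, two of which have no finite lower bound.
pars_lower_bnds = np.array([0.1, -np.inf, 0.5, -np.inf])
unbounded_indices = [1, 3]

# np.delete returns a copy with those entries removed (axis=0 -> drop rows/entries).
truncated = np.delete(pars_lower_bnds, unbounded_indices, axis=0)
print(truncated)  # [0.1 0.5]
```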
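
The one behavioral change in this diff is the added `.flatten()` in `fit_fcn.py`; everything else is debug-print removal. The reason for it, as I read the code: `aggregate_weights` holds one weight array per experiment, and if those arrays are `(k, 1)` columns, `np.concatenate(..., axis=0)` still yields a 2-D `(n, 1)` result, while the downstream CheKiPEUQ call expects a flat 1-D weight vector. A minimal sketch, with hypothetical shapes standing in for the real per-experiment arrays:

```python
import numpy as np

# Hypothetical stand-ins: two experiments contributing (k, 1) weight columns.
aggregate_weights = np.empty(2, dtype=object)
aggregate_weights[0] = np.ones((3, 1))
aggregate_weights[1] = np.ones((2, 1))
exp_loss_weights = 0.5  # scalar loss-rescaling factor, value made up

weighted = aggregate_weights * exp_loss_weights   # object array: each (k, 1) block scaled
stacked = np.concatenate(weighted, axis=0)        # shape (5, 1) -- still 2-D
flat = stacked.flatten()                          # shape (5,) -- the 1-D vector wanted

print(stacked.shape, flat.shape)                  # (5, 1) (5,)
```

Using `.flatten()` rather than `.squeeze()` makes the 1-D result unconditional, regardless of how the individual per-experiment blocks happen to be shaped.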