Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Che ki peuq integration v3 #10

Merged
merged 2 commits into from
Jan 28, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions src/optimize/CheKiPEUQ_from_Frhodo.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,8 +161,5 @@ def get_consolidated_parameters_arrays(rate_constants_values, rate_constants_low
return pars_values, pars_lower_bnds, pars_upper_bnds, pars_bnds_exist, unbounded_indices

def remove_unbounded_values(array_to_truncate, unbounded_indices):
    """Return a copy of `array_to_truncate` with the rows at `unbounded_indices` removed.

    Parameters
    ----------
    array_to_truncate : array_like
        Array (or list) whose leading-axis entries will be filtered.
    unbounded_indices : array_like of int
        Indices along axis 0 to delete (e.g. parameters with no bounds).

    Returns
    -------
    numpy.ndarray
        New array with the selected rows removed; the input is not modified
        (np.delete always returns a new array).
    """
    # Leftover debug `print` statements removed; np.delete does all the work.
    truncated_array = np.delete(array_to_truncate, unbounded_indices, axis=0)
    return truncated_array
1 change: 0 additions & 1 deletion src/optimize/CheKiPEUQ_local/InverseProblem.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,6 @@ def __init__(self, UserInput = None):
#We will also fill the model['InputParameterPriorValues'] to have the mean of the two bounds. This can matter for some of the scaling that occurs later.
self.UserInput.mu_prior[parameterIndex] = (UserInput.model['InputParameterPriorValues_upperBounds'][parameterIndex] + UserInput.model['InputParameterPriorValues_lowerBounds'][parameterIndex])/2

print("line 112 of CheKiPEUQ", UserInput.InputParametersPriorValuesUncertainties)
#Now to make covmat. Leaving the original dictionary object intact, but making a new object to make covmat_prior.
if len(np.shape(UserInput.InputParametersPriorValuesUncertainties)) == 1 and (len(UserInput.InputParametersPriorValuesUncertainties) > 0): #If it's a 1D array/list that is filled, we'll diagonalize it.
UserInput.std_prior = np.array(UserInput.InputParametersPriorValuesUncertainties, dtype='float') #using 32 since not everyone has 64.
Expand Down
3 changes: 1 addition & 2 deletions src/optimize/fit_fcn.py
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ def get_last_obs_sim_interp(varying_rate_vals): #A. Savara added this. It needs
SSE = generalized_loss_fcn(loss_resid, mu=loss_min)
SSE = rescale_loss_fcn(loss_resid, SSE)
exp_loss_weights = loss_exp/SSE # comparison is between selected loss fcn and SSE (L2 loss)
Bayesian_dict['weights_data'] = np.concatenate(aggregate_weights*exp_loss_weights, axis=0)
Bayesian_dict['weights_data'] = np.concatenate(aggregate_weights*exp_loss_weights, axis=0).flatten()

#Bayesian_dict['weights_data'] /= np.max(Bayesian_dict['weights_data']) # if we want to normalize by maximum

Expand Down Expand Up @@ -491,7 +491,6 @@ def get_last_obs_sim_interp(varying_rate_vals): #A. Savara added this. It needs
log_posterior_density = optimize.CheKiPEUQ_from_Frhodo.get_log_posterior_density(CheKiPEUQ_PE_object, Bayesian_dict['pars_current_guess_truncated'])
#Step 5 of Bayesian: return the objective function and any other metrics desired.
obj_fcn = -1*log_posterior_density #need neg_logP because minimizing.
print("line 481 of fit_fcn, Bayesian obj_fcn", obj_fcn)

# For updating
self.i += 1
Expand Down