Rolling back to working CRS2 Troe opt
tsikes committed Aug 20, 2021
1 parent 3c577e9 commit b95d5a6
Showing 1 changed file with 108 additions and 81 deletions.
189 changes: 108 additions & 81 deletions src/calculate/optimize/fit_coeffs.py
@@ -5,7 +5,7 @@
import numpy as np
from numba import jit
import cantera as ct
import nlopt, pygmo, rbfopt # add rbfopt, supposed to be quite good
import nlopt
import warnings, sys, platform, pathlib, io, contextlib
from copy import deepcopy
from scipy.optimize import curve_fit, minimize, root_scalar, OptimizeWarning, least_squares, approx_fprime
@@ -61,6 +61,10 @@
'T2': {'-': [-1E4, 1E6], '+': [-1E4, 1E6]}}


@jit(nopython=True, error_model='numpy')
def ln_arrhenius_k(T, Ea, ln_A, n): # modified Arrhenius ln(k); used for the LPL and HPL limits
return ln_A + n*np.log(T) - Ea/(Ru*T)
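
For orientation, a minimal sketch of calling the new jitted helper on an array of temperatures. The coefficient values and the Ru definition below are illustrative assumptions; the module defines its own Ru and obtains coefficients from the fits.

```python
import numpy as np
from numba import jit

Ru = 1.987204  # gas constant, assumed cal/(mol*K); the module's own Ru may use different units

@jit(nopython=True, error_model='numpy')
def ln_arrhenius_k(T, Ea, ln_A, n):
    # modified Arrhenius in log form: ln(k) = ln(A) + n*ln(T) - Ea/(Ru*T)
    return ln_A + n*np.log(T) - Ea/(Ru*T)

T = np.array([800.0, 1200.0, 1600.0])                    # temperatures [K]
ln_k = ln_arrhenius_k(T, 35000.0, np.log(2.0E14), 0.0)   # illustrative Ea, ln(A), n
k = np.exp(ln_k)                                         # rate constant itself
```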

def fit_arrhenius(rates, T, x0=[], coefNames=default_arrhenius_coefNames, bnds=[], loss='linear'):
def fit_fcn_decorator(x0, alter_idx, jac=False):
def set_coeffs(*args):
@@ -70,8 +74,8 @@ def set_coeffs(*args):
return coeffs

def ln_arrhenius(T, *args):
[Ea, ln_A, n] = set_coeffs(*args)
return ln_A + n*np.log(T) - Ea/(Ru*T)
x = set_coeffs(*args)
return ln_arrhenius_k(T, *x)

def ln_arrhenius_jac(T, *args):
[Ea, ln_A, n] = set_coeffs(*args)
@@ -172,8 +176,9 @@ def ln_Troe(T, M, *x): # LPL, HPL, Fcent
Ea_inf, ln_A_inf, n_inf = x[3:6]
Fcent_coeffs = x[-4:] # A, T3, T1, T2

ln_k_0 = ln_A_0 + n_0*np.log(T) - Ea_0/(Ru*T)
ln_k_inf = ln_A_inf + n_inf*np.log(T) - Ea_inf/(Ru*T)

ln_k_0 = ln_arrhenius_k(T, Ea_0, ln_A_0, n_0)
ln_k_inf = ln_arrhenius_k(T, Ea_inf, ln_A_inf, n_inf)

for idx in np.argwhere(ln_k_0 < min_ln_val):
ln_k_0[idx[0], idx[1]] = min_ln_val
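
The remainder of ln_Troe falls outside this hunk. For context, here is a sketch of the standard Troe falloff blend that a routine like this typically applies once ln_k_0, ln_k_inf, and Fcent are available; the function name and the exact form used by this file are assumptions, not taken from the diff.

```python
import numpy as np

def ln_troe_rate(ln_k_0, ln_k_inf, M, Fcent):
    # standard Troe blend (sketch): k = k_inf * Pr/(1 + Pr) * F, with Pr = k_0*[M]/k_inf
    ln_Pr = ln_k_0 + np.log(M) - ln_k_inf
    log10_Pr = ln_Pr/np.log(10)
    log10_Fcent = np.log10(Fcent)

    c = -0.4 - 0.67*log10_Fcent
    n = 0.75 - 1.27*log10_Fcent
    f1 = (log10_Pr + c)/(n - 0.14*(log10_Pr + c))
    log10_F = log10_Fcent/(1.0 + f1**2)                  # Troe broadening factor

    return ln_k_inf + ln_Pr - np.log(1.0 + np.exp(ln_Pr)) + log10_F*np.log(10)
```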
@@ -307,94 +312,35 @@ def fit(self):
self.s = self.calc_s(p0_opt)
bnds = (self.p_bnds-self.p0)/self.s


'''
#opt = nlopt.opt(nlopt.AUGLAG, len(self.p0))
opt = nlopt.opt(self.algo['algorithm'], len(self.p0))

opt = nlopt.opt(nlopt.AUGLAG, len(self.p0))
#opt = nlopt.opt(self.algo['algorithm'], len(self.p0))

opt.set_min_objective(self.objective)
#opt.add_inequality_constraint(self.Fcent_constraint, 0.0)
#opt.add_inequality_constraint(self.Arrhenius_constraint, 1E-8)
opt.add_inequality_constraint(self.Fcent_constraint, 0.0)
opt.add_inequality_constraint(self.Arrhenius_constraint, 1E-8)
opt.set_maxeval(self.algo['max_eval'])
#opt.set_maxtime(10)

opt.set_xtol_rel(self.algo['xtol_rel'])
opt.set_ftol_rel(self.algo['ftol_rel'])

p0_opt = np.zeros_like(self.p0)
self.s = self.calc_s(p0_opt)
opt.set_lower_bounds((self.p_bnds[0]-self.p0)/self.s)
opt.set_upper_bounds((self.p_bnds[1]-self.p0)/self.s)
opt.set_lower_bounds(bnds[0])
opt.set_upper_bounds(bnds[1])

opt.set_initial_step(self.algo['initial_step'])
#opt.set_population(int(np.rint(10*(len(idx)+1)*10)))

#sub_opt = nlopt.opt(self.algo['algorithm'], len(self.p0))
#sub_opt.set_initial_step(self.algo['initial_step'])
#sub_opt.set_xtol_rel(self.algo['xtol_rel'])
#sub_opt.set_ftol_rel(self.algo['ftol_rel'])
#opt.set_local_optimizer(sub_opt)
sub_opt = nlopt.opt(self.algo['algorithm'], len(self.p0))
sub_opt.set_initial_step(self.algo['initial_step'])
sub_opt.set_xtol_rel(self.algo['xtol_rel'])
sub_opt.set_ftol_rel(self.algo['ftol_rel'])
opt.set_local_optimizer(sub_opt)

x_fit = opt.optimize(p0_opt) # optimize!
#print('Fcent_constraint: ', self.Fcent_constraint(x_fit))
#print('Arrhe_constraint: ', self.Arrhenius_constraint(x_fit))
x_fit = self.set_x_from_opt(x_fit)
'''

class pygmo_objective_fcn:
def __init__(self, obj_fcn, bnds):
self.obj_fcn = obj_fcn
self.bnds = bnds

def fitness(self, x):
return [self.obj_fcn(x)]

def get_bounds(self):
return self.bnds

def gradient(self, x):
return pygmo.estimate_gradient_h(lambda x: self.fitness(x), x)

pop_size = int(np.max([35, 5*(len(p0_opt)+1)]))
num_gen = int(np.ceil(self.algo['max_eval']/pop_size))

prob = pygmo.problem(pygmo_objective_fcn(self.objective, bnds))
pop = pygmo.population(prob, pop_size)
pop.push_back(x = p0_opt) # puts initial guess into the initial population

#F = (0.107 - 0.141)/(1 + (num_gen/225)**7.75)
#F = 0.2
#CR = 0.8032*np.exp(-1.165E-3*num_gen)
#algo = pygmo.algorithm(pygmo.de(gen=num_gen, F=F, CR=CR, variant=6))
algo = pygmo.algorithm(pygmo.sade(gen=num_gen, variant=6))
#algo = pygmo.algorithm(pygmo.pso_gen(gen=num_gen))
#algo = pygmo.algorithm(pygmo.ipopt())
#algo = pygmo.algorithm(pygmo.gwo(gen=num_gen))

pop = algo.evolve(pop)

x_fit = pop.champion_x

'''
max_eval = int(self.algo['max_eval'])
max_time = 1E30
stdout = io.StringIO()
stderr = io.StringIO()
with contextlib.redirect_stderr(stderr):
with contextlib.redirect_stdout(stdout):
var_type = ['R']*np.size(p0_opt) # specifies that all variables are continuous
bb = rbfopt.RbfoptUserBlackBox(np.size(p0_opt), np.array(bnds[0]), np.array(bnds[1]),
np.array(var_type), self.objective)
settings = rbfopt.RbfoptSettings(max_iterations=max_eval,
max_clock_time=max_time,
minlp_solver_path=path['bonmin'],
nlp_solver_path=path['ipopt'])
algo = rbfopt.RbfoptAlgorithm(settings, bb, init_node_pos=p0_opt)
val, x, itercount, evalcount, fast_evalcount = algo.optimize()
x_fit = x
'''

x_fit = self.set_x_from_opt(x_fit)

#print('nlopt:', x_fit)
@@ -408,9 +354,7 @@ def gradient(self, x):
x_fit[1] = np.exp(x_fit[1])
x_fit[4] = np.exp(x_fit[4])

#res = {'x': x_fit, 'fval': opt.last_optimum_value(), 'nfev': opt.get_numevals()}
res = {'x': x_fit, 'fval': pop.champion_f[0], 'nfev': pop.problem.get_fevals()}
#res = {'x': x_fit, 'fval': val, 'nfev': evalcount + fast_evalcount}
res = {'x': x_fit, 'fval': opt.last_optimum_value(), 'nfev': opt.get_numevals()}

return res

Expand Down Expand Up @@ -474,7 +418,7 @@ def jacobian(self, T, *x): # defunct

return jac

def objective(self, x_fit, grad=np.array([]), obj_type='obj_sum', aug_lagrangian=True):
def objective(self, x_fit, grad=np.array([]), obj_type='obj_sum', aug_lagrangian=False):
x = self.set_x_from_opt(x_fit)
T = self.T
M = self.M
@@ -558,6 +502,89 @@ def objective(self, x_fit, grad=np.array([]), obj_type='obj_sum', aug_lagrangian

return obj_val

def Fcent_constraint(self, x_fit, grad=np.array([])):
def f_fp(T, A, T3, T1, T2, fprime=False, fprime2=False): # dFcent_dT
f = T2/T**2*np.exp(-T2/T) - (1-A)/T3*np.exp(-T/T3) - A/T1*np.exp(-T/T1)

if not fprime and not fprime2:
return f
elif fprime and not fprime2:
fp = T2*(T2 - 2*T)/T**4*np.exp(-T2/T) + (1-A)/T3**2*np.exp(-T/T3) +A/T1**2*np.exp(-T/T1)
return f, fp

x = self.set_x_from_opt(x_fit)
[A, T3, T1, T2] = x[-4:]
Tmin = self.Tmin
Tmax = self.Tmax

try:
T_deriv_eq_0 = root_scalar(lambda T: f_fp(T, A, T3, T1, T2),
x0=(Tmax+Tmin)/4, x1=3*(Tmax+Tmin)/4, method='secant').root
T = np.array([Tmin, T_deriv_eq_0, Tmax])
except:
T = np.array([Tmin, Tmax])

if len(T) == 3:
print(T)

Fcent = Fcent_calc(T, A, T3, T1, T2) #TODO: OVERFLOW WARNING HERE
min_con = np.max(self.Fcent_min - Fcent)
max_con = np.max(Fcent - 1.0)
con = np.max([max_con, min_con])*1E8

if grad.size > 0:
grad[:] = self.constraint_gradient(x, numerical_gradient=self.Fcent_constraint)

return con
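
The analytic derivative in f_fp matches the standard four-parameter Troe centering function. Fcent_calc itself is defined elsewhere in this file and is not shown here; the sketch below uses the standard form as an assumption and checks f_fp's expression against a finite difference.

```python
import numpy as np

def fcent_troe(T, A, T3, T1, T2):
    # standard Troe centering function; f_fp above is its analytic dFcent/dT
    return (1.0 - A)*np.exp(-T/T3) + A*np.exp(-T/T1) + np.exp(-T2/T)

# illustrative coefficients for a quick consistency check
T, A, T3, T1, T2 = 1000.0, 0.6, 200.0, 1500.0, 5000.0
h = 1E-3
dFdT_numeric = (fcent_troe(T + h, A, T3, T1, T2) - fcent_troe(T - h, A, T3, T1, T2))/(2*h)
dFdT_analytic = T2/T**2*np.exp(-T2/T) - (1.0 - A)/T3*np.exp(-T/T3) - A/T1*np.exp(-T/T1)
assert abs(dFdT_numeric - dFdT_analytic) < 1E-9
```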

def Arrhenius_constraint(self, x_fit, grad=np.array([])):
x = self.set_x_from_opt(x_fit)

T = self.T
T_max = T[-1]

Ea_0, ln_A_0, n_0 = x[:3]
Ea_inf, ln_A_inf, n_inf = x[3:6]

ln_k_0 = ln_A_0 + n_0*np.log(T_max) - Ea_0/(Ru*T_max)
ln_k_inf = ln_A_inf + n_inf*np.log(T_max) - Ea_inf/(Ru*T_max)

ln_k_limit_max = np.max([ln_k_0, ln_k_inf])

con = ln_k_limit_max - ln_k_max

if grad.size > 0:
grad[:] = self.constraint_gradient(x, numerical_gradient=self.Arrhenius_constraint)

return con

def constraints(self, x_fit, grad=np.array([])):
x = self.set_x_from_opt(x_fit)
T = self.T

Ea_0, ln_A_0, n_0 = x[:3]
Ea_inf, ln_A_inf, n_inf = x[3:6]
Fcent_coeffs = x[-4:] # A, T3, T1, T2

con = []

# Arrhenius constraints
T_max = T[0,-1]

ln_k_0 = ln_arrhenius_k(T_max, Ea_0, ln_A_0, n_0)
ln_k_inf = ln_arrhenius_k(T_max, Ea_inf, ln_A_inf, n_inf)

con.append(np.max([0, ln_k_max - ln_k_0])) # ln_k_0 <= ln_k_max
con.append(np.max([0, ln_k_max - ln_k_inf])) # ln_k_inf <= ln_k_max

# Fcent constraints
T_con = np.array([self.Tmin, *T[0,:], self.Tmax])
Fcent = Fcent_calc(T_con, *Fcent_coeffs)
con.append(np.max([0, np.min(Fcent - self.Fcent_min)])) # Fcent >= Fcent_min
con.append(np.max([0, np.min(1.0 - Fcent)])) # Fcent <= 1.0

con = np.array(con)

return con

def objective_gradient(self, x, resid=[], numerical_gradient=False):
if numerical_gradient:
#x = (x - self.p0)/self.s
