Troe opt changes
Fixed a bug from updating dependencies.
tsikes committed Aug 3, 2021
1 parent 3735e38 commit 44d7ddb
Showing 3 changed files with 167 additions and 109 deletions.
19 changes: 11 additions & 8 deletions frhodo_environment.yml
@@ -4,12 +4,15 @@ channels:
- conda-forge
- defaults
dependencies:
- cantera=2.5.0b1
- matplotlib=3.3.1
- nlopt=2.6.1
- numpy=1.19.1
- pyside2=5.13.1
- cantera=2.6.0a1
- matplotlib=3.4.2
- nlopt=2.7.0
- numpy=1.21.1
- pip
- pyside2=5.13.2
- qtpy=1.9.0
- requests=2.24.0
- scipy=1.5.2
- tabulate=0.8.7
- requests=2.26.0
- scipy=1.7.0
- tabulate=0.8.9
- pip:
- dtcwt==0.12.0
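
Note: with the pins above bumped (cantera 2.6.0a1, numpy 1.21.1, etc.) and dtcwt now installed through pip for the wavelet smoothing used below, the environment would typically be rebuilt with `conda env create -f frhodo_environment.yml`, or refreshed in place with `conda env update -f frhodo_environment.yml --prune` — standard conda usage, noted here only as a convenience.
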
233 changes: 147 additions & 86 deletions src/calculate/optimize/fit_coeffs.py
@@ -13,6 +13,7 @@

from calculate.convert_units import OoM
from calculate.optimize.misc_fcns import generalized_loss_fcn, set_arrhenius_bnds
from calculate.smooth_data import dual_tree_complex_wavelet_filter

Ru = ct.gas_constant
# Ru = 1.98720425864083
@@ -27,6 +28,10 @@
'activation_energy_inf', 'pre_exponential_factor_inf', 'temperature_exponent_inf',
'A', 'T3', 'T1', 'T2']

#falloff_bnds = np.array([[1E-12, 1E-12, 0.1], [1E30, 1E30, 1]])
#falloff_bnds[:,0:2] = np.log(falloff_bnds[:,0:2]) # ln(k_0), ln(k_inf), Fcent
falloff_bnds = np.log([[1E-6, 1E-6, 1E-3], [1E25, 1E25, 1]]) # ln(k_0), ln(k_inf), ln(Fcent)

troe_falloff_0 = [[0.6, 200, 600, 1200], # (0, 0, 0)
# (0, 0, 1)
[0.05, 1000, -2000, 3000], # (0, 1, 0)
@@ -37,8 +42,6 @@
# (1, 1, 1)
]

falloff_bnds = np.log([[1E-12, 1E-12, 0.1], [1E30, 1E30, 1]]) # ln(k_0), ln(k_inf), ln(Fcent)

#troe_min_pos_system_value = (np.finfo(float).tiny*(1E20))**(1/3)
#troe_max_pos_system_value = (np.finfo(float).max*(1E-20))**(1/3)
#troe_min_neg_system_value = -max_pos_system_value
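
Note: the replacement `falloff_bnds` above is both tighter (1E-6..1E25 instead of 1E-12..1E30 for the rate limits) and now stores ln(Fcent) rather than Fcent, so all three fit variables live in ln space. A minimal sketch of what those bounds mean in physical units (illustrative only):

```python
import numpy as np

# ln-space bounds exactly as in the diff: ln(k_0), ln(k_inf), ln(Fcent)
falloff_bnds = np.log([[1E-6, 1E-6, 1E-3], [1E25, 1E25, 1]])

lower, upper = np.exp(falloff_bnds)  # back to physical units
# lower ~ [1e-06, 1e-06, 1e-03]; upper ~ [1e+25, 1e+25, 1.0]
# i.e. k_0 and k_inf may span 1e-6..1e25, while Fcent is confined to (0, 1]
```
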
@@ -112,11 +115,7 @@ def ln_arrhenius_jac(T, *args):
bnds[1][A_idx] = np.log(bnds[1][A_idx])

# only valid initial guesses
for n, val in enumerate(p0):
if val < bnds[0][n]:
p0[n] = bnds[0][n]
elif val > bnds[1][n]:
p0[n] = bnds[1][n]
p0 = np.clip(p0, bnds[0], bnds[1])

with warnings.catch_warnings():
warnings.simplefilter('ignore', OptimizeWarning)
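
Note: the `np.clip` call above is a drop-in replacement for the deleted loop — both clamp each initial guess into its bounds. A quick equivalence check with made-up values:

```python
import numpy as np

p0 = np.array([-5.0, 0.5, 9.0])   # made-up guesses
bnds = [np.zeros(3), np.ones(3)]  # [lower, upper]

# old behavior: clamp each entry elementwise
expected = p0.copy()
for n, val in enumerate(expected):
    if val < bnds[0][n]:
        expected[n] = bnds[0][n]
    elif val > bnds[1][n]:
        expected[n] = bnds[1][n]

assert np.array_equal(np.clip(p0, bnds[0], bnds[1]), expected)
```
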
@@ -159,6 +158,7 @@ def x_bnds(self, x0):
bnds.append(troe_all_bnds[coef]['-'])
else:
bnds.append(troe_all_bnds[coef]['+'])

return np.array(bnds).T

def fit(self):
@@ -565,6 +565,21 @@ def fit_arrhenius_rates(self):
def LPL_HPL_Fcent(self): # CURRENTLY A KNOWN BUG IF FULL LIMIT IS HELD CONSTANT
arrhenius_coeffs, T, ln_k = self.fit_arrhenius_rates()

# set nlopt opt parameters
options = [{'algorithm': nlopt.GN_DIRECT_NOSCAL, 'xtol_rel': 1E-4, 'ftol_rel': 1E-4, # GN_DIRECT_NOSCAL GN_DIRECT
'initial_step': 1, 'max_eval': 2000},
#{'algorithm': nlopt.LN_COBYLA, 'xtol_rel': 1E-2, 'ftol_rel': 1E-2,
# 'initial_step': 1E-3, 'max_eval': 500},
#{'algorithm': nlopt.LN_COBYLA, 'xtol_rel': 1E-2, 'ftol_rel': 1E-2,
# 'initial_step': 1E-4, 'max_eval': 1000} # LN_COBYLA
]

# set scipy curve-fit loss
if self.loss_fcn_par == [1, 1]: # huber-like
loss = 'huber' # 'soft_l1', 'huber', 'cauchy', 'arctan'
else:
loss = 'linear'

# set T, P, M as arrays
P = np.unique(self.P)
P[1:] = np.roll(P[1:], 1)
@@ -583,96 +598,122 @@ def LPL_HPL_Fcent(self): # CURRENTLY A KNOWN BUG IF FULL LIMIT IS HELD CONSTANT
if self.P_limit_constrained[i]:
del idx_fit[i]

if self.loss_fcn_par == [1, 1]: # huber-like
loss = 'huber' # 'soft_l1', 'huber', 'cauchy', 'arctan'
else:
loss = 'linear'

res = []
for i in range(0, np.shape(T)[1]): # keep T constant and fit log_k_0, log_k_inf, log_Fcent
# set fit function, might be a better way, but this works for now
if idx_fit == [0, 1, 2]:
fit_fcn = self.ln_Troe
M_fit = M[:,i]
ln_k_fit = ln_k[:,i]

elif idx_fit == [0, 2]:
n = np.concatenate([[0], np.arange(2, len(M[:,i]))])
M_fit = M[n,i]
ln_k_fit = ln_k[n,i]

fit_fcn = lambda M, ln_k_0, Fcent: self.ln_Troe(M, [ln_k_0, ln_k[1,i], Fcent])

elif idx_fit == [1, 2]:
n = np.concatenate([[1], np.arange(2, len(M[:,i]))])
M_fit = M[n,i]
ln_k_fit = ln_k[n,i]

fit_fcn = lambda M, ln_k_inf, Fcent: self.ln_Troe(M, [ln_k[0,i], ln_k_inf, Fcent])

elif idx_fit == [2]:
n = np.arange(2, len(M[:,i]))
M_fit = M[n,i]
ln_k_fit = ln_k[n,i]

fit_fcn = lambda M, Fcent: self.ln_Troe(M, [ln_k[0,i], ln_k[1,i], Fcent])

p_bnds = np.copy(falloff_bnds)[:, idx_fit] # ln(k_0), ln(k_inf), ln(Fcent)

if len(res) == 0:
p0 = np.array([20, 14, -0.5])[idx_fit] # ln(k_0), ln(k_inf), ln(Fcent)
else:
p0 = np.log([res[-1][1:][i] for i in idx_fit])

# TODO: Change this to nlopt?
if self.scipy_curvefit:
x_fit, _ = curve_fit(fit_fcn, M_fit, ln_k_fit, p0=p0, method='trf', bounds=p_bnds, # dogbox
jac='2-point', x_scale='jac', max_nfev=len(p0)*1000, loss=loss)
else:
self.obj_vars = {'fcn': fit_fcn, 'x': M_fit, 'y': ln_k_fit}

xtol_rel = 1E-4
ftol_rel = 1E-4
initial_step = 1E-8
for n, opt_options in enumerate(options):
if n > 0: # smooth prior optimization for initial guess in next opt
res = np.log(res)
for i in [1, 2, 3]:
res[:, i] = dual_tree_complex_wavelet_filter(res[:, i], lvls=2) # smooth

res = np.exp(res)
#print(f'smoothed res {n-1}')
#print(res)

for i in np.arange(0, np.shape(T)[1]): # keep T constant and fit log_k_0, log_k_inf, log_Fcent
# set fit function, might be a better way, but this works for now
if idx_fit == [0, 1, 2]:
fit_fcn = self.ln_Troe
M_fit = M[:,i]
ln_k_fit = ln_k[:,i]

elif idx_fit == [0, 2]:
j = np.concatenate([[0], np.arange(2, len(M[:,i]))])
M_fit = M[j,i]
ln_k_fit = ln_k[j,i]

fit_fcn = lambda M, ln_k_0, Fcent: self.ln_Troe(M, [ln_k_0, ln_k[1,i], Fcent])

elif idx_fit == [1, 2]:
j = np.concatenate([[1], np.arange(2, len(M[:,i]))])
M_fit = M[j,i]
ln_k_fit = ln_k[j,i]

fit_fcn = lambda M, ln_k_inf, Fcent: self.ln_Troe(M, [ln_k[0,i], ln_k_inf, Fcent])

elif idx_fit == [2]:
j = np.arange(2, len(M[:,i]))
M_fit = M[j,i]
ln_k_fit = ln_k[j,i]

fit_fcn = lambda M, Fcent: self.ln_Troe(M, [ln_k[0,i], ln_k[1,i], Fcent])

# set p0 and p_bnds for optimization
if len(res) == 0:
p0 = np.array([20, 14, -0.5])[idx_fit] # ln(k_0), ln(k_inf), ln(Fcent)
#p0 = np.array([20, 14, 0.6])[idx_fit] # ln(k_0), ln(k_inf), Fcent
p_bnds = np.copy(falloff_bnds)[:, idx_fit]

self.opt = opt = nlopt.opt(nlopt.LN_SBPLX, len(idx_fit))
else:
if n == 0:
p0 = np.log(res[i-1][1:])
else:
p0 = np.log(res[i][1:])

p_bnds = np.copy(falloff_bnds)
if i > 0:
p_bnds[0][0:2] = np.log(res[i-1][1:])[0:2] # change lower rate bnds to be prior rate

p0 = p0[idx_fit]
p_bnds = p_bnds[:, idx_fit] # ln(k_0), ln(k_inf), ln(Fcent)

if len(p_bnds) > 0: # remove guesses outside of bounds
p0 = np.clip(p0, p_bnds[0], p_bnds[1])

if self.scipy_curvefit: # not setup after global/local change
x_fit, _ = curve_fit(fit_fcn, M_fit, ln_k_fit, p0=x0, method='trf', bounds=p_bnds, # dogbox
jac='2-point', x_scale='jac', max_nfev=len(x0)*1000, loss=loss)

opt.set_min_objective(self.objective)
opt.set_maxeval(5000)
else:
s = np.ones_like(p0)
self.obj_vars = {'fcn': fit_fcn, 'x': M_fit, 'y': ln_k_fit, 'coefs_0': p0, 's': s}
s = self.obj_vars['s'] = self.calc_s(np.zeros_like(p0))

opt.set_xtol_rel(xtol_rel)
opt.set_ftol_rel(ftol_rel)
self.opt = opt = nlopt.opt(opt_options['algorithm'], len(idx_fit))

opt.set_lower_bounds(p_bnds[0])
opt.set_upper_bounds(p_bnds[1])
opt.set_min_objective(self.objective)
opt.set_maxeval(opt_options['max_eval'])
opt.set_xtol_rel(opt_options['xtol_rel'])
opt.set_ftol_rel(opt_options['ftol_rel'])

opt.set_initial_step(initial_step)
#opt.set_population(int(np.rint(10*(len(idx)+1)*10)))
opt.set_lower_bounds((p_bnds[0]-p0)/s)
opt.set_upper_bounds((p_bnds[1]-p0)/s)

x_fit = opt.optimize(p0) # optimize!
initial_step = opt_options['initial_step']
opt.set_initial_step(initial_step)
#opt.set_population(int(np.rint(10*(len(idx)+1)*10)))

x_fit = opt.optimize(np.zeros_like(p0)) # optimize!
x_fit = x_fit*s + p0

if idx_fit == [0, 1, 2]:
res.append([T[0,i], *np.exp(x_fit)])
#print(f'{opt.last_optimum_value()} in {opt.get_numevals()}')

elif idx_fit == [0, 2]:
res.append([T[0,i], *np.exp([x_fit[0], ln_k[1,i], x_fit[1]])])
if idx_fit == [0, 1, 2]:
res_vector = [T[0,i], *np.exp(x_fit)]

elif idx_fit == [1, 2]:
res.append([T[0,i], *np.exp([ln_k[0,i], x_fit[0], x_fit[1]])])
elif idx_fit == [0, 2]:
res_vector = [T[0,i], *np.exp([x_fit[0], ln_k[1,i], x_fit[1]])]

elif idx_fit == [2]:
res.append([T[0,i], *np.exp([ln_k[0,i], ln_k[1,i], x_fit[0]])])
elif idx_fit == [1, 2]:
res_vector = [T[0,i], *np.exp([ln_k[0,i], x_fit[0], x_fit[1]])]

#cmp = np.array([T[idx], M[idx], ln_k[idx], fit_func(M[idx], *x_fit)]).T
#for entry in cmp:
# print(*entry)
#print('')
elif idx_fit == [2]:
res_vector = [T[0,i], *np.exp([ln_k[0,i], ln_k[1,i], x_fit[0]])]

if n == 0:
res.append(res_vector)
else:
res[i] = res_vector

#cmp = np.array([T[idx], M[idx], ln_k[idx], fit_func(M[idx], *x_fit)]).T
#for entry in cmp:
# print(*entry)
#print('')

#print(f'res {n}')
#print(np.array(res))

res = np.array(res)

print(res)

# Fit HPL and LPL Arrhenius parameters
for idx, arrhenius_type in enumerate(['low_rate', 'high_rate']):
if idx in idx_fit:
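
Note: the rewritten loop above drives nlopt through a list of stage options (currently a single GN_DIRECT_NOSCAL pass, with LN_COBYLA refinement stages commented out), smooths each pass's results with `dual_tree_complex_wavelet_filter` before reusing them as guesses, and — via `calc_s` below — optimizes in scaled variables: nlopt sees z = (p - p0)/s and the result is mapped back with `x_fit*s + p0`. A self-contained sketch of that scaled-variable pattern on a toy quadratic objective (all values are made up; this is not the Troe objective itself):

```python
import nlopt
import numpy as np

p0 = np.array([20.0, 14.0, -0.5])        # initial guess, ln-space as in the diff
p_bnds = np.array([[13.8, 13.8, -6.9],   # lower bounds
                   [57.6, 57.6,  0.0]])  # upper bounds
s = np.array([1.0, 0.5, 0.05])           # per-variable scale factors

def objective(z, grad):                  # nlopt signature: f(x, grad)
    p = z*s + p0                         # map scaled vars back to physical ones
    return float(np.sum((p - np.array([21.0, 14.2, -0.4]))**2))

opt = nlopt.opt(nlopt.GN_DIRECT_NOSCAL, len(p0))
opt.set_min_objective(objective)
opt.set_lower_bounds((p_bnds[0] - p0)/s)  # bounds transformed into scaled space
opt.set_upper_bounds((p_bnds[1] - p0)/s)
opt.set_maxeval(2000)
opt.set_xtol_rel(1E-4)
opt.set_ftol_rel(1E-4)
opt.set_initial_step(1)

z_fit = opt.optimize(np.zeros_like(p0))   # start at the scaled origin, i.e. at p0
p_fit = z_fit*s + p0                      # undo the scaling
```
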
Expand All @@ -687,6 +728,8 @@ def ln_Troe(self, M, *x):
if len(x) == 1:
x = x[0]

#Fcent = x[2]
#k_0, k_inf = np.exp(x[:2])
[k_0, k_inf, Fcent] = np.exp(x)
with np.errstate(all='raise'):
try:
@@ -695,21 +738,23 @@
except:
return np.ones_like(M)*max_pos_system_value

log10_Fcent = np.log10(Fcent)
C = -0.4 - 0.67*log10_Fcent
N = 0.75 - 1.27*log10_Fcent
log_Fcent = np.log10(Fcent)
C = -0.4 - 0.67*log_Fcent
N = 0.75 - 1.27*log_Fcent
f1 = (log_P_r + C)/(N - 0.14*(log_P_r + C))

ln_F = np.log(Fcent)/(1 + f1**2)

ln_k_calc = np.log(k_inf*P_r/(1 + P_r)) + ln_F

return ln_k_calc
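
For reference, the body of `ln_Troe` above evaluates the usual simplified Troe blending form, with Fcent treated as a direct fit parameter at each temperature (no T*/T**/T*** terms here). In base-10 logs it reads as below; the code equivalently applies the same 1/(1 + f_1^2) factor to ln(Fcent):

$$
P_r = \frac{k_0 [M]}{k_\infty}, \qquad k = k_\infty\,\frac{P_r}{1 + P_r}\,F,
$$
$$
\log_{10} F = \frac{\log_{10} F_{cent}}{1 + f_1^2}, \qquad
f_1 = \frac{\log_{10} P_r + C}{N - 0.14\,(\log_{10} P_r + C)},
$$
$$
C = -0.4 - 0.67\,\log_{10} F_{cent}, \qquad N = 0.75 - 1.27\,\log_{10} F_{cent}.
$$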

def objective(self, coefs, grad=np.array([]), obj_type='obj_sum'):
def objective(self, coefs_in, grad=np.array([]), obj_type='obj_sum'):
fit_fcn, x, y = self.obj_vars['fcn'], self.obj_vars['x'], self.obj_vars['y']
coefs_0, s = self.obj_vars['coefs_0'], self.obj_vars['s']
loss_scale, loss_alpha = self.loss_fcn_par

coefs = coefs_in*s + coefs_0

resid = fit_fcn(x, coefs) - y
if obj_type == 'obj_sum':
obj_val = generalized_loss_fcn(resid, a=loss_alpha, c=loss_scale).sum()
Expand All @@ -725,6 +770,22 @@ def objective(self, coefs, grad=np.array([]), obj_type='obj_sum'):

return obj_val

def calc_s(self, x, grad=[]):
if len(grad) == 0:
grad = approx_fprime(x, self.objective, 1E-10)

y = np.abs(grad)
if (y < min_pos_system_value).all():
y = np.ones_like(y)*1E-14
else:
y[y < min_pos_system_value] = 10**(OoM(np.min(y[y>=min_pos_system_value])) - 1) # TODO: MAKE THIS BETTER running into problem when s is zero, this is a janky workaround

s = 1/y
#s = s/np.min(s)
s = s/np.max(s)

return s
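
Note: `calc_s` above derives the per-variable scales from inverse gradient magnitudes of the objective at the scaled origin, with a guard for near-zero gradients. A stripped-down sketch of the same idea, using the `approx_fprime` helper the method calls (here taken from scipy.optimize):

```python
import numpy as np
from scipy.optimize import approx_fprime

def inverse_gradient_scales(fcn, x, eps=1E-10):
    """Scale each variable by 1/|df/dx_i|, normalized so max(s) == 1."""
    y = np.abs(approx_fprime(x, fcn, eps))
    y[y == 0] = y[y > 0].min() if (y > 0).any() else 1.0  # guard zero gradients
    s = 1.0/y
    return s/s.max()

# toy objective: steep in x0, shallow in x1 -> x1 gets the larger step scale
f = lambda x: 100.0*x[0]**2 + x[1]**2
print(inverse_gradient_scales(f, np.array([1.0, 1.0])))  # ~[0.01, 1.0]
```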


def fit_generic(rates, T, P, X, rxnIdx, coefKeys, coefNames, is_falloff_limit, mech, bnds, mpPool=None):
rxn = mech.gas.reaction(rxnIdx)