After constructive comments from @M Newville, I have altered my original question in order to produce a minimal example.
I have performed experiments under two different conditions. I have a lot more experimental data for Condition 1 than for Condition 2:
import numpy as np
from lmfit import minimize, Parameters, fit_report
# CONDITION 1.
x_data_1 = np.array([4.68, 4.70, 4.71, 4.72, 4.74])
y_data_1 = np.array([7.01, 7.03, 7.04, 7.04, 7.04])
z_data_1 = np.array([5.67, 5.67, 5.68, 5.69, 5.72])
n1_data_1 = np.array([0.374, 0.371, 0.369, 0.376, 0.375])
n2_data_1 = np.array([0.284, 0.284, 0.282, 0.282, 0.283])
n3_data_1 = np.array([0.284, 0.284, 0.282, 0.282, 0.283])
# CONDITION 2.
x_data_2 = np.array([10.72, 10.68, 10.77, 10.76, 10.79])
n1_data_2 = np.array([0.207, 0.205, 0.203, 0.224, 0.205])
However, I need Y>Z>X to be true for both conditions. X, Y and Z, calculated by Function_X, Function_Y and Function_Z, depend on parameters A, B, C, D and E, as do N1, N2 and N3, described by Function_N1, Function_N2 and Function_N3.
# FUNCTIONS.
def Function_X(A, B, C, E):
    return C - B**2/(A - E)

def Function_Y(A, B, C, E):
    return (4*E*((A - E)*C - B**2))/(A*C - B**2)

def Function_Z(A, B, C, D, E, X, Y):
    return 4*(1/X + 1/Y + 1/D - B/((A - E)*C - B**2))**(-1)

def Function_N1(A, B, E):
    return B/(2*(A - E))

def Function_N2(A, B, C, E):
    return 2*E*B/(A*C - B**2)

def Function_N3(A, B, C, E):
    return ((A - 2*E)*C - B**2)/(A*C - B**2)
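As a quick sanity check (the trial values below are arbitrary and only illustrate how the ordering can be evaluated, they are not fit results), X, Y and Z can be computed directly from these functions:
# Sanity check with arbitrary trial values (not fit results).
A_try, B_try, C_try, D_try, E_try = 20.0, 10.0, 15.0, 2.5, 5.0
x_try = Function_X(A_try, B_try, C_try, E_try)
y_try = Function_Y(A_try, B_try, C_try, E_try)
z_try = Function_Z(A_try, B_try, C_try, D_try, E_try, x_try, y_try)
print(x_try, y_try, z_try, 'Y>Z>X:', y_try > z_try > x_try)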
On the one hand, FitFunction1 is able to achieve the desired Y>Z>X (y_computed_1 > z_computed_1 > x_computed_1) without any mathematical constraints, thanks to the larger amount of data.
# FIT FUNCTION (CONDITION 1).
def FitFunction1(params1, x_data_1=None, y_data_1=None, z_data_1=None, n1_data_1=None, n2_data_1=None, n3_data_1=None):
    # MODEL.
    x_mod = Function_X(params1['A'], params1['B'], params1['C'], params1['E'])
    y_mod = Function_Y(params1['A'], params1['B'], params1['C'], params1['E'])
    z_mod = Function_Z(params1['A'], params1['B'], params1['C'], params1['D'], params1['E'], x_mod, y_mod)
    n1_mod = Function_N1(params1['A'], params1['B'], params1['E'])
    n2_mod = Function_N2(params1['A'], params1['B'], params1['C'], params1['E'])
    n3_mod = Function_N3(params1['A'], params1['B'], params1['C'], params1['E'])
    # RESIDUALS.
    x_res = x_data_1 - x_mod
    y_res = y_data_1 - y_mod
    z_res = z_data_1 - z_mod
    n1_res = n1_data_1 - n1_mod
    n2_res = n2_data_1 - n2_mod
    n3_res = n3_data_1 - n3_mod
    return np.concatenate((x_res.ravel(), y_res.ravel(), z_res.ravel(), n1_res.ravel(), n2_res.ravel(), n3_res.ravel()))
params1 = Parameters()
params1.add('fc', value=1000, min=10, max=100000)
params1.add('alpha', value=0.4, min=0.2, max=0.8)
params1.add('A', value=20, min=10, max=30)
params1.add('B', value=10, min=1, max=15)
params1.add('C', value=15, min=10, max=20)
params1.add('D', value=2.5, min=1, max=5)
params1.add('E', value=5, min=2.5, max=7.5)
res1 = minimize(FitFunction1, params1, kws={'x_data_1': x_data_1, 'y_data_1': y_data_1, 'z_data_1': z_data_1, 'n1_data_1': n1_data_1, 'n2_data_1': n2_data_1, 'n3_data_1': n3_data_1})
print(fit_report(res1))
for keys in res1.params:
    exec(f'jf_{keys}_1 = res1.params[keys].value')
x_computed_1 = Function_X(jf_A_1, jf_B_1, jf_C_1, jf_E_1)
y_computed_1 = Function_Y(jf_A_1, jf_B_1, jf_C_1, jf_E_1)
z_computed_1 = Function_Z(jf_A_1, jf_B_1, jf_C_1, jf_D_1, jf_E_1, Function_X(jf_A_1, jf_B_1, jf_C_1, jf_E_1), Function_Y(jf_A_1, jf_B_1, jf_C_1, jf_E_1))
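As a quick check of the ordering achieved for Condition 1:
# Check the ordering obtained from the Condition 1 fit.
print('Y > Z > X:', y_computed_1 > z_computed_1 > x_computed_1)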
On the other hand, FitFunction2, when left unconstrained (without mathematical constraints), is unable to produce the same ordering (Y>Z>X), because of the smaller amount of data.
# FIT FUNCTION (CONDITION 2).
def FitFunction2(params2, x_data_2=None, n1_data_2=None):
    # MODEL.
    x_mod = Function_X(params2['A'], params2['B'], params2['C'], params2['E'])
    n1_mod = Function_N1(params2['A'], params2['B'], params2['E'])
    # RESIDUALS.
    x_res = x_data_2 - x_mod
    n1_res = n1_data_2 - n1_mod
    return np.concatenate((x_res.ravel(), n1_res.ravel()))
params2 = Parameters()
params2.add('fc', value=1000, min=10, max=100000)
params2.add('alpha', value=0.4, min=0.2, max=0.8)
params2.add('A', value=20, min=10, max=30)
params2.add('B', value=10, min=1, max=15)
params2.add('C', value=15, min=10, max=20)
params2.add('D', value=2.5, min=1, max=5)
params2.add('E', value=5, min=2.5, max=7.5)
params2.add('X', value=np.mean(x_data_2), min=0, expr='C-B**2/(A-E)')
params2.add('Z_minus_X', value=1, vary=True, min=0)
params2.add('Y_minus_Z', value=1, vary=True, min=0)
params2.add('Z', value=10, expr='X + Z_minus_X', min=0)#, expr='(4*E*((A-E)*C-B**2))/(A*C-B**2)')
params2.add('Y', value=10, expr='Z + Y_minus_Z', min=0)#, expr='4*(1/X+1/Y+1/D-B/((A-E)*C-B**2))**(-1)')
res2 = minimize(FitFunction2, params2, kws={'x_data_2': x_data_2, 'n1_data_2': n1_data_2})
print(fit_report(res2))
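The corresponding quantities for Condition 2 can be reconstructed from the fitted parameters in the same way (the x/y/z_computed_2 names below are introduced only for this check):
# Reconstruct X, Y, Z from the Condition 2 fit and check the ordering.
p2 = res2.params
x_computed_2 = Function_X(p2['A'].value, p2['B'].value, p2['C'].value, p2['E'].value)
y_computed_2 = Function_Y(p2['A'].value, p2['B'].value, p2['C'].value, p2['E'].value)
z_computed_2 = Function_Z(p2['A'].value, p2['B'].value, p2['C'].value, p2['D'].value, p2['E'].value, x_computed_2, y_computed_2)
print('Y > Z > X:', y_computed_2 > z_computed_2 > x_computed_2)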
Note that parameter D is not present in FitFunction2. I tried to introduce five additional constraints to enforce Y>Z>X, based on another question, but it does not work. I know that the result of FitFunction2 does not depend on parameters X, Y and Z in the current configuration. I wonder, however, whether there is a way to make the result of FitFunction2 depend on them with another configuration.
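As a minimal illustration of that independence (this check is not part of the fit itself), perturbing Z_minus_X or Y_minus_Z leaves the FitFunction2 residual unchanged:
import copy
# Perturbing the inequality parameters does not change the residual,
# confirming that X, Y and Z do not enter the current fit.
test_params = copy.deepcopy(res2.params)
base_res = FitFunction2(test_params, x_data_2=x_data_2, n1_data_2=n1_data_2)
test_params['Z_minus_X'].value = 5.0   # arbitrary perturbation
perturbed_res = FitFunction2(test_params, x_data_2=x_data_2, n1_data_2=n1_data_2)
print(np.allclose(base_res, perturbed_res))   # True: residual ignores Y and Z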
- Is it possible to arrange parameters A, B, C, D, E in FitFunction2 so that Y>Z>X, using mathematical constraints (or any other method)?