I'm using ceres-solver to optimize a set of parameters (9 total) for my Monte-Carlo (MC) simulation. Basically, the MC code returns an armadillo double matrix of the form results = { {1,2,3,...,19} }
for every set of parameters. I have a set of corresponding data. How do I optimize the parameters using ceres-solver?
Here's the code so far:
using ceres::AutoDiffCostFunction;
using ceres::CostFunction;
using ceres::CauchyLoss;
using ceres::Problem;
using ceres::Solver;
using ceres::Solve;
// Residual functor for fitting the 9 MC-simulation parameters against 19
// measured data points: residual[i] = simulated(i) - measured(i).
// Intended for ceres::AutoDiffCostFunction with 19 residuals and ONE
// parameter block of size 9.
//
// NOTE(review): automatic differentiation requires every operation between
// the parameters and the residuals to be templated on the scalar type T.
// Here the parameters are copied into (presumably global) plain doubles and
// the simulation Tjump_ave() returns an armadillo `mat` of doubles, so the
// derivative chain is broken -- with T = ceres::Jet these assignments will
// not even compile. For a black-box MC simulation,
// ceres::NumericDiffCostFunction (finite differences, operator() taking
// const double*) is almost certainly the right tool -- TODO confirm.
struct SimulationResidual {
  // Bakes the 19 reference measurements into the functor.
  SimulationResidual()
      : y_{346.301, 346.312, 346.432, 346.394, 346.471, 346.605, 346.797,
           346.948, 347.121, 347.384, 347.626, 348.08,  348.561, 349.333,
           350.404, 351.761, 352.975, 354.17,  354.809} {}

  // T_params: pointer to the single 9-element parameter block.
  // residual: output array of 19 residuals. Always returns true
  // (ceres convention: false would mean "evaluation failed").
  template <typename T>
  bool operator()(const T* const T_params, T* residual) const {
    // Push the 9 optimization variables into the simulation inputs.
    // NOTE(review): these targets are not declared in this struct; they
    // appear to be file/global-scope doubles (main() reads the same names
    // for the initial guess) -- verify, and see the NumericDiff note above.
    Tm   = T_params[0];
    g31  = T_params[1];
    g32  = T_params[2];
    gg31 = T_params[3];
    gg32 = T_params[4];
    bn   = T_params[5];
    bu   = T_params[6];
    mn   = T_params[7];
    mu   = T_params[8];

    mat Tjumpave = Tjump_ave();  // run the MC simulation

    // One residual per measured point: simulation output minus data.
    for (int i = 0; i < 19; ++i) {
      residual[i] = Tjumpave(0, i) - T(y_[i]);
    }
    return true;
  }

 private:
  const double y_[19];  // reference data the simulation is fitted against
};
int main(int argc, char** argv) {
wall_clock timer;
timer.tic();
google::InitGoogleLogging(argv[0]);
double T_params[] = { Tm,g31,g32,gg31,gg32,bn,bu,mn,mu }; // initial guess
Problem problem;
CostFunction* cost_function =
new AutoDiffCostFunction<SimulationResidual, 19, 1, 1>
( new SimulationResidual() );
problem.AddResidualBlock(cost_function, new CauchyLoss(0.5), &T_params);
}
When I compile in Xcode with -lceres and -lglog (eigen3 is installed and the package's own examples build and run without errors), I get the error
no matching member function for call to 'AddResidualBlock'.
Please let me know if I need to provide more information to help you help me.
Thanks.
Edit The error is on the problem.AddResidualBlock
line.
I know this question is almost 5 years old, but you gotta pass
T_params
, not &T_params
, you are welcome :)