smooth_feedback
Control and estimation on Lie groups
Loading...
Searching...
No Matches
ipopt.hpp
Go to the documentation of this file.
1// Copyright (C) 2022 Petter Nilsson. MIT License.
2
3#pragma once
4
10#define HAVE_CSTDDEF
11#include <IpIpoptApplication.hpp>
12#include <IpIpoptData.hpp>
13#include <IpTNLP.hpp>
14#undef HAVE_CSTDDEF
15
17
18namespace smooth::feedback {
19
25template<NLP Problem>
26class IpoptNLP : public Ipopt::TNLP
27{
28public:
32 inline IpoptNLP(Problem && nlp, bool use_hessian = false) : nlp_(std::move(nlp)), use_hessian_(use_hessian) {}
33
37 inline IpoptNLP(const Problem & nlp, bool use_hessian = false) : nlp_(nlp), use_hessian_(use_hessian) {}
38
42 inline NLPSolution & sol() { return sol_; }
43
47 inline bool get_nlp_info(
48 Ipopt::Index & n,
49 Ipopt::Index & m,
50 Ipopt::Index & nnz_jac_g,
51 Ipopt::Index & nnz_h_lag,
52 IndexStyleEnum & index_style) override
53 {
54 n = nlp_.n();
55 m = nlp_.m();
56
57 const auto J = nlp_.dg_dx(Eigen::VectorXd::Zero(n));
58 nnz_jac_g = J.nonZeros();
59
60 nnz_h_lag = 0; // default
61 if constexpr (HessianNLP<Problem>) {
62 if (use_hessian_) {
63 H_ = nlp_.d2f_dx2(Eigen::VectorXd::Zero(n));
64 H_ += nlp_.d2g_dx2(Eigen::VectorXd::Zero(n), Eigen::VectorXd::Zero(m));
65 H_.makeCompressed();
66 nnz_h_lag = H_.nonZeros();
67 }
68 } else {
69 assert(use_hessian_ == false);
70 }
71
72 index_style = TNLP::C_STYLE; // zero-based
73
74 return true;
75 }
76
80 inline bool get_bounds_info(
81 Ipopt::Index n, Ipopt::Number * x_l, Ipopt::Number * x_u, Ipopt::Index m, Ipopt::Number * g_l, Ipopt::Number * g_u)
82 override
83 {
84 Eigen::Map<Eigen::VectorXd>(x_l, n) = nlp_.xl().cwiseMax(Eigen::VectorXd::Constant(n, -2e19));
85 Eigen::Map<Eigen::VectorXd>(x_u, n) = nlp_.xu().cwiseMin(Eigen::VectorXd::Constant(n, 2e19));
86 Eigen::Map<Eigen::VectorXd>(g_l, m) = nlp_.gl().cwiseMax(Eigen::VectorXd::Constant(m, -2e19));
87 Eigen::Map<Eigen::VectorXd>(g_u, m) = nlp_.gu().cwiseMin(Eigen::VectorXd::Constant(m, 2e19));
88
89 return true;
90 }
91
95 inline bool get_starting_point(
96 Ipopt::Index n,
97 bool init_x,
98 Ipopt::Number * x,
99 bool init_z,
100 Ipopt::Number * z_L,
101 Ipopt::Number * z_U,
102 Ipopt::Index m,
103 bool init_lambda,
104 Ipopt::Number * lambda) override
105 {
106 if (init_x) { Eigen::Map<Eigen::VectorXd>(x, n) = sol_.x; }
107
108 if (init_z) {
109 Eigen::Map<Eigen::VectorXd>(z_L, n) = sol_.zl;
110 Eigen::Map<Eigen::VectorXd>(z_U, n) = sol_.zu;
111 }
112
113 if (init_lambda) { Eigen::Map<Eigen::VectorXd>(lambda, m) = sol_.lambda; }
114
115 return true;
116 }
117
121 inline bool
122 eval_f(Ipopt::Index n, const Ipopt::Number * x, [[maybe_unused]] bool new_x, Ipopt::Number & obj_value) override
123 {
124 obj_value = nlp_.f(Eigen::Map<const Eigen::VectorXd>(x, n));
125 return true;
126 }
127
131 inline bool
132 eval_grad_f(Ipopt::Index n, const Ipopt::Number * x, [[maybe_unused]] bool new_x, Ipopt::Number * grad_f) override
133 {
134 const auto & df_dx = nlp_.df_dx(Eigen::Map<const Eigen::VectorXd>(x, n));
135 for (auto i = 0; i < n; ++i) { grad_f[i] = df_dx.coeff(0, i); }
136 return true;
137 }
138
142 inline bool eval_g(
143 Ipopt::Index n, const Ipopt::Number * x, [[maybe_unused]] bool new_x, Ipopt::Index m, Ipopt::Number * g) override
144 {
145 Eigen::Map<Eigen::VectorXd>(g, m) = nlp_.g(Eigen::Map<const Eigen::VectorXd>(x, n));
146 return true;
147 }
148
152 inline bool eval_jac_g(
153 Ipopt::Index n,
154 const Ipopt::Number * x,
155 [[maybe_unused]] bool new_x,
156 [[maybe_unused]] Ipopt::Index m,
157 [[maybe_unused]] Ipopt::Index nele_jac,
158 Ipopt::Index * iRow,
159 Ipopt::Index * jCol,
160 Ipopt::Number * values) override
161 {
162 if (values == NULL) {
163 const auto & J = nlp_.dg_dx(Eigen::VectorXd::Zero(n));
164 assert(nele_jac == J.nonZeros());
165
166 for (auto cntr = 0u, od = 0u; od < J.outerSize(); ++od) {
167 for (Eigen::InnerIterator it(J, od); it; ++it) {
168 iRow[cntr] = it.row();
169 jCol[cntr++] = it.col();
170 }
171 }
172 } else {
173 const auto & J = nlp_.dg_dx(Eigen::Map<const Eigen::VectorXd>(x, n));
174 assert(nele_jac == J.nonZeros());
175
176 for (auto cntr = 0u, od = 0u; od < J.outerSize(); ++od) {
177 for (Eigen::InnerIterator it(J, od); it; ++it) { values[cntr++] = it.value(); }
178 }
179 }
180 return true;
181 }
182
186 inline bool eval_h(
187 Ipopt::Index n,
188 const Ipopt::Number * x,
189 [[maybe_unused]] bool new_x,
190 Ipopt::Number sigma,
191 Ipopt::Index m,
192 const Ipopt::Number * lambda,
193 [[maybe_unused]] bool new_lambda,
194 [[maybe_unused]] Ipopt::Index nele_hess,
195 Ipopt::Index * iRow,
196 Ipopt::Index * jCol,
197 Ipopt::Number * values) override
198 {
199 assert(use_hessian_);
200 assert(HessianNLP<Problem>);
201 assert(H_.isCompressed());
202 assert(H_.nonZeros() == nele_hess);
203
204 H_.coeffs().setZero();
205
206 if (values == NULL) {
207 for (auto cntr = 0u, od = 0u; od < H_.outerSize(); ++od) {
208 for (Eigen::InnerIterator it(H_, od); it; ++it) {
209 // transpose: H upper triangular but ipopt expects lower-triangular
210 iRow[cntr] = it.col();
211 jCol[cntr++] = it.row();
212 }
213 }
214 } else {
215 Eigen::Map<const Eigen::VectorXd> xvar(x, n);
216 Eigen::Map<const Eigen::VectorXd> lvar(lambda, m);
217
218 H_ += nlp_.d2f_dx2(xvar);
219 H_ *= sigma;
220 H_ += nlp_.d2g_dx2(xvar, lvar);
221
222 for (auto cntr = 0u, od = 0u; od < H_.outerSize(); ++od) {
223 for (Eigen::InnerIterator it(H_, od); it; ++it) { values[cntr++] = it.value(); }
224 }
225 }
226
227 return true;
228 }
229
233 inline void finalize_solution(
234 Ipopt::SolverReturn status,
235 Ipopt::Index n,
236 const Ipopt::Number * x,
237 const Ipopt::Number * z_L,
238 const Ipopt::Number * z_U,
239 Ipopt::Index m,
240 [[maybe_unused]] const Ipopt::Number * g,
241 const Ipopt::Number * lambda,
242 Ipopt::Number obj_value,
243 const Ipopt::IpoptData * ip_data,
244 [[maybe_unused]] Ipopt::IpoptCalculatedQuantities * ip_cq) override
245 {
246 switch (status) {
247 case Ipopt::SolverReturn::SUCCESS:
248 sol_.status = NLPSolution::Status::Optimal;
249 break;
250 case Ipopt::SolverReturn::STOP_AT_ACCEPTABLE_POINT:
251 sol_.status = NLPSolution::Status::Optimal;
252 break;
253 case Ipopt::SolverReturn::MAXITER_EXCEEDED:
254 sol_.status = NLPSolution::Status::MaxIterations;
255 break;
256 case Ipopt::SolverReturn::CPUTIME_EXCEEDED:
257 sol_.status = NLPSolution::Status::MaxTime;
258 break;
259 case Ipopt::SolverReturn::LOCAL_INFEASIBILITY:
260 sol_.status = NLPSolution::Status::PrimalInfeasible;
261 break;
262 case Ipopt::SolverReturn::DIVERGING_ITERATES:
263 sol_.status = NLPSolution::Status::DualInfeasible;
264 break;
265 default:
266 sol_.status = NLPSolution::Status::Unknown;
267 break;
268 }
269
270 sol_.iter = ip_data->iter_count();
271
272 sol_.x = Eigen::Map<const Eigen::VectorXd>(x, n);
273 sol_.zl = Eigen::Map<const Eigen::VectorXd>(z_L, n);
274 sol_.zu = Eigen::Map<const Eigen::VectorXd>(z_U, n);
275 sol_.lambda = Eigen::Map<const Eigen::VectorXd>(lambda, m);
276
277 sol_.objective = obj_value;
278 }
279
280private:
281 Problem nlp_;
282 bool use_hessian_;
283 NLPSolution sol_;
284 Eigen::SparseMatrix<double> H_;
285};
286
  NLP auto && nlp,
  std::optional<NLPSolution> warmstart = {},
  std::vector<std::pair<std::string, int>> opts_integer = {},
  std::vector<std::pair<std::string, std::string>> opts_string = {},
  std::vector<std::pair<std::string, double>> opts_numeric = {})
{
  // decayed problem type used to instantiate the TNLP wrapper
  using nlp_t = std::decay_t<decltype(nlp)>;
  bool use_hessian = false;
  const auto n = nlp.n();

  // enable the analytic Hessian iff the caller explicitly passed the Ipopt
  // option hessian_approximation = "exact"
  auto it = std::find_if(
    opts_string.begin(), opts_string.end(), [](const auto & p) { return p.first == "hessian_approximation"; });
  if (it != opts_string.end() && it->second == "exact") { use_hessian = true; }

  Ipopt::SmartPtr<IpoptNLP<nlp_t>> ipopt_nlp = new IpoptNLP<nlp_t>(std::forward<decltype(nlp)>(nlp), use_hessian);
  Ipopt::SmartPtr<Ipopt::IpoptApplication> app = new Ipopt::IpoptApplication();

  // silence welcome message
  app->Options()->SetStringValue("sb", "yes");

  if (warmstart.has_value()) {
    // initial guess is given: seed primal point and multipliers from it
    app->Options()->SetStringValue("warm_start_init_point", "yes");
    ipopt_nlp->sol() = warmstart.value();
  } else {
    // initial guess not given, set to zero
    // NOTE(review): zl/zu/lambda stay empty here; with warm_start_init_point
    // set to "no" Ipopt presumably does not request them in
    // get_starting_point (init_z / init_lambda false) -- confirm
    app->Options()->SetStringValue("warm_start_init_point", "no");
    ipopt_nlp->sol().x.setZero(n);
  }

  // override with user-provided options
  for (auto [opt, val] : opts_integer) { app->Options()->SetIntegerValue(opt, val); }
  for (auto [opt, val] : opts_string) { app->Options()->SetStringValue(opt, val); }
  for (auto [opt, val] : opts_numeric) { app->Options()->SetNumericValue(opt, val); }

  // NOTE(review): return codes of Initialize() / OptimizeTNLP() are ignored;
  // the solver outcome is read back via finalize_solution, but a failed
  // Initialize leaves sol().status at its default value -- verify intended
  app->Initialize();
  app->OptimizeTNLP(ipopt_nlp);

  return ipopt_nlp->sol();
}
339
340} // namespace smooth::feedback
Ipopt interface to solve NLPs.
Definition: ipopt.hpp:27
IpoptNLP(Problem &&nlp, bool use_hessian=false)
Ipopt wrapper for NLP (rvalue version).
Definition: ipopt.hpp:32
bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, IndexStyleEnum &index_style) override
IPOPT info overload.
Definition: ipopt.hpp:47
IpoptNLP(const Problem &nlp, bool use_hessian=false)
Ipopt wrapper for NLP (lvalue version).
Definition: ipopt.hpp:37
bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda) override
IPOPT initial guess overload.
Definition: ipopt.hpp:95
bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f) override
IPOPT method to define gradient of objective.
Definition: ipopt.hpp:132
bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values) override
IPOPT method to define jacobian of constraint function.
Definition: ipopt.hpp:152
bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value) override
IPOPT method to define objective.
Definition: ipopt.hpp:122
bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u) override
IPOPT bounds overload.
Definition: ipopt.hpp:80
bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number sigma, Ipopt::Index m, const Ipopt::Number *lambda, bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values) override
IPOPT method to define problem Hessian.
Definition: ipopt.hpp:186
bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g) override
IPOPT method to define constraint function.
Definition: ipopt.hpp:142
NLPSolution & sol()
Access solution.
Definition: ipopt.hpp:42
void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, const Ipopt::Number *g, const Ipopt::Number *lambda, Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq) override
IPOPT method called after optimization done.
Definition: ipopt.hpp:233
Nonlinear Programming Problem with Hessian information.
Definition: nlp.hpp:58
Nonlinear Programming Problem.
Definition: nlp.hpp:31
NLPSolution solve_nlp_ipopt(NLP auto &&nlp, std::optional< NLPSolution > warmstart={}, std::vector< std::pair< std::string, int > > opts_integer={}, std::vector< std::pair< std::string, std::string > > opts_string={}, std::vector< std::pair< std::string, double > > opts_numeric={})
Solve an NLP with the Ipopt solver.
Definition: ipopt.hpp:298
Nonlinear program definition.
Solution to a Nonlinear Programming Problem.
Definition: nlp.hpp:70
Eigen::VectorXd zl
Inequality multipliers.
Definition: nlp.hpp:92
double objective
Objective.
Definition: nlp.hpp:99
Eigen::VectorXd lambda
Constraint multipliers.
Definition: nlp.hpp:96
Eigen::VectorXd x
Variable values.
Definition: nlp.hpp:88
Status status
Solver status.
Definition: nlp.hpp:82
std::size_t iter
Number of iterations.
Definition: nlp.hpp:85
Eigen::VectorXd zu
Inequality multipliers.
Definition: nlp.hpp:92