smooth_feedback
Control and estimation on Lie groups
mesh_function.hpp
Go to the documentation of this file.
1// Copyright (C) 2022 Petter Nilsson. MIT License.
2
3#pragma once
4
10#include <Eigen/Dense>
11#include <Eigen/Sparse>
12#include <smooth/diff.hpp>
13
14#include "mesh.hpp"
15#include "sparse.hpp"
16
17namespace smooth::feedback {
18
19template<uint8_t Deriv>
20struct MeshValue;
21
36template<>
37struct MeshValue<0>
38{
40 Eigen::VectorXd F;
41
43 bool allocated{false};
44};
45
51template<>
52struct MeshValue<1> : public MeshValue<0>
53{
55 Eigen::SparseMatrix<double> dF;
56};
57
63template<>
64struct MeshValue<2> : public MeshValue<1>
65{
67 Eigen::VectorXd lambda;
68
70 Eigen::SparseMatrix<double> d2F;
71};
72
79template<uint8_t Deriv>
80void set_zero(MeshValue<Deriv> & mv)
81{
82 mv.F.setZero();
83 if constexpr (Deriv >= 1) { set_zero(mv.dF); }
84 if constexpr (Deriv >= 2) { set_zero(mv.d2F); }
85}
86
114template<uint8_t Deriv, diff::Type DT = diff::Type::Default>
115 requires(Deriv <= 2)
116void mesh_eval(
117 MeshValue<Deriv> & out,
118 const MeshType auto & m,
119 auto & f,
120 const double t0,
121 const double tf,
122 std::ranges::range auto && xs,
123 std::ranges::range auto && us,
124 bool scale = false)
125{
126 using utils::zip, std::views::iota;
127 using X = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(xs)>>>;
128 using U = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(us)>>>;
129
130 static constexpr auto nx = Dof<X>;
131 static constexpr auto nu = Dof<U>;
132 static constexpr auto nf = Dof<std::invoke_result_t<decltype(f), double, X, U>>;
133
134 static_assert(nx > -1, "State dimension must be static");
135 static_assert(nu > -1, "Input dimension must be static");
136 static_assert(nf > -1, "Output size must be static");
137
138 const auto N = m.N_colloc();
139
140 const Eigen::Index numOuts = nf * N;
141 const Eigen::Index numVars = 2 + nx * (N + 1) + nu * N;
142
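 // Decision-variable ordering used by the sparse derivatives below:
 //   column 0 is t0, column 1 is tf, the next nx * (N + 1) columns hold x_0 ... x_N,
 //   and the final nu * N columns hold u_0 ... u_{N-1}.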
143 // ALLOCATION
144
145 if (!out.allocated) {
146 out.F.resize(numOuts);
147
148 if constexpr (Deriv >= 1) {
149 Eigen::VectorXi pattern = Eigen::VectorXi::Zero(numVars);
150 pattern.segment(0, 1).setConstant(numOuts); // t0 is dense
151 pattern.segment(1, 1).setConstant(numOuts); // tf is dense
152 pattern.segment(2, nx * N).setConstant(nf); // block diagonal, last x not used
153 pattern.segment(2 + nx * (N + 1), nu * N).setConstant(nf); // block diagonal
154
155 out.dF.resize(numOuts, numVars);
156 out.dF.reserve(pattern);
157 }
158
159 if constexpr (Deriv >= 2u) {
160 Eigen::VectorXi pattern = Eigen::VectorXi::Zero(numVars);
161 pattern.segment(0, 1).setConstant(1); // t0
162 pattern.segment(1, 1).setConstant(2); // t0, tf
163 for (auto i = 0u; i < N; ++i) {
164 for (auto j = 0u; j < nx; ++j) {
165 pattern(2 + nx * i + j) = 2 + (j + 1); // t0, tf, x upper diag
166 }
167 }
168 for (auto i = 0u; i < N; ++i) {
169 for (auto j = 0u; j < nu; ++j) {
170 pattern(2 + nx * (N + 1) + nu * i + j) = 2 + nx + (j + 1); // t0, tf, x, u upper diag
171 }
172 }
173 out.d2F.resize(numVars, numVars);
174 out.d2F.reserve(pattern);
175 }
176
177 out.allocated = true;
178 }
179
180 set_zero(out);
181
182 for (const auto & [i, tau, w_quad, x, u] : zip(iota(0u, N), m.all_nodes(), m.all_weights(), xs, us)) {
183 const double ti = t0 + (tf - t0) * tau;
184 const X xi = x;
185 const U ui = u;
186
187 const double mtau = 1. - tau;
188 const double w = scale ? w_quad : 1.;
189
190 const auto fvals = diff::dr<Deriv, DT>(f, wrt(ti, xi, ui));
191
192 out.F.segment(i * nf, nf) = w * std::get<0>(fvals);
193
194 if constexpr (Deriv >= 1u) {
195 const auto & df = std::get<1>(fvals);
196 const auto row0 = nf * i;
197
198 block_add(out.dF, row0, 0, df.middleCols(0, 1), w * mtau);
199 block_add(out.dF, row0, 1, df.middleCols(0, 1), w * tau);
200 block_add(out.dF, row0, 2 + i * nx, df.middleCols(1, nx), w);
201 block_add(out.dF, row0, 2 + nx * (N + 1) + nu * i, df.middleCols(1 + nx, nu), w);
202
203 if constexpr (Deriv >= 2u) {
204 assert(out.lambda.size() == numOuts);
205
206 const auto & d2f = std::get<2>(fvals);
207
208 for (auto j = 0u; j < nf; ++j) {
209 const double wl = w * out.lambda(row0 + j);
210
211 // destination block locations
212 const auto t0_d = 0;
213 const auto tf_d = 1;
214 const auto x_d = 2 + i * nx;
215 const auto u_d = 2 + (N + 1) * nx + i * nu;
216
217 // source block locations
218 const auto b_s = (1 + nx + nu) * j;
219 const auto t_s = 0;
220 const auto x_s = 1;
221 const auto u_s = 1 + nx;
222
223 // clang-format off
224 // t0 row
225 block_add(out.d2F, t0_d, t0_d, d2f.block(t_s, b_s + t_s, 1, 1 ), wl * mtau * mtau, true);
226 block_add(out.d2F, t0_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1 ), wl * mtau * tau, true);
227 block_add(out.d2F, t0_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * mtau, true);
228 block_add(out.d2F, t0_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * mtau, true);
229
230 // tf row
231 block_add(out.d2F, tf_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1 ), wl * tau * tau, true);
232 block_add(out.d2F, tf_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * tau, true);
233 block_add(out.d2F, tf_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * tau, true);
234
235 // x row
236 block_add(out.d2F, x_d, x_d, d2f.block(x_s, b_s + x_s, nx, nx), wl, true);
237 block_add(out.d2F, x_d, u_d, d2f.block(x_s, b_s + u_s, nx, nu), wl, true);
238
239 // u row
240 block_add(out.d2F, u_d, u_d, d2f.block(u_s, b_s + u_s, nu, nu), wl, true);
241 // clang-format on
242 }
243 }
244 }
245 }
246}
247
273template<uint8_t Deriv, diff::Type DT = diff::Type::Default>
274 requires(Deriv <= 2)
275void mesh_integrate(
276 MeshValue<Deriv> & out,
277 const MeshType auto & m,
278 auto & f,
279 const double t0,
280 const double tf,
281 std::ranges::range auto && xs,
282 std::ranges::range auto && us)
283{
284 using utils::zip, std::views::iota;
285 using X = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(xs)>>>;
286 using U = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(us)>>>;
287
288 static constexpr auto nx = Dof<X>;
289 static constexpr auto nu = Dof<U>;
290 static constexpr auto nf = Dof<std::invoke_result_t<decltype(f), double, X, U>>;
291
292 static_assert(nx > -1, "State dimension must be static");
293 static_assert(nu > -1, "Input dimension must be static");
294 static_assert(nf > -1, "Output size must be static");
295
296 const auto N = m.N_colloc();
297
298 const Eigen::Index numOuts = nf;
299 const Eigen::Index numVars = 2 + nx * (N + 1) + nu * N;
300
301 // ALLOCATION
302
303 if (!out.allocated) {
304 out.F.resize(numOuts);
305
306 if constexpr (Deriv >= 1) {
307 Eigen::VectorXi pattern = Eigen::VectorXi::Constant(numVars, numOuts); // dense
308 pattern.segment(2 + nx * N, nx).setZero();
309
310 out.dF.resize(numOuts, numVars);
311 out.dF.reserve(pattern);
312 }
313
314 if constexpr (Deriv >= 2) {
315 Eigen::VectorXi pattern = Eigen::VectorXi::Zero(numVars);
316 pattern(0) = 1; // t0
317 pattern(1) = 2; // t0, tf
318 for (auto i = 0u; i < N; ++i) {
319 for (auto j = 0u; j < nx; ++j) {
320 pattern(2 + nx * i + j) = 2 + (j + 1); // t0, tf, x upper diag
321 }
322 }
323 for (auto i = 0u; i < N; ++i) {
324 for (auto j = 0u; j < nu; ++j) {
325 pattern(2 + nx * (N + 1) + nu * i + j) = 2 + nx + (j + 1); // t0, tf, x, u upper diag
326 }
327 }
328
329 out.d2F.resize(numVars, numVars);
330 out.d2F.reserve(pattern.replicate(numOuts, 1).reshaped());
331 }
332
333 out.allocated = true;
334 }
335
336 set_zero(out);
337
338 for (const auto & [i, tau, w, x, u] : zip(iota(0u, N), m.all_nodes(), m.all_weights(), xs, us)) {
339 const double ti = t0 + (tf - t0) * tau;
340 const X xi = x;
341 const U ui = u;
342
343 const double mtau = 1. - tau;
344
345 const auto fvals = diff::dr<Deriv, DT>(f, wrt(ti, xi, ui));
346
347 const auto & fval = std::get<0>(fvals);
348 out.F.noalias() += w * (tf - t0) * fval;
349
350 if constexpr (Deriv >= 1u) {
351 const auto & df = std::get<1>(fvals);
352 // t0
353 block_add(out.dF, 0, 0, df.middleCols(0, 1), w * (tf - t0) * mtau);
354 block_add(out.dF, 0, 0, fval, -w);
355 // tf
356 block_add(out.dF, 0, 1, df.middleCols(0, 1), w * (tf - t0) * tau);
357 block_add(out.dF, 0, 1, fval, w);
358 // x
359 block_add(out.dF, 0, 2 + i * nx, df.middleCols(1, nx), w * (tf - t0));
360 // u
361 block_add(out.dF, 0, 2 + nx * (N + 1) + nu * i, df.middleCols(1 + nx, nu), w * (tf - t0));
362
363 if constexpr (Deriv >= 2u) {
364 assert(out.lambda.size() == numOuts);
365
366 const auto & d2f = std::get<2>(fvals);
367
368 for (auto j = 0u; j < nf; ++j) {
369 const double wl = w * out.lambda(j);
370
371 // source block locations
372 const auto b_s = (1 + nx + nu) * j; // horizontal block
373 const auto t_s = 0; // t
374 const auto x_s = 1; // x
375 const auto u_s = 1 + nx; // u
376
377 // destination block locations
378 const auto t0_d = 0; // t0
379 const auto tf_d = 1; // tf
380 const auto x_d = 2 + nx * i; // x[i]
381 const auto u_d = 2 + nx * (N + 1) + nu * i; // u[i]
382
383 // clang-format off
384 // t0t0
385 block_add(out.d2F, t0_d, t0_d, d2f.block(t_s, b_s + t_s, 1, 1), wl * (tf - t0) * mtau * mtau, true);
386 block_add(out.d2F, t0_d, t0_d, df.block(j, t_s, 1, 1), -wl * 2 * mtau, true);
387 // t0tf
388 block_add(out.d2F, t0_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1), wl * (tf - t0) * mtau * tau, true);
389 block_add(out.d2F, t0_d, tf_d, df.block(j, t_s, 1, 1), wl * (1 - 2 * tau), true);
390 // t0x
391 block_add(out.d2F, t0_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * (tf - t0) * mtau, true);
392 block_add(out.d2F, t0_d, x_d, df.block(j, x_s, 1, nx), -wl, true);
393 // t0u
394 block_add(out.d2F, t0_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * (tf - t0) * mtau, true);
395 block_add(out.d2F, t0_d, u_d, df.block(j, u_s, 1, nu), -wl, true);
396
397 // tftf
398 block_add(out.d2F, tf_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1), wl * (tf - t0) * tau * tau, true);
399 block_add(out.d2F, tf_d, tf_d, df.block(j, t_s, 1, 1), wl * 2 * tau, true);
400 // tfx
401 block_add(out.d2F, tf_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * (tf - t0) * tau, true);
402 block_add(out.d2F, tf_d, x_d, df.block(j, x_s, 1, nx), wl, true);
403 // tfu
404 block_add(out.d2F, tf_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * (tf - t0) * tau, true);
405 block_add(out.d2F, tf_d, u_d, df.block(j, u_s, 1, nu), wl, true);
406
407 // xx
408 block_add(out.d2F, x_d, x_d, d2f.block(x_s, b_s + x_s, nx, nx), wl * (tf - t0), true);
409 // xu
410 block_add(out.d2F, x_d, u_d, d2f.block(x_s, b_s + u_s, nx, nu), wl * (tf - t0), true);
411
412 // uu
413 block_add(out.d2F, u_d, u_d, d2f.block(u_s, b_s + u_s, nu, nu), wl * (tf - t0), true);
414 // clang-format on
415 }
416 }
417 }
418 }
419}
420
450template<uint8_t Deriv, diff::Type DT = diff::Type::Default>
451 requires(Deriv <= 2)
452void mesh_dyn(
453 MeshValue<Deriv> & out,
454 const MeshType auto & m,
455 auto & f,
456 const double t0,
457 const double tf,
458 std::ranges::range auto && xs,
459 std::ranges::range auto && us)
460{
461 using utils::zip, std::views::iota;
462 using X = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(xs)>>>;
463 using U = PlainObject<std::decay_t<std::ranges::range_value_t<decltype(us)>>>;
464
465 static constexpr auto nx = Dof<X>;
466 static constexpr auto nu = Dof<U>;
467 static constexpr auto nf = Dof<std::invoke_result_t<decltype(f), double, X, U>>;
468
469 static_assert(nx > -1, "State dimension must be static");
470 static_assert(nu > -1, "Input dimension must be static");
471 static_assert(nf > -1, "Output size must be static");
472 static_assert(nx == nf, "Output dimension must be same as state dimension");
473
474 const auto N = m.N_colloc();
475 const Eigen::Index numOuts = nx * N;
476 const Eigen::Index numVars = 2 + nx * (N + 1) + nu * N;
477
478 // ALLOCATION
479
480 if (!out.allocated) {
481 out.F.resize(numOuts);
482
483 if constexpr (Deriv >= 1) {
484 Eigen::VectorXi pattern = Eigen::VectorXi::Zero(numVars);
485 pattern(0) = numOuts; // t0 dense
486 pattern(1) = numOuts; // tf dense
487
488 // x has blocks depending on mesh size
489 auto idx0 = 2u;
490 for (auto ival = 0u; ival < m.N_ivals(); ++ival) {
491 const std::size_t K = m.N_colloc_ival(ival);
492 pattern.segment(idx0, K * nx) += Eigen::VectorXi::Constant(K * nx, nx + K - 1);
493 pattern.segment(idx0 + K * nx, nx) += Eigen::VectorXi::Constant(nx, K);
494 idx0 += K * nx;
495 }
496
497 // u is block diagonal with small blocks
498 pattern.segment(2 + nx * (N + 1), nu * N).setConstant(nx);
499
500 out.dF.resize(numOuts, numVars);
501 out.dF.reserve(pattern);
502 }
503
504 if constexpr (Deriv >= 2) {
505 Eigen::VectorXi pattern = Eigen::VectorXi::Zero(numVars);
506 pattern(0) = 1;
507 pattern(1) = 2;
508 for (auto i = 0u; i < N; ++i) {
509 for (auto j = 0u; j < nx; ++j) {
510 pattern(2 + nx * i + j) = 2 + (j + 1); // t0, tf, x upper diag
511 }
512 }
513 for (auto i = 0u; i < N; ++i) {
514 for (auto j = 0u; j < nu; ++j) {
515 pattern(2 + nx * (N + 1) + nu * i + j) = 2 + nx + (j + 1); // t0, tf, x, u upper diag
516 }
517 }
518
519 out.d2F.resize(numVars, numVars);
520 out.d2F.reserve(pattern);
521 }
522
523 out.allocated = true;
524 }
525
526 set_zero(out);
527
528 // We build the constraint through two loops over xi:
529 // - the first loop adds wk * (tf - t0) * f(tk, xk, uk)
530 // - the second loop adds -wk * [x0 ... xNi] * dk
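 // Net effect for node k of an interval with nodes x0 ... xNi, weights w, and
 // unscaled differentiation matrix Dus (scaling alpha):
 //   F_k = wk * (tf - t0) * f(tk, xk, uk) - wk * alpha * sum_i Dus(i, k) * x_i,
 // which vanishes exactly when the interpolating state polynomial satisfies the
 // time-scaled dynamics at node k.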
531
532 // ADD FIRST PART
533
534 for (const auto & [i, tau, w, x, u] : zip(iota(0u, N), m.all_nodes(), m.all_weights(), xs, us)) {
535 const double ti = t0 + (tf - t0) * tau;
536 const X xi = x;
537 const U ui = u;
538
539 const auto row0 = nx * i;
540 const double mtau = 1. - tau;
541
542 const auto fvals = diff::dr<Deriv, DT>(f, wrt(ti, xi, ui));
543 const auto fval = std::get<0>(fvals);
544
545 out.F.segment(row0, nx) += w * (tf - t0) * fval;
546
547 if constexpr (Deriv >= 1) {
548 const auto & df = std::get<1>(fvals);
549
550 // clang-format off
551 // dF/dt0 = -f + (tf - t0) * df/dti * (1-tau)
552 block_add(out.dF, row0, 0, fval, -w);
553 block_add(out.dF, row0, 0, df.col(0), w * (tf - t0) * mtau);
554 // dF/dtf = f + (tf - t0) * df/dti * tau
555 block_add(out.dF, row0, 1, fval, w);
556 block_add(out.dF, row0, 1, df.col(0), w * (tf - t0) * tau);
557 // dF/dx
558 block_add(out.dF, row0, 2 + nx * i, df.middleCols(1, nx), w * (tf - t0));
559 // dF/du
560 block_add(out.dF, row0, 2 + nx * (N + 1) + nu * i, df.middleCols(1 + nx, nu), w * (tf - t0));
561 // clang-format on
562
563 if constexpr (Deriv >= 2) {
564 assert(out.lambda.size() == numOuts);
565
566 const auto & d2f = std::get<2>(fvals);
567
568 for (auto j = 0u; j < nx; ++j) {
569 const double wl = w * out.lambda(row0 + j);
570
571 // destination block locations
572 const auto t0_d = 0;
573 const auto tf_d = 1;
574 const auto x_d = 2 + i * nx;
575 const auto u_d = 2 + (N + 1) * nx + i * nu;
576
577 // source block locations
578 const auto b_s = (1 + nx + nu) * j;
579 const auto t_s = 0;
580 const auto x_s = 1;
581 const auto u_s = 1 + nx;
582
583 // clang-format off
584 // t0t0
585 block_add(out.d2F, t0_d, t0_d, d2f.block(t_s, b_s + t_s, 1, 1 ), wl * (tf - t0) * mtau * mtau, true);
586 block_add(out.d2F, t0_d, t0_d, df.block(j, t_s, 1, 1 ), -wl * 2 * mtau, true);
587 // t0tf
588 block_add(out.d2F, t0_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1 ), wl * (tf - t0) * mtau * tau, true);
589 block_add(out.d2F, t0_d, tf_d, df.block(j, t_s, 1, 1 ), wl * (1. - 2 * tau), true);
590 // t0x
591 block_add(out.d2F, t0_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * (tf - t0) * mtau, true);
592 block_add(out.d2F, t0_d, x_d, df.block(j, x_s, 1, nx), -wl, true);
593 // t0u
594 block_add(out.d2F, t0_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * (tf - t0) * mtau, true);
595 block_add(out.d2F, t0_d, u_d, df.block(j, u_s, 1, nu), -wl, true);
596
597 // tftf
598 block_add(out.d2F, tf_d, tf_d, d2f.block(t_s, b_s + t_s, 1, 1), wl * (tf - t0) * tau * tau, true);
599 block_add(out.d2F, tf_d, tf_d, df.block(j, t_s, 1, 1), wl * 2 * tau, true);
600 // tfx
601 block_add(out.d2F, tf_d, x_d, d2f.block(t_s, b_s + x_s, 1, nx), wl * (tf - t0) * tau, true);
602 block_add(out.d2F, tf_d, x_d, df.block(j, x_s, 1, nx), wl, true);
603 // tfu
604 block_add(out.d2F, tf_d, u_d, d2f.block(t_s, b_s + u_s, 1, nu), wl * (tf - t0) * tau, true);
605 block_add(out.d2F, tf_d, u_d, df.block(j, u_s, 1, nu), wl, true);
606
607 // xx
608 block_add(out.d2F, x_d, x_d, d2f.block(x_s, b_s + x_s, nx, nx), wl * (tf - t0), true);
609 // xu
610 block_add(out.d2F, x_d, u_d, d2f.block(x_s, b_s + u_s, nx, nu), wl * (tf - t0), true);
611
612 // uu
613 block_add(out.d2F, u_d, u_d, d2f.block(u_s, b_s + u_s, nu, nu), wl * (tf - t0), true);
614 // clang-format on
615 }
616 }
617 }
618 }
619
620 // ADD SECOND PART (LINEAR IN X, NO SECOND DERIVATIVE)
621
622 auto ival = 0u; // current interval index
623 auto ival_idx0 = 0u; // node start index of current interval
624 auto Nival = m.N_colloc_ival(ival);
625
626 for (const auto & [i, x] : zip(iota(0u, N + 1), xs)) {
627 if (i == ival_idx0 + Nival) {
628 // jumping to new interval --- add overlap to current interval before switching
629 const auto [alpha, Dus] = m.interval_diffmat_unscaled(ival);
630 for (const auto & [j, w] : zip(iota(0u, Nival), m.interval_weights(ival))) {
631 const auto row0 = (ival_idx0 + j) * nx;
632 const double coef = -w * alpha * Dus(i - ival_idx0, j);
633
634 out.F.segment(row0, nx) += coef * x;
635
636 if constexpr (Deriv >= 1) {
637 // add diagonal matrix
638 for (auto k = 0u; k < nx; ++k) { out.dF.coeffRef(row0 + k, 2 + nx * i + k) += coef; }
639 }
640 }
641
642 // update interval
643 ++ival;
644 if (ival < m.N_ivals()) {
645 ival_idx0 += Nival;
646 Nival = m.N_colloc_ival(ival);
647 }
648 }
649
650 if (i < N) {
651 const auto [alpha, Dus] = m.interval_diffmat_unscaled(ival);
652 for (const auto & [j, w] : zip(iota(0u, Nival), m.interval_weights(ival))) {
653 const auto row0 = (ival_idx0 + j) * nx;
654 const double coef = -w * alpha * Dus(i - ival_idx0, j);
655
656 out.F.segment(row0, nx) += coef * x;
657
658 if constexpr (Deriv >= 1) {
659 // add diagonal matrix
660 for (auto k = 0u; k < nx; ++k) { out.dF.coeffRef(row0 + k, 2 + nx * i + k) += coef; }
661 }
662 }
663 }
664 }
665}
666
667} // namespace smooth::feedback
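A minimal usage sketch for mesh_eval follows. It is not taken from the library itself: the include path, the Mesh<5, 5> instantiation, and the choice of numerical differentiation are assumptions, and the function and dimensions are illustrative only.

// Illustrative sketch: evaluate a vector-valued function and its Jacobian at
// every collocation node of a mesh (assumed include path and mesh parameters).
#include <smooth/feedback/collocation/mesh_function.hpp>

#include <Eigen/Core>

#include <vector>

int main()
{
  using X = Eigen::Vector2d;           // state, nx = 2
  using U = Eigen::Vector<double, 1>;  // input, nu = 1

  // function with statically known output size (nf = 2)
  auto f = [](double, const X & x, const U & u) -> Eigen::Vector2d {
    return Eigen::Vector2d{x.y(), u.x()};
  };

  smooth::feedback::Mesh<5, 5> mesh;   // assumed template arguments
  const auto N = mesh.N_colloc();

  std::vector<X> xs(N + 1, X::Zero()); // states at the collocation nodes plus the endpoint
  std::vector<U> us(N, U::Zero());     // inputs at the collocation nodes

  smooth::feedback::MeshValue<1> out;  // request value and first derivative
  smooth::feedback::mesh_eval<1, smooth::diff::Type::Numerical>(out, mesh, f, 0., 1., xs, us);

  // out.F has size nf * N; out.dF has size (nf * N) x (2 + nx * (N + 1) + nu * N).
}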
MeshType is a specialization of Mesh.
Definition: mesh.hpp:488
Refinable Legendre-Gauss-Radau mesh of time interval [0, 1].
void mesh_eval(MeshValue< Deriv > &out, const MeshType auto &m, auto &f, const double t0, const double tf, std::ranges::range auto &&xs, std::ranges::range auto &&us, bool scale=false)
Evaluate function over a mesh.
void mesh_dyn(MeshValue< Deriv > &out, const MeshType auto &m, auto &f, const double t0, const double tf, std::ranges::range auto &&xs, std::ranges::range auto &&us)
Evaluate dynamic constraints over a mesh (differentiation version).
void mesh_integrate(MeshValue< Deriv > &out, const MeshType auto &m, auto &f, const double t0, const double tf, std::ranges::range auto &&xs, std::ranges::range auto &&us)
Evaluate integral over a mesh.
void set_zero(MeshValue< Deriv > &mv)
Reset MeshValue to zeros.
Sparse matrix utilities.
void block_add(Eigen::SparseMatrix< double, Options > &dest, Eigen::Index row0, Eigen::Index col0, const Source &source, double scale=1, bool upper_only=false)
Add block into a sparse matrix.
Definition: sparse.hpp:35
Eigen::VectorXd F
Function value (size M)
Eigen::SparseMatrix< double > dF
Size M x numVar.
Eigen::SparseMatrix< double > d2F
Size numVar x numVar.
Eigen::VectorXd lambda
Multipliers (must be set before requesting second derivatives)
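A second sketch, under the same assumptions as the one above, evaluates the collocation defects of a dynamics function with mesh_dyn. When second derivatives are requested instead (Deriv == 2), the lambda member must be sized to the number of outputs before the call, since d2F is accumulated as the lambda-weighted Hessian.

// Illustrative sketch: collocation defects of double-integrator dynamics
// (assumed include path and mesh parameters, as in the previous example).
#include <smooth/feedback/collocation/mesh_function.hpp>

#include <Eigen/Core>

#include <vector>

int main()
{
  using X = Eigen::Vector2d;
  using U = Eigen::Vector<double, 1>;

  // dynamics xdot = f(t, x, u); for mesh_dyn the output dimension must equal nx
  auto f = [](double, const X & x, const U & u) -> Eigen::Vector2d {
    return Eigen::Vector2d{x.y(), u.x()};
  };

  smooth::feedback::Mesh<5, 5> mesh;
  const auto N = mesh.N_colloc();

  std::vector<X> xs(N + 1, X::Zero()); // mesh_dyn reads N + 1 states
  std::vector<U> us(N, U::Zero());

  smooth::feedback::MeshValue<1> out;
  smooth::feedback::mesh_dyn<1, smooth::diff::Type::Numerical>(out, mesh, f, 0., 1., xs, us);

  // out.F (size nx * N) stacks the defects; the discretized dynamics are
  // satisfied on the mesh exactly when out.F vanishes.
}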