
Commit 9e80002

Merge pull request #197 from JuliaControl/DI_val_and_deriv
Added: `NonLinMPC` and `MovingHorizonEstimator` now use `value_and_gradient!`/`jacobian!` of DI.jl
2 parents b6341bc + 282010a commit 9e80002


6 files changed: +115 -119 lines changed


Project.toml
Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 name = "ModelPredictiveControl"
 uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c"
 authors = ["Francis Gagnon"]
-version = "1.5.3"
+version = "1.6.0"
 
 [deps]
 ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e"

docs/src/internals/predictive_control.md
Lines changed: 1 addition & 0 deletions

@@ -24,6 +24,7 @@ ModelPredictiveControl.init_quadprog
 ModelPredictiveControl.init_stochpred
 ModelPredictiveControl.init_matconstraint_mpc
 ModelPredictiveControl.init_nonlincon!
+ModelPredictiveControl.get_optim_functions(::NonLinMPC, ::ModelPredictiveControl.GenericModel)
 ```
 
 ## Update Quadratic Optimization

docs/src/internals/state_estim.md
Lines changed: 1 addition & 0 deletions

@@ -25,6 +25,7 @@ ModelPredictiveControl.relaxX̂
 ModelPredictiveControl.relaxŴ
 ModelPredictiveControl.relaxV̂
 ModelPredictiveControl.init_matconstraint_mhe
+ModelPredictiveControl.get_optim_functions(::MovingHorizonEstimator, ::ModelPredictiveControl.GenericModel)
 ```
 
 ## Update Quadratic Optimization

src/ModelPredictiveControl.jl
Lines changed: 1 addition & 0 deletions

@@ -9,6 +9,7 @@ using ProgressLogging
 
 using DifferentiationInterface: ADTypes.AbstractADType, AutoForwardDiff, AutoSparse
 using DifferentiationInterface: gradient!, jacobian!, prepare_gradient, prepare_jacobian
+using DifferentiationInterface: value_and_gradient!, value_and_jacobian!
 using DifferentiationInterface: Constant, Cache
 using SparseConnectivityTracer: TracerSparsityDetector
 using SparseMatrixColorings: GreedyColoringAlgorithm, sparsity_pattern
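The newly imported functions compute the primal output and its derivative in a single AD pass instead of two. A minimal sketch of the DifferentiationInterface.jl calls (not from the package; the objective `f` and buffer `buf` are stand-ins, with a `Cache` context like the ones used in the controller code):

```julia
# Minimal DifferentiationInterface.jl sketch: value and gradient from one prepared call.
using DifferentiationInterface
import ForwardDiff

f(x, buf) = (buf .= abs2.(x); sum(buf))   # stand-in objective writing into a work buffer
backend = AutoForwardDiff()
x, buf, grad = [1.0, 2.0, 3.0], zeros(3), zeros(3)
prep = prepare_gradient(f, backend, x, Cache(buf))          # one-time preparation
val, _ = value_and_gradient!(f, grad, prep, backend, x, Cache(buf))
# val == 14.0 and grad == [2.0, 4.0, 6.0], both obtained in a single sweep
```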

src/controller/nonlinmpc.jl
Lines changed: 71 additions & 79 deletions

@@ -565,7 +565,8 @@ equality constraint functions `geqfuncs` and gradients `∇geqfuncs!`.
 This method is really intricate and I'm not proud of it. That's because of 3 elements:
 
 - These functions are used inside the nonlinear optimization, so they must be type-stable
-  and as efficient as possible.
+  and as efficient as possible. All the function outputs and derivatives are cached and
+  updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!).
 - The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
   of `Vararg{T,N}` (see the [performance tip](@extref Julia Be-aware-of-when-Julia-avoids-specializing)
   ) and memoization to avoid redundant computations. This is already complex, but it's even
@@ -576,16 +577,17 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
 Inspired from: [User-defined operators with vector outputs](@extref JuMP User-defined-operators-with-vector-outputs)
 """
 function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT<:Real
-    # ----- common cache for Jfunc, gfuncs, geqfuncs called with floats -------------------
+    # ----------- common cache for Jfunc, gfuncs and geqfuncs ----------------------------
     model = mpc.estim.model
+    grad, jac = mpc.gradient, mpc.jacobian
     nu, ny, nx̂, nϵ, nk = model.nu, model.ny, mpc.estim.nx̂, mpc.nϵ, model.nk
     Hp, Hc = mpc.Hp, mpc.Hc
     ng, nc, neq = length(mpc.con.i_g), mpc.con.nc, mpc.con.neq
     nZ̃, nU, nŶ, nX̂, nK = length(mpc.Z̃), Hp*nu, Hp*ny, Hp*nx̂, Hp*nk
     nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny
     strict = Val(true)
-    myNaN = convert(JNT, NaN) # NaN to force update_simulations! at first call:
-    Z̃::Vector{JNT} = fill(myNaN, nZ̃)
+    myNaN = convert(JNT, NaN)
+    J::Vector{JNT} = zeros(JNT, 1)
     ΔŨ::Vector{JNT} = zeros(JNT, nΔŨ)
     x̂0end::Vector{JNT} = zeros(JNT, nx̂)
     K0::Vector{JNT} = zeros(JNT, nK)
@@ -595,128 +597,118 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
     gc::Vector{JNT}, g::Vector{JNT} = zeros(JNT, nc), zeros(JNT, ng)
     geq::Vector{JNT} = zeros(JNT, neq)
     # ---------------------- objective function -------------------------------------------
-    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
-        if isdifferent(Z̃arg, Z̃)
-            Z̃ .= Z̃arg
-            update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
-        end
-        return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ)::T
-    end
     function Jfunc!(Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq)
         update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
         return obj_nonlinprog!(Ŷ0, U0, mpc, model, Ue, Ŷe, ΔŨ)
     end
-    Z̃_∇J = fill(myNaN, nZ̃)
+    Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇J_context = (
         Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
         Cache(Û0), Cache(K0), Cache(X̂0),
         Cache(gc), Cache(g), Cache(geq),
     )
-    ∇J_prep = prepare_gradient(Jfunc!, mpc.gradient, Z̃_∇J, ∇J_context...; strict)
+    ∇J_prep = prepare_gradient(Jfunc!, grad, Z̃_∇J, ∇J_context...; strict)
     ∇J = Vector{JNT}(undef, nZ̃)
-    ∇Jfunc! = if nZ̃ == 1
+    function update_objective!(J, ∇J, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            J[], _ = value_and_gradient!(Jfunc!, ∇J, ∇J_prep, grad, Z̃_∇J, ∇J_context...)
+        end
+    end
+    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
+        update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+        return J[]::T
+    end
+    ∇Jfunc! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
         function (Z̃arg)
-            Z̃_∇J .= Z̃arg
-            gradient!(Jfunc!, ∇J, ∇J_prep, mpc.gradient, Z̃_∇J, ∇J_context...)
-            return ∇J[begin] # univariate syntax, see JuMP.@operator doc
+            update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+            return ∇J[begin]
         end
-    else
-        function (∇J::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
-            Z̃_∇J .= Z̃arg
-            gradient!(Jfunc!, ∇J, ∇J_prep, mpc.gradient, Z̃_∇J, ∇J_context...)
-            return ∇J # multivariate syntax, see JuMP.@operator doc
+    else # multivariate syntax (see JuMP.@operator doc):
+        function (∇Jarg::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
+            update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+            return ∇Jarg .= ∇J
         end
     end
     # --------------------- inequality constraint functions -------------------------------
-    gfuncs = Vector{Function}(undef, ng)
-    for i in eachindex(gfuncs)
-        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
-            if isdifferent(Z̃arg, Z̃)
-                Z̃ .= Z̃arg
-                update_predictions!(
-                    ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃
-                )
-            end
-            return g[i]::T
-        end
-        gfuncs[i] = gfunc_i
-    end
     function gfunc!(g, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, geq)
-        return update_predictions!(
-            ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃
-        )
+        update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
+        return g
     end
-    Z̃_∇g = fill(myNaN, nZ̃)
+    Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
    ∇g_context = (
        Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
-        Cache(Û0), Cache(K0), Cache(X̂0),
+        Cache(Û0), Cache(K0), Cache(X̂0),
        Cache(gc), Cache(geq),
    )
    # temporarily enable all the inequality constraints for sparsity detection:
    mpc.con.i_g[1:end-nc] .= true
-    ∇g_prep = prepare_jacobian(gfunc!, g, mpc.jacobian, Z̃_∇g, ∇g_context...; strict)
+    ∇g_prep = prepare_jacobian(gfunc!, g, jac, Z̃_∇g, ∇g_context...; strict)
     mpc.con.i_g[1:end-nc] .= false
-    ∇g = init_diffmat(JNT, mpc.jacobian, ∇g_prep, nZ̃, ng)
+    ∇g = init_diffmat(JNT, jac, ∇g_prep, nZ̃, ng)
+    function update_con!(g, ∇g, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            value_and_jacobian!(gfunc!, g, ∇g, ∇g_prep, jac, Z̃, ∇g_context...)
+        end
+    end
+    gfuncs = Vector{Function}(undef, ng)
+    for i in eachindex(gfuncs)
+        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
+            update_con!(g, ∇g, Z̃_∇g, Z̃arg)
+            return g[i]::T
+        end
+        gfuncs[i] = gfunc_i
+    end
     ∇gfuncs! = Vector{Function}(undef, ng)
     for i in eachindex(∇gfuncs!)
-        ∇gfuncs_i! = if nZ̃ == 1
+        ∇gfuncs_i! = if nZ̃ == 1 # univariate syntax (see JuMP.@operator doc):
             function (Z̃arg::T) where T<:Real
-                if isdifferent(Z̃arg, Z̃_∇g)
-                    Z̃_∇g .= Z̃arg
-                    jacobian!(gfunc!, g, ∇g, ∇g_prep, mpc.jacobian, Z̃_∇g, ∇g_context...)
-                end
-                return ∇g[i, begin] # univariate syntax, see JuMP.@operator doc
+                update_con!(g, ∇g, Z̃_∇g, Z̃arg)
+                return ∇g[i, begin]
             end
-        else
+        else # multivariate syntax (see JuMP.@operator doc):
             function (∇g_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
-                if isdifferent(Z̃arg, Z̃_∇g)
-                    Z̃_∇g .= Z̃arg
-                    jacobian!(gfunc!, g, ∇g, ∇g_prep, mpc.jacobian, Z̃_∇g, ∇g_context...)
-                end
-                return ∇g_i .= @views ∇g[i, :] # multivariate syntax, see JuMP.@operator doc
+                update_con!(g, ∇g, Z̃_∇g, Z̃arg)
+                return ∇g_i .= @views ∇g[i, :]
             end
         end
         ∇gfuncs![i] = ∇gfuncs_i!
     end
     # --------------------- equality constraint functions ---------------------------------
-    geqfuncs = Vector{Function}(undef, neq)
-    for i in eachindex(geqfuncs)
-        geqfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
-            if isdifferent(Z̃arg, Z̃)
-                Z̃ .= Z̃arg
-                update_predictions!(
-                    ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃
-                )
-            end
-            return geq[i]::T
-        end
-        geqfuncs[i] = geqfunc_i
-    end
     function geqfunc!(geq, Z̃, ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g)
-        return update_predictions!(
-            ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃
-        )
+        update_predictions!(ΔŨ, x̂0end, Ue, Ŷe, U0, Ŷ0, Û0, K0, X̂0, gc, g, geq, mpc, Z̃)
+        return geq
     end
-    Z̃_∇geq = fill(myNaN, nZ̃)
+    Z̃_∇geq = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇geq_context = (
         Cache(ΔŨ), Cache(x̂0end), Cache(Ue), Cache(Ŷe), Cache(U0), Cache(Ŷ0),
         Cache(Û0), Cache(K0), Cache(X̂0),
         Cache(gc), Cache(g)
     )
-    ∇geq_prep = prepare_jacobian(geqfunc!, geq, mpc.jacobian, Z̃_∇geq, ∇geq_context...; strict)
-    ∇geq = init_diffmat(JNT, mpc.jacobian, ∇geq_prep, nZ̃, neq)
+    ∇geq_prep = prepare_jacobian(geqfunc!, geq, jac, Z̃_∇geq, ∇geq_context...; strict)
+    ∇geq = init_diffmat(JNT, jac, ∇geq_prep, nZ̃, neq)
+    function update_con_eq!(geq, ∇geq, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            value_and_jacobian!(geqfunc!, geq, ∇geq, ∇geq_prep, jac, Z̃, ∇geq_context...)
+        end
+    end
+    geqfuncs = Vector{Function}(undef, neq)
+    for i in eachindex(geqfuncs)
+        geqfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
+            update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃arg)
+            return geq[i]::T
+        end
+        geqfuncs[i] = geqfunc_i
+    end
     ∇geqfuncs! = Vector{Function}(undef, neq)
     for i in eachindex(∇geqfuncs!)
         # only multivariate syntax, univariate is impossible since nonlinear equality
         # constraints imply MultipleShooting, thus input increment ΔU and state X̂0 in Z̃:
         ∇geqfuncs_i! =
             function (∇geq_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
-                if isdifferent(Z̃arg, Z̃_∇geq)
-                    Z̃_∇geq .= Z̃arg
-                    jacobian!(
-                        geqfunc!, geq, ∇geq, ∇geq_prep, mpc.jacobian, Z̃_∇geq, ∇geq_context...
-                    )
-                end
+                update_con_eq!(geq, ∇geq, Z̃_∇geq, Z̃arg)
                 return ∇geq_i .= @views ∇geq[i, :]
             end
         ∇geqfuncs![i] = ∇geqfuncs_i!
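The key pattern introduced by this diff is the shared memoized updater: the splatted arguments from JuMP are compared against the cached decision vector, and the constraint vector and its Jacobian are refreshed together by one `value_and_jacobian!` call. A simplified, self-contained sketch of that pattern (hypothetical names, a plain dense `AutoForwardDiff` backend instead of the controller's configurable backends, and a naive comparison in place of `isdifferent`):

```julia
# Memoized value-and-Jacobian refresh, in the spirit of update_con! above.
using DifferentiationInterface
import ForwardDiff

function g!(g, x)            # stand-in constraint function, writes g in place
    g[1] = x[1]^2 + x[2]
    g[2] = x[1] - x[2]^2
    return g
end

backend = AutoForwardDiff()
n, m = 2, 2
x_cache  = fill(NaN, n)      # NaN forces a refresh at the very first call
g_cache  = zeros(m)
∇g_cache = zeros(m, n)
prep = prepare_jacobian(g!, g_cache, backend, x_cache)

function update_con!(xarg)
    if any(x_cache .!= xarg)                 # memoization: recompute only at a new point
        x_cache .= xarg
        value_and_jacobian!(g!, g_cache, ∇g_cache, prep, backend, x_cache)
    end
    return nothing
end

# per-constraint closures, mimicking the splatted JuMP operator callbacks:
gfunc_1(xarg...) = (update_con!(xarg); g_cache[1])
∇gfunc_1!(∇g_1, xarg...) = (update_con!(xarg); ∇g_1 .= view(∇g_cache, 1, :))
```

Calling `gfunc_1(1.0, 2.0)` and then `∇gfunc_1!(zeros(2), 1.0, 2.0)` triggers a single differentiation pass: the second call hits the cache and only copies the stored row.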

src/estimator/mhe/construct.jl
Lines changed: 40 additions & 39 deletions

@@ -1314,97 +1314,98 @@ Also return vectors with the nonlinear inequality constraint functions `gfuncs`,
 This method is really intricate and I'm not proud of it. That's because of 3 elements:
 
 - These functions are used inside the nonlinear optimization, so they must be type-stable
-  and as efficient as possible.
+  and as efficient as possible. All the function outputs and derivatives are cached and
+  updated in-place if required to use the efficient [`value_and_jacobian!`](@extref DifferentiationInterface DifferentiationInterface.value_and_jacobian!).
 - The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
   of `Vararg{T,N}` (see the [performance tip](@extref Julia Be-aware-of-when-Julia-avoids-specializing))
   and memoization to avoid redundant computations. This is already complex, but it's even
   worse knowing that most automatic differentiation tools do not support splatting.
-- The signature of gradient and hessian functions is not the same for univariate (`nZ̃ == 1`)
-  and multivariate (`nZ̃ > 1`) operators in `JuMP`. Both must be defined.
 
 Inspired from: [User-defined operators with vector outputs](@extref JuMP User-defined-operators-with-vector-outputs)
 """
 function get_optim_functions(
     estim::MovingHorizonEstimator, ::JuMP.GenericModel{JNT},
 ) where {JNT <: Real}
-    # ---------- common cache for Jfunc, gfuncs called with floats ------------------------
+    # ----------- common cache for Jfunc and gfuncs --------------------------------------
     model, con = estim.model, estim.con
+    grad, jac = estim.gradient, estim.jacobian
     nx̂, nym, nŷ, nu, nϵ, nk = estim.nx̂, estim.nym, model.ny, model.nu, estim.nϵ, model.nk
     He = estim.He
     nV̂, nX̂, ng, nZ̃ = He*nym, He*nx̂, length(con.i_g), length(estim.Z̃)
-    myNaN = convert(JNT, NaN) # NaN to force update_simulations! at first call
     strict = Val(true)
-    Z̃::Vector{JNT} = fill(myNaN, nZ̃)
+    myNaN = convert(JNT, NaN)
+    J::Vector{JNT} = zeros(JNT, 1)
     V̂::Vector{JNT}, X̂0::Vector{JNT} = zeros(JNT, nV̂), zeros(JNT, nX̂)
-    k0::Vector{JNT} = zeros(JNT, nk)
+    k0::Vector{JNT} = zeros(JNT, nk)
     û0::Vector{JNT}, ŷ0::Vector{JNT} = zeros(JNT, nu), zeros(JNT, nŷ)
     g::Vector{JNT} = zeros(JNT, ng)
     x̄::Vector{JNT} = zeros(JNT, nx̂)
     # --------------------- objective functions -------------------------------------------
-    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
-        if isdifferent(Z̃arg, Z̃)
-            Z̃ .= Z̃arg
-            update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
-        end
-        return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)::T
-    end
     function Jfunc!(Z̃, V̂, X̂0, û0, k0, ŷ0, g, x̄)
         update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
         return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)
     end
-    Z̃_∇J = fill(myNaN, nZ̃)
+    Z̃_∇J = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇J_context = (
         Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0),
         Cache(g),
         Cache(x̄),
     )
     # temporarily "fill" the estimation window for the preparation of the gradient:
     estim.Nk[] = He
-    ∇J_prep = prepare_gradient(Jfunc!, estim.gradient, Z̃_∇J, ∇J_context...; strict)
+    ∇J_prep = prepare_gradient(Jfunc!, grad, Z̃_∇J, ∇J_context...; strict)
     estim.Nk[] = 0
     ∇J = Vector{JNT}(undef, nZ̃)
-    ∇Jfunc! = function (∇J::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
+    function update_objective!(J, ∇J, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            J[], _ = value_and_gradient!(Jfunc!, ∇J, ∇J_prep, grad, Z̃_∇J, ∇J_context...)
+        end
+    end
+    function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
+        update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+        return J[]::T
+    end
+    ∇Jfunc! = function (∇Jarg::AbstractVector{T}, Z̃arg::Vararg{T, N}) where {N, T<:Real}
         # only the multivariate syntax of JuMP.@operator, univariate is impossible for MHE
         # since Z̃ comprises the arrival state estimate AND the estimated process noise
-        Z̃_∇J .= Z̃arg
-        gradient!(Jfunc!, ∇J, ∇J_prep, estim.gradient, Z̃_∇J, ∇J_context...)
-        return ∇J
+        update_objective!(J, ∇J, Z̃_∇J, Z̃arg)
+        return ∇Jarg .= ∇J
     end
-
     # --------------------- inequality constraint functions -------------------------------
-    gfuncs = Vector{Function}(undef, ng)
-    for i in eachindex(gfuncs)
-        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
-            if isdifferent(Z̃arg, Z̃)
-                Z̃ .= Z̃arg
-                update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
-            end
-            return g[i]::T
-        end
-        gfuncs[i] = gfunc_i
-    end
     function gfunc!(g, Z̃, V̂, X̂0, û0, k0, ŷ0)
         return update_prediction!(V̂, X̂0, û0, k0, ŷ0, g, estim, Z̃)
     end
-    Z̃_∇g = fill(myNaN, nZ̃)
+    Z̃_∇g = fill(myNaN, nZ̃) # NaN to force update_predictions! at first call
     ∇g_context = (
         Cache(V̂), Cache(X̂0), Cache(û0), Cache(k0), Cache(ŷ0),
     )
     # temporarily enable all the inequality constraints for sparsity detection:
     estim.con.i_g .= true
     estim.Nk[] = He
-    ∇g_prep = prepare_jacobian(gfunc!, g, estim.jacobian, Z̃_∇g, ∇g_context...; strict)
+    ∇g_prep = prepare_jacobian(gfunc!, g, jac, Z̃_∇g, ∇g_context...; strict)
     estim.con.i_g .= false
     estim.Nk[] = 0
-    ∇g = init_diffmat(JNT, estim.jacobian, ∇g_prep, nZ̃, ng)
+    ∇g = init_diffmat(JNT, jac, ∇g_prep, nZ̃, ng)
+    function update_con!(g, ∇g, Z̃, Z̃arg)
+        if isdifferent(Z̃arg, Z̃)
+            Z̃ .= Z̃arg
+            value_and_jacobian!(gfunc!, g, ∇g, ∇g_prep, jac, Z̃, ∇g_context...)
+        end
+    end
+    gfuncs = Vector{Function}(undef, ng)
+    for i in eachindex(gfuncs)
+        gfunc_i = function (Z̃arg::Vararg{T, N}) where {N, T<:Real}
+            update_con!(g, ∇g, Z̃_∇g, Z̃arg)
+            return g[i]::T
+        end
+        gfuncs[i] = gfunc_i
+    end
     ∇gfuncs! = Vector{Function}(undef, ng)
     for i in eachindex(∇gfuncs!)
         ∇gfuncs![i] = function (∇g_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
             # only the multivariate syntax of JuMP.@operator, see above for the explanation
-            if isdifferent(Z̃arg, Z̃_∇g)
-                Z̃_∇g .= Z̃arg
-                jacobian!(gfunc!, g, ∇g, ∇g_prep, estim.jacobian, Z̃_∇g, ∇g_context...)
-            end
+            update_con!(g, ∇g, Z̃_∇g, Z̃arg)
             return ∇g_i .= @views ∇g[i, :]
         end
     end

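For context, the `Jfunc`/`∇Jfunc!` pairs above are written against JuMP's user-defined operator interface: a splatted value callback plus a multivariate in-place gradient callback. A hedged sketch of that registration on a toy model (hypothetical names, not the package's actual setup):

```julia
# Registering a splatted value callback and its in-place gradient as a JuMP operator.
using JuMP

f(x...) = sum(abs2, x)                            # value callback with splatted arguments
∇f!(g, x...) = (g .= 2 .* collect(x); nothing)    # multivariate gradient fills g in place

model = Model()
@variable(model, z[1:3])
op_f = add_nonlinear_operator(model, 3, f, ∇f!; name = :op_f)
@objective(model, Min, op_f(z...))
```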