Skip to content
6 changes: 5 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,13 @@ ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263"
SolverCore = "ff4d7338-4cf1-434d-91df-b86cb86fb843"

[compat]
ADNLPModels = "0.8.13"
Arpack = "0.5"
LinearOperators = "2.10.0"
ManualNLPModels = "0.2.0"
NLPModels = "0.19, 0.20, 0.21"
NLPModelsModifiers = "0.7"
OptimizationProblems = "0.9.2"
Percival = "0.7.2"
ProximalOperators = "0.15"
RegularizedProblems = "0.1.1"
Expand All @@ -32,10 +34,12 @@ SolverCore = "0.3.0"
julia = "^1.6.0"

[extras]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestSetExtensions = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"

[targets]
test = ["Random", "RegularizedProblems", "Test", "TestSetExtensions"]
test = ["ADNLPModels", "OptimizationProblems", "Random", "RegularizedProblems", "Test", "TestSetExtensions"]
34 changes: 17 additions & 17 deletions src/AL_alg.jl
Original file line number Diff line number Diff line change
Expand Up @@ -138,14 +138,14 @@ Notably, you can access, and modify, the following:
- `stats.solver_specific[:smooth_obj]`: current value of the smooth part of the objective function;
- `stats.solver_specific[:nonsmooth_obj]`: current value of the nonsmooth part of the objective function.
"""
mutable struct ALSolver{T, V, M, ST} <: AbstractOptimizationSolver
mutable struct ALSolver{T, V, M, Pb, ST} <: AbstractOptimizationSolver
x::V
cx::V
y::V
has_bnds::Bool
sub_model::AugLagModel{M, T, V}
sub_problem::Pb
sub_solver::ST
sub_stats::GenericExecutionStats{T, V, V, Any}
sub_stats::GenericExecutionStats{T, V, V, T}
end

function ALSolver(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {T, V}
Expand All @@ -156,11 +156,12 @@ function ALSolver(reg_nlp::AbstractRegularizedNLPModel{T, V}; kwargs...) where {
y = V(undef, ncon)
has_bnds = has_bounds(nlp)
sub_model = AugLagModel(nlp, V(undef, ncon), T(0), x, T(0), V(undef, ncon))
sub_problem = RegularizedNLPModel(sub_model, reg_nlp.h,reg_nlp.selected)
sub_solver = R2Solver(reg_nlp; kwargs...)
sub_stats = GenericExecutionStats(sub_model)
sub_stats = RegularizedExecutionStats(sub_problem)
M = typeof(nlp)
ST = typeof(sub_solver)
return ALSolver{T, V, M, ST}(x, cx, y, has_bnds, sub_model, sub_solver, sub_stats)
return ALSolver{T, V, M, typeof(sub_problem), ST}(x, cx, y, has_bnds, sub_problem, sub_solver, sub_stats)
end

@doc (@doc ALSolver) function AL(::Val{:equ}, reg_nlp::AbstractRegularizedNLPModel; kwargs...)
Expand All @@ -182,7 +183,7 @@ function SolverCore.solve!(
model::AbstractRegularizedNLPModel;
kwargs...,
)
stats = GenericExecutionStats(model.model)
stats = RegularizedExecutionStats(model)
solve!(solver, model, stats; kwargs...)
end

Expand All @@ -209,6 +210,7 @@ function SolverCore.solve!(
factor_decrease_subtol::T = T(1 // 4),
dual_safeguard = project_y!,
) where {T, V}
reset!(stats)

# Retrieve workspace
nlp = reg_nlp.model
Expand Down Expand Up @@ -254,8 +256,8 @@ function SolverCore.solve!(
set_solver_specific!(stats, :nonsmooth_obj, hx)

mu = init_penalty
solver.sub_model.y .= solver.y
update_μ!(solver.sub_model, mu)
solver.sub_problem.model.y .= solver.y
update_μ!(solver.sub_problem.model, mu)

cviol = norm(solver.cx, Inf)
cviol_old = Inf
Expand All @@ -279,15 +281,13 @@ function SolverCore.solve!(
iter += 1

# dual safeguard
dual_safeguard(solver.sub_model)
dual_safeguard(solver.sub_problem.model)

# AL subproblem
sub_reg_nlp = RegularizedNLPModel(solver.sub_model, h, selected)
subtol = max(subtol, atol)
reset!(subout)
solve!(
solver.sub_solver,
sub_reg_nlp,
solver.sub_problem,
subout,
x = solver.x,
atol = subtol,
Expand All @@ -298,8 +298,8 @@ function SolverCore.solve!(
verbose = subsolver_verbose,
)
solver.x .= subout.solution
solver.cx .= solver.sub_model.cx
subiters += subout.iter
solver.cx .= solver.sub_problem.model.cx
subiters = subout.iter

# objective
fx = obj(nlp, solver.x)
Expand All @@ -310,8 +310,8 @@ function SolverCore.solve!(
set_solver_specific!(stats, :nonsmooth_obj, hx)

# dual estimate
update_y!(solver.sub_model)
solver.y .= solver.sub_model.y
update_y!(solver.sub_problem.model)
solver.y .= solver.sub_problem.model.y
set_constraint_multipliers!(stats, solver.y)

# stationarity measure
Expand Down Expand Up @@ -362,7 +362,7 @@ function SolverCore.solve!(
if cviol > max(ctol, factor_primal_linear_improvement * cviol_old)
mu *= factor_penalty_up
end
update_μ!(solver.sub_model, mu)
update_μ!(solver.sub_problem.model, mu)
cviol_old = cviol
subtol *= factor_decrease_subtol
rem_eval = max_eval < 0 ? max_eval : max_eval - neval_obj(nlp)
Expand Down
4 changes: 3 additions & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
using LinearAlgebra: length
using LinearAlgebra, Random, Test
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore
using ADNLPModels, OptimizationProblems, OptimizationProblems.ADNLPProblems, NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore

const global compound = 1
const global nz = 10 * compound
Expand All @@ -10,6 +10,8 @@ const global bpdn, bpdn_nls, sol = bpdn_model(compound)
const global bpdn2, bpdn_nls2, sol2 = bpdn_model(compound, bounds = true)
const global λ = norm(grad(bpdn, zeros(bpdn.meta.nvar)), Inf) / 10

include("test_AL.jl")

for (mod, mod_name) ∈ ((x -> x, "exact"), (LSR1Model, "lsr1"), (LBFGSModel, "lbfgs"))
for (h, h_name) ∈ ((NormL0(λ), "l0"), (NormL1(λ), "l1"), (IndBallL0(10 * compound), "B0"))
for solver_sym ∈ (:R2, :TR)
Expand Down
17 changes: 17 additions & 0 deletions test/test_AL.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@

# Symbols of constrained test problems taken from OptimizationProblems.ADNLPProblems.
problem_list = [:hs8]

@testset "Augmented Lagrangian" begin
  for prob_sym in problem_list
    # Instantiate the ADNLPModel associated with this problem symbol.
    nlp = eval(prob_sym)(backend = :optimized)
    for reg_term in (NormL1(1.0),)
      # Run the augmented-Lagrangian solver on the regularized problem.
      out = AL(nlp, reg_term, atol = 1e-3, verbose = 1)
      # The solver should converge and report small primal/dual residuals.
      @test out.status == :first_order
      @test out.primal_feas <= 1e-2
      @test out.dual_feas <= 1e-2
      # The returned solution must match the model's dimension and vector type.
      @test length(out.solution) == nlp.meta.nvar
      @test typeof(out.solution) == typeof(nlp.meta.x0)
    end
    finalize(nlp)
  end
end
8 changes: 8 additions & 0 deletions test/test_allocs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -65,5 +65,13 @@ end
@test stats.status == :first_order
end
end
# Allocation test for the augmented-Lagrangian solver: solve! on a warm solver
# is expected to be allocation-free once the workspace has been built.
@testset "Augmented Lagrangian" begin
# NOTE(review): `continue` here skips the body by jumping to the next iteration
# of an enclosing loop that is outside this view — confirm the enclosing scope
# is a `for` loop, otherwise this is a syntax error.
continue # FIXME : fails due to type instabilities in ADNLPModels...
# Build the regularized problem; `h` is presumably bound by an enclosing loop
# over regularizers — verify against the surrounding file.
reg_nlp = RegularizedNLPModel(hs8(backend = :generic), h)
solver = ALSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)
# @wrappedallocs measures allocations of the wrapped call; zero means the
# in-place solve reuses the preallocated workspace.
@test @wrappedallocs(solve!(solver, reg_nlp, stats, atol = 1e-3)) == 0
@test stats.status == :first_order
end
end
end
Loading