Skip to content

Commit 6373690

Browse files
Add the examples
1 parent 269bad8 commit 6373690

File tree

5 files changed

+95
-8
lines changed

5 files changed

+95
-8
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
paper/examples/Manifest.toml

paper/examples/Project.toml

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsModifiers = "e01155f1-5c6f-4375-a9d8-616dd036575f"
ProximalOperators = "a725b495-10eb-56fe-b38b-717eba820537"
RegularizedOptimization = "196f2941-2d58-45ba-9f13-43a2532b2fa8"
RegularizedProblems = "ea076b23-609f-44d2-bb12-a4ae45328278"
ShiftedProximalOperators = "d4fd37fa-580c-4e43-9b30-361c21aae263"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[compat]
NLPModels = "0.19, 0.20, 0.21"
NLPModelsModifiers = "0.7"
ProximalOperators = "0.15"
RegularizedProblems = "0.1"
ShiftedProximalOperators = "0.2"

paper/examples/example1.jl

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
# example1.jl — SVM with an ℓ0 regularizer on MNIST, solved with R2DH.
#
# Builds a support-vector-machine model from MNIST digits 1 vs 7, wraps it in
# an LBFGS Hessian approximation, attaches an L0-norm regularizer, and solves
# the resulting regularized problem with the R2DH solver.

using LinearAlgebra, Random
using ProximalOperators
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
using MLDatasets
using Test  # fix: `@test` below requires the Test stdlib to be loaded

# Seed the global RNG so data preprocessing and the solver run are reproducible.
random_seed = 1234
Random.seed!(random_seed)

# Load MNIST from MLDatasets.
# NOTE(review): `MNIST.traindata()` is deprecated in MLDatasets ≥ 0.7 in favor
# of `MNIST(split = :train)` — confirm against the [compat] bound in Project.toml.
imgs, labels = MLDatasets.MNIST.traindata()

# Use RegularizedProblems' preprocessing: keep the two digit classes (1, 7).
A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false)

# Build the smooth SVM model.
model, _, _ = RegularizedProblems.svm_model(A, b)

# Define the Hessian approximation (limited-memory BFGS).
f = LBFGSModel(model)

# Define the nonsmooth regularizer (L0 norm) with penalty λ.
λ = 1.0e-1
h = NormL0(λ)

# Define the regularized NLP model f + h.
reg_nlp = RegularizedNLPModel(f, h)

# Choose a solver (R2DH) and an execution statistics tracker.
solver_r2dh = R2DHSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)

# Solve the problem starting from the model's default initial point.
solve!(solver_r2dh, reg_nlp, stats, x = f.meta.x0, σk = 1e-6, atol = 2e-5, rtol = 2e-5, verbose = 1)

@test stats.status == :first_order

paper/examples/example2.jl

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
# example2.jl — FitzHugh-Nagumo inverse problem with an ℓ1 regularizer,
# solved with the trust-region (TR) solver.
#
# NOTE(review): this example depends on the TR-related PRs being merged.

using LinearAlgebra
using DifferentialEquations, ProximalOperators
using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems
using Test  # fix: `@test` below requires the Test stdlib to be loaded

# Define the FitzHugh-Nagumo inverse problem.
model, _, _ = RegularizedProblems.fh_model()

# Define the Hessian approximation (limited-memory BFGS).
# fix: was `LBFGSModel(fh_model)` — `fh_model` is undefined here; the smooth
# model is bound to `model` above.
f = LBFGSModel(model)

# Define the nonsmooth regularizer (L1 norm) with penalty λ.
λ = 0.1
h = NormL1(λ)

# Define the regularized NLP model f + h.
reg_nlp = RegularizedNLPModel(f, h)

# Choose a solver (TR) and an execution statistics tracker.
solver_tr = TRSolver(reg_nlp)
stats = RegularizedExecutionStats(reg_nlp)

# Solve the problem starting from the model's default initial point.
solve!(solver_tr, reg_nlp, stats, x = f.meta.x0, atol = 1e-3, rtol = 1e-4, verbose = 10, ν = 1.0e+2)

@test stats.status == :first_order

paper/paper.md

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -103,14 +103,20 @@ where $\lambda = 10^{-1}$ and $A \in \mathbb{R}^{m \times n}$, with $n = 784$ re
103103
```julia
104104
using LinearAlgebra, Random
105105
using ProximalOperators
106-
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization, SolverCore
106+
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
107107
using MLDatasets
108108

109109
random_seed = 1234
110110
Random.seed!(random_seed)
111111

112-
# Load the MNIST dataset
113-
model, _, _ = RegularizedProblems.svm_train_model()
112+
# Load MNIST from MLDatasets
113+
imgs, labels = MLDatasets.MNIST.traindata()
114+
115+
# Use RegularizedProblems' preprocessing
116+
A, b = RegularizedProblems.generate_data(imgs, labels, (1, 7), false)
117+
118+
# Build the models
119+
model, _, _ = RegularizedProblems.svm_model(A, b)
114120

115121
# Define the Hessian approximation
116122
f = LBFGSModel(model)
@@ -135,12 +141,12 @@ Another example is the FitzHugh-Nagumo inverse problem with an $\ell_1$ penalty,
135141

136142
```julia
137143
using LinearAlgebra
138-
using DifferentialEquations, ProximalOperators
139-
using ADNLPModels, NLPModels, NLPModelsModifiers, RegularizedOptimization, RegularizedProblems
144+
using ProximalOperators
145+
using NLPModels, NLPModelsModifiers, RegularizedProblems, RegularizedOptimization
146+
using DifferentialEquations, ADNLPModels
140147

141-
# Define the Fitzagerald Higgs problem
142-
data, _, _, _, _ = RegularizedProblems.FH_smooth_term()
143-
fh_model = ADNLPModel(misfit, ones(5))
148+
# Define the Fitzhugh-Nagumo problem
149+
model, _, _ = RegularizedProblems.fh_model()
144150

145151
# Define the Hessian approximation
146152
f = LBFGSModel(fh_model)

0 commit comments

Comments
 (0)