Skip to content

Commit cbeff59

Browse files
authored
Add jprod benchmark (#256)
* Add product benchmarks; save-result-file fixes (tested locally); add hessian residual benchmark * Uncomment hessian residual include
1 parent 5f72299 commit cbeff59

15 files changed

+551
-277
lines changed

benchmark/Manifest.toml

Lines changed: 160 additions & 266 deletions
Large diffs are not rendered by default.

benchmark/Project.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,4 +25,4 @@ Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
2525
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
2626

2727
[compat]
28-
OptimizationProblems = "0.8"
28+
OptimizationProblems = "0.8"

benchmark/benchmarks.jl

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,4 +21,9 @@ include("jacobian/benchmarks_jacobian_residual.jl")
2121
include("hessian/benchmarks_coloring.jl")
2222
include("hessian/benchmarks_hessian.jl")
2323
include("hessian/benchmarks_hessian_lagrangian.jl")
24-
# include("hessian/benchmarks_hessian_residual.jl")
24+
include("hessian/benchmarks_hessian_residual.jl")
25+
26+
include("jacobian/benchmarks_jprod.jl")
27+
include("jacobian/benchmarks_jprod_residual.jl")
28+
include("jacobian/benchmarks_jtprod.jl")
29+
include("jacobian/benchmarks_jtprod_residual.jl")

benchmark/hessian/benchmarks_hessian.jl

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ INTRODUCTION OF THIS BENCHMARK:
33
44
We test here the function `hess_coord!` for ADNLPModels with different backends:
55
- ADNLPModels.SparseADHessian
6+
- ADNLPModels.SparseReverseADHessian
67
=#
78
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings
89

@@ -12,7 +13,10 @@ data_types = [Float32, Float64]
1213

1314
benchmark_list = [:optimized]
1415

15-
benchmarked_hessian_backend = Dict("sparse" => ADNLPModels.SparseADHessian)
16+
benchmarked_hessian_backend = Dict(
17+
"sparse" => ADNLPModels.SparseADHessian,
18+
"sparse-reverse" => ADNLPModels.SparseReverseADHessian,
19+
)
1620
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
1721
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]
1822

benchmark/hessian/benchmarks_hessian_lagrangian.jl

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ INTRODUCTION OF THIS BENCHMARK:
33
44
We test here the function `hess_coord!` for ADNLPModels with different backends:
55
- ADNLPModels.SparseADHessian
6+
- ADNLPModels.SparseReverseADHessian
67
=#
78
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings
89

@@ -12,7 +13,10 @@ data_types = [Float32, Float64]
1213

1314
benchmark_list = [:optimized]
1415

15-
benchmarked_hessian_backend = Dict("sparse" => ADNLPModels.SparseADHessian)
16+
benchmarked_hessian_backend = Dict(
17+
"sparse" => ADNLPModels.SparseADHessian,
18+
"sparse-reverse" => ADNLPModels.SparseReverseADHessian,
19+
)
1620
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
1721
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]
1822

@@ -37,8 +41,8 @@ for f in benchmark_list
3741
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
3842
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
3943
@info " $(pb): $T with $n vars and $m cons"
40-
y0 = 10 * T[-(-1.0)^i for i = 1:m]
41-
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y0) setup =
44+
y = 10 * T[-(-1.0)^i for i = 1:m]
45+
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y) setup =
4246
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
4347
end
4448
end
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
#=
2+
INTRODUCTION OF THIS BENCHMARK:
3+
4+
We test here the function `hess_coord_residual` for ADNLPModels with different backends:
5+
- ADNLPModels.SparseADHessian
6+
- ADNLPModels.SparseReverseADHessian
7+
=#
8+
using ForwardDiff, SparseConnectivityTracer, SparseMatrixColorings
9+
10+
include("additional_backends.jl")
11+
12+
data_types = [Float32, Float64]
13+
14+
benchmark_list = [:optimized]
15+
16+
benchmarked_hessian_backend = Dict(
17+
"sparse" => ADNLPModels.SparseADHessian,
18+
#"sparse-reverse" => ADNLPModels.SparseReverseADHessian, #failed
19+
)
20+
get_backend_list(::Val{:optimized}) = keys(benchmarked_hessian_backend)
21+
get_backend(::Val{:optimized}, b::String) = benchmarked_hessian_backend[b]
22+
23+
problem_sets = Dict("scalable_nls" => scalable_nls_problems)
24+
nscal = 1000
25+
26+
name_backend = "hessian_residual_backend"
27+
fun = hess_coord_residual
28+
@info "Initialize $(fun) benchmark"
29+
SUITE["$(fun)"] = BenchmarkGroup()
30+
31+
for f in benchmark_list
32+
SUITE["$(fun)"][f] = BenchmarkGroup()
33+
for T in data_types
34+
SUITE["$(fun)"][f][T] = BenchmarkGroup()
35+
for s in keys(problem_sets)
36+
SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
37+
for b in get_backend_list(Val(f))
38+
SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
39+
backend = get_backend(Val(f), b)
40+
for pb in problem_sets[s]
41+
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
42+
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
43+
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
44+
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
45+
v = 10 * T[-(-1.0)^i for i = 1:nequ]
46+
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls), $v) setup =
47+
(nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
48+
end
49+
end
50+
end
51+
end
52+
end
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
#=
2+
INTRODUCTION OF THIS BENCHMARK:
3+
4+
We test here the function `hprod!` for ADNLPModels with different backends:
5+
- ADNLPModels.ForwardDiffADHvprod
6+
- ADNLPModels.ReverseDiffADHvprod
7+
=#
8+
using ForwardDiff, ReverseDiff
9+
10+
include("additional_backends.jl")
11+
12+
data_types = [Float32, Float64]
13+
14+
benchmark_list = [:optimized]
15+
16+
benchmarked_hprod_backend =
17+
Dict("forward" => ADNLPModels.ForwardDiffADHvprod, "reverse" => ADNLPModels.ReverseDiffADHvprod)
18+
get_backend_list(::Val{:optimized}) = keys(benchmarked_hprod_backend)
19+
get_backend(::Val{:optimized}, b::String) = benchmarked_hprod_backend[b]
20+
21+
problem_sets = Dict("scalable" => scalable_problems)
22+
nscal = 1000
23+
24+
name_backend = "hprod_backend"
25+
fun = hprod!
26+
@info "Initialize $(fun) benchmark"
27+
SUITE["$(fun)"] = BenchmarkGroup()
28+
29+
for f in benchmark_list
30+
SUITE["$(fun)"][f] = BenchmarkGroup()
31+
for T in data_types
32+
SUITE["$(fun)"][f][T] = BenchmarkGroup()
33+
for s in keys(problem_sets)
34+
SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
35+
for b in get_backend_list(Val(f))
36+
SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
37+
backend = get_backend(Val(f), b)
38+
for pb in problem_sets[s]
39+
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
40+
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
41+
@info " $(pb): $T with $n vars"
42+
v = [sin(T(i) / 10) for i = 1:n]
43+
Hv = Vector{T}(undef, n)
44+
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup =
45+
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
46+
end
47+
end
48+
end
49+
end
50+
end
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
#=
2+
INTRODUCTION OF THIS BENCHMARK:
3+
4+
We test here the function `hprod!` (Lagrangian-Hessian product with constraint multipliers `y`) for ADNLPModels with different backends:
5+
- ADNLPModels.ForwardDiffADHvprod
6+
- ADNLPModels.ReverseDiffADHvprod
7+
=#
8+
using ForwardDiff, ReverseDiff
9+
10+
include("additional_backends.jl")
11+
12+
data_types = [Float32, Float64]
13+
14+
benchmark_list = [:optimized]
15+
16+
benchmarked_hprod_backend =
17+
Dict("forward" => ADNLPModels.ForwardDiffADHvprod, "reverse" => ADNLPModels.ReverseDiffADHvprod)
18+
get_backend_list(::Val{:optimized}) = keys(benchmarked_hprod_backend)
19+
get_backend(::Val{:optimized}, b::String) = benchmarked_hprod_backend[b]
20+
21+
problem_sets = Dict("scalable_cons" => scalable_cons_problems)
22+
nscal = 1000
23+
24+
name_backend = "hprod_backend"
25+
fun = hprod!
26+
@info "Initialize $(fun) benchmark"
27+
SUITE["$(fun)"] = BenchmarkGroup()
28+
29+
for f in benchmark_list
30+
SUITE["$(fun)"][f] = BenchmarkGroup()
31+
for T in data_types
32+
SUITE["$(fun)"][f][T] = BenchmarkGroup()
33+
for s in keys(problem_sets)
34+
SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
35+
for b in get_backend_list(Val(f))
36+
SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
37+
backend = get_backend(Val(f), b)
38+
for pb in problem_sets[s]
39+
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
40+
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
41+
@info " $(pb): $T with $n vars"
42+
y = 10 * T[-(-1.0)^i for i = 1:m]
43+
v = [sin(T(i) / 10) for i = 1:n]
44+
Hv = Vector{T}(undef, n)
45+
SUITE["$(fun)"][f][T][s][b][pb] =
46+
@benchmarkable $fun(nlp, get_x0(nlp), $y, $v, $Hv) setup =
47+
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
48+
end
49+
end
50+
end
51+
end
52+
end

benchmark/jacobian/benchmarks_jacobian_residual.jl

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,8 @@ for f in benchmark_list
3636
for pb in problem_sets[s]
3737
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
3838
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
39-
@info " $(pb): $T with $n vars and $m cons"
39+
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
40+
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
4041
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls)) setup =
4142
(nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
4243
end
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
#=
2+
INTRODUCTION OF THIS BENCHMARK:
3+
4+
We test here the function `jprod!` for ADNLPModels with different backends:
5+
- ADNLPModels.ForwardDiffADJprod
6+
- ADNLPModels.ReverseDiffADJprod
7+
=#
8+
using ForwardDiff, ReverseDiff
9+
10+
include("additional_backends.jl")
11+
12+
data_types = [Float32, Float64]
13+
14+
benchmark_list = [:optimized]
15+
16+
benchmarked_jprod_backend =
17+
Dict("forward" => ADNLPModels.ForwardDiffADJprod, "reverse" => ADNLPModels.ReverseDiffADJprod)
18+
get_backend_list(::Val{:optimized}) = keys(benchmarked_jprod_backend)
19+
get_backend(::Val{:optimized}, b::String) = benchmarked_jprod_backend[b]
20+
21+
problem_sets = Dict("scalable" => scalable_cons_problems)
22+
nscal = 1000
23+
24+
name_backend = "jprod_backend"
25+
fun = jprod!
26+
@info "Initialize $(fun) benchmark"
27+
SUITE["$(fun)"] = BenchmarkGroup()
28+
29+
for f in benchmark_list
30+
SUITE["$(fun)"][f] = BenchmarkGroup()
31+
for T in data_types
32+
SUITE["$(fun)"][f][T] = BenchmarkGroup()
33+
for s in keys(problem_sets)
34+
SUITE["$(fun)"][f][T][s] = BenchmarkGroup()
35+
for b in get_backend_list(Val(f))
36+
SUITE["$(fun)"][f][T][s][b] = BenchmarkGroup()
37+
backend = get_backend(Val(f), b)
38+
for pb in problem_sets[s]
39+
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
40+
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
41+
@info " $(pb): $T with $n vars and $m cons"
42+
Jv = Vector{T}(undef, m)
43+
v = 10 * T[-(-1.0)^i for i = 1:n]
44+
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
45+
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
46+
end
47+
end
48+
end
49+
end
50+
end

0 commit comments

Comments
 (0)