Fix #626 (#628)

Merged · 6 commits · Jun 25, 2018
Changes from all commits
11 changes: 10 additions & 1 deletion src/multivariate/optimize/optimize.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,18 @@ update_g!(d, state, method) = nothing
function update_g!(d, state, method::M) where M<:Union{FirstOrderOptimizer, Newton}
# Update the function value and gradient
value_gradient!(d, state.x)
if M <: FirstOrderOptimizer #only for methods that support manifold optimization
project_tangent!(method.manifold, gradient(d), state.x)
end
end
update_fg!(d, state, method) = nothing
update_fg!(d, state, method::ZerothOrderOptimizer) = value!(d, state.x)
update_fg!(d, state, method::M) where M<:Union{FirstOrderOptimizer, Newton} = value_gradient!(d, state.x)
function update_fg!(d, state, method::M) where M<:Union{FirstOrderOptimizer, Newton}
value_gradient!(d, state.x)
if M <: FirstOrderOptimizer #only for methods that support manifold optimization
project_tangent!(method.manifold, gradient(d), state.x)
end
end

# Update the Hessian
update_h!(d, state, method) = nothing
Expand Down Expand Up @@ -48,6 +56,7 @@ function optimize(d::D, initial_x::Tx, method::M,

update_state!(d, state, method) && break # it returns true if it's forced by something in update! to stop (eg dx_dg == 0.0 in BFGS, or linesearch errors)
update_g!(d, state, method) # TODO: Should this be `update_fg!`?

x_converged, f_converged,
g_converged, converged, f_increased = assess_convergence(state, d, options)
# For some problems it may be useful to require `f_converged` to be hit multiple times
Expand Down
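For context on what the new calls do: a manifold in Optim.jl supplies `project_tangent!(m, g, x)`, which projects the Euclidean gradient `g` onto the tangent space of the manifold at `x`, in place, so that the subsequent line search moves along the manifold instead of off it. Below is a standalone sketch of that projection for the unit sphere; the `UnitSphere` type and these method definitions are illustrative only and are not wired into Optim's own manifold types (Optim ships its own `Sphere`/`Stiefel`).

```julia
using LinearAlgebra

# Hypothetical stand-in for a manifold type, just to show the projection idea.
struct UnitSphere end  # points x with norm(x) == 1

# Pull an arbitrary point back onto the manifold (what a retraction step does).
retract!(::UnitSphere, x) = (x ./= norm(x); x)

# Project g onto the tangent space at x: remove the radial component,
# i.e. the part of g parallel to x itself.
function project_tangent!(::UnitSphere, g, x)
    g .-= dot(g, x) .* x
    return g
end
```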
16 changes: 9 additions & 7 deletions test/multivariate/manifolds.jl
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,15 @@
manif = Optim.Stiefel()

# AcceleratedGradientDescent should be compatible also, but I haven't been able to make it converge
for method in (Optim.GradientDescent, Optim.ConjugateGradient, Optim.LBFGS, Optim.BFGS,
Optim.NGMRES, Optim.OACCEL)
debug_printing && print_with_color(:green, "Solver: $(summary(method()))\n")
res = Optim.optimize(fmanif, gmanif!, x0, method(manifold=manif), Optim.Options(allow_f_increases=true))
debug_printing && print_with_color(:green, "Iter\tf-calls\tg-calls\n")
debug_printing && print_with_color(:red, "$(Optim.iterations(res))\t$(Optim.f_calls(res))\t$(Optim.g_calls(res))\n")
@test Optim.converged(res)
for ls in (Optim.BackTracking,Optim.HagerZhang,Optim.StrongWolfe,Optim.MoreThuente)
for method in (Optim.GradientDescent, Optim.ConjugateGradient, Optim.LBFGS, Optim.BFGS,
Optim.NGMRES, Optim.OACCEL)
debug_printing && print_with_color(:green, "Solver: $(summary(method())), linesearch: $(summary(ls()))\n")
res = Optim.optimize(fmanif, gmanif!, x0, method(manifold=manif,linesearch=ls()), Optim.Options(allow_f_increases=true,g_tol=1e-6))
debug_printing && print_with_color(:green, "Iter\tf-calls\tg-calls\n")
debug_printing && print_with_color(:red, "$(Optim.iterations(res))\t$(Optim.f_calls(res))\t$(Optim.g_calls(res))\n")
@test Optim.converged(res)
end
end
res = Optim.optimize(fmanif, gmanif!, x0, Optim.MomentumGradientDescent(mu=0.0, manifold=manif))
@test Optim.converged(res)
Expand Down
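Putting the tested combination to work: a minimal runnable sketch of manifold optimization with an explicit linesearch, using the `manifold=`/`linesearch=` keywords exercised by the test above. The Rayleigh-quotient objective here is a hypothetical stand-in for the test's `fmanif`/`gmanif!` (which are defined earlier in that file).

```julia
using Optim, LinearAlgebra

# Minimize the Rayleigh quotient x' * A * x on the unit sphere; the
# minimizer is the eigenvector of A's smallest eigenvalue (here e_1).
A = Diagonal(1.0:4.0)
f(x) = dot(x, A * x)
g!(G, x) = (G .= 2 .* (A * x))

x0 = normalize(ones(4))
res = Optim.optimize(f, g!, x0,
                     Optim.LBFGS(manifold = Optim.Sphere(),
                                 linesearch = Optim.BackTracking()),
                     Optim.Options(allow_f_increases = true, g_tol = 1e-6))
@show Optim.minimizer(res)  # ≈ ±[1, 0, 0, 0]
```

The gradient passed to `g!` is Euclidean; the `update_g!`/`update_fg!` changes in this PR are what project it onto the tangent space before the linesearch sees it.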