Skip to content

Commit

Permalink
Add Subgradients and Automated Function Writing
Browse files Browse the repository at this point in the history
-- Subgradients are now included in SCMC (SourceCodeMcCormick), which enables this package to use gradient-based lower-bounding methods.
-- An automated function generator (`fgen`) is included, which stitches together smaller SCMC functions to create relaxation-generating functions for arbitrarily complicated expressions.
-- The ParBB algorithm is updated to make use of subgradients through a GPU-accelerated two-phase simplex method, which parallelizes individual simplex operations to apply simultaneously to multiple simplex tableaus.
-- Updated documentation to reflect current SCMC operation.
  • Loading branch information
RXGottlieb committed Nov 7, 2024
1 parent b292d44 commit aafd695
Show file tree
Hide file tree
Showing 22 changed files with 6,843 additions and 222 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@ jobs:
matrix:

include:
- version: '1.8'
- version: '1.10' # LTS
os: ubuntu-latest
arch: x64
- version: '1.8'
- version: '1.10' # LTS
os: windows-latest
arch: x64
- version: '1'
Expand Down
8 changes: 4 additions & 4 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "SourceCodeMcCormick"
uuid = "a7283dc5-4ecf-47fb-a95b-1412723fc960"
authors = ["Robert Gottlieb <[email protected]>"]
version = "0.3.1"
version = "0.4.0"

[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
Expand All @@ -17,9 +17,9 @@ CUDA = "5"
DocStringExtensions = "0.8 - 0.9"
Graphs = "1"
IfElse = "0.1.0 - 0.1.1"
SymbolicUtils = "1"
Symbolics = "5"
julia = "1.6"
SymbolicUtils = "3"
Symbolics = "6"
julia = "1.10"

[extras]
McCormick = "53c679d3-6890-5091-8386-c291e8c8aaa1"
Expand Down
793 changes: 661 additions & 132 deletions README.md

Large diffs are not rendered by default.

288 changes: 287 additions & 1 deletion examples/ParBB/extension.jl
Original file line number Diff line number Diff line change
Expand Up @@ -48,14 +48,300 @@ Base.@kwdef mutable struct PointwiseGPU <: ExtendGPU
"Flag for stack prepopulation. Good if the total number
of nodes throughout the solve is expected to be large (default = true)"
prepopulate::Bool = true
"Frequency of garbage collection (number of iterations)"
gc_freq::Int = 300
"(In development) Number of points to use for multistarting the NLP solver"
multistart_points::Int = 1
end

"""
    PointwiseGPU(convex_func, var_count::Int; kwargs...)

Construct a `PointwiseGPU` extension with all internal storage preallocated
for `node_limit` simultaneous nodes and `var_count` decision variables.
"""
function PointwiseGPU(convex_func, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 50000,
                      prepopulate::Bool = true, gc_freq::Int = 300, multistart_points::Int = 1)
    # Preallocate per-node buffers sized to the parallel evaluation limit
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    return PointwiseGPU(convex_func, var_count, node_limit, alpha,
                        lower_storage, upper_storage, node_buffer, 0,
                        lvbs, uvbs, prepopulate, gc_freq, multistart_points)
end


"""
$(TYPEDEF)

The SubgradGPU integrator is meant to be paired with the SourceCodeMcCormick
package. SubgradGPU differs from PointwiseGPU in that SubgradGPU requires
the `convex_func_and_subgrad` term to return both evaluations of the convex
relaxation and evaluations of the subgradient of the convex relaxation.

$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SubgradGPU <: ExtendGPU
    "A user-defined function taking argument `p` and returning evaluations of the
    convex relaxation, its lower bound, and its subgradient ([cv, lo, subgrad])"
    convex_func_and_subgrad
    "Number of decision variables"
    np::Int
    "The number of nodes to evaluate in parallel (default = 50000)"
    node_limit::Int64 = 50000
    "A parameter that changes how far spread out points are. Should be
    in the range (0.0, 1.0]"
    α::Float64 = 0.5
    "Lower bound storage to hold calculated lower bounds for multiple nodes."
    lower_bound_storage::Vector{Float64} = Vector{Float64}()
    "Upper bound storage to hold calculated upper bounds for multiple nodes."
    upper_bound_storage::Vector{Float64} = Vector{Float64}()
    "Node storage to hold individual nodes outside of the main stack"
    node_storage::Vector{NodeBB} = Vector{NodeBB}()
    "An internal tracker of nodes in internal storage"
    node_len::Int = 0
    "Variable lower bounds to evaluate"
    # NOTE: Matrix{Float64}() has no zero-argument method, so the previous
    # default would throw a MethodError whenever this field was omitted from
    # keyword construction; an explicit empty 0x0 matrix is the valid spelling.
    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Variable upper bounds to evaluate"
    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    # node_count::Int = 0  # internal tracker for the count in the main stack (currently disabled;
    # kept as a comment so the stray docstring no longer floats free in the struct body)
    "Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
    "(In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
end

"""
    SubgradGPU(convex_func_and_subgrad, var_count::Int; kwargs...)

Construct a `SubgradGPU` extension with storage preallocated for `node_limit`
nodes across `var_count` decision variables.
"""
function SubgradGPU(convex_func_and_subgrad, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 50000,
                    prepopulate::Bool = true, multistart_points::Int = 1)
    # Working buffers sized for the maximum number of simultaneously evaluated nodes
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    return SubgradGPU(convex_func_and_subgrad, var_count, node_limit, alpha,
                      lower_storage, upper_storage, node_buffer, 0,
                      lvbs, uvbs, prepopulate, multistart_points)
end


"""
$(TYPEDEF)

The SimplexGPU_OnlyObj integrator is meant to be paired with the SourceCodeMcCormick
package. SimplexGPU_OnlyObj differs from SubgradGPU in that it can handle
inequality constraints, and that relaxations are made tighter by solving
linear programs within the lower bounding routine to make better use of
subgradient information. Like SubgradGPU, SimplexGPU_OnlyObj requires the
`convex_func_and_subgrad` term to return both evaluations of the convex
relaxation and evaluations of the subgradient of the convex relaxation.

$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SimplexGPU_OnlyObj <: ExtendGPU
    "A user-defined function taking argument `p` and returning evaluations of the
    convex relaxation, its lower bound, and its subgradient ([cv, lo, subgrad])"
    convex_func_and_subgrad
    "Number of decision variables"
    np::Int
    "The number of nodes to evaluate in parallel (default = 1024)"
    node_limit::Int64 = 1024
    "Lower bound storage to hold calculated lower bounds for multiple nodes."
    lower_bound_storage::Vector{Float64} = Vector{Float64}()
    "Upper bound storage to hold calculated upper bounds for multiple nodes."
    upper_bound_storage::Vector{Float64} = Vector{Float64}()
    "Node storage to hold individual nodes outside of the main stack"
    node_storage::Vector{NodeBB} = Vector{NodeBB}()
    "An internal tracker of nodes in internal storage"
    node_len::Int = 0
    "Variable lower bounds to evaluate"
    # NOTE: Matrix{Float64}() has no zero-argument method, so the previous
    # default would throw a MethodError whenever this field was omitted from
    # keyword construction; an explicit empty 0x0 matrix is the valid spelling.
    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Variable upper bounds to evaluate"
    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
    "Total number of cuts to do on each node"
    max_cuts::Int = 3
    "Frequency of garbage collection (number of iterations)"
    gc_freq::Int = 15
    "(In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
    "Internal diagnostic: accumulated time in relaxation construction (units unverified here)"
    relax_time::Float64 = 0.0
    "Internal diagnostic: accumulated time in the optimization step (units unverified here)"
    opt_time::Float64 = 0.0
    "Internal diagnostic counter (lower-bounding calls, presumably — maintained by the solver routines)"
    lower_counter::Int = 0
    "Internal diagnostic counter (nodes processed, presumably — maintained by the solver routines)"
    node_counter::Int = 0
end

"""
    SimplexGPU_OnlyObj(convex_func_and_subgrad, var_count::Int; kwargs...)

Construct a `SimplexGPU_OnlyObj` extension with storage preallocated for
`node_limit` nodes and diagnostics initialized to zero.
"""
function SimplexGPU_OnlyObj(convex_func_and_subgrad, var_count::Int; node_limit::Int = 1024,
                            prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15,
                            multistart_points::Int = 1)
    # Per-node working storage, sized by the parallel node limit
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    # Trailing zeros initialize relax_time, opt_time, lower_counter, node_counter
    return SimplexGPU_OnlyObj(convex_func_and_subgrad, var_count, node_limit,
                              lower_storage, upper_storage, node_buffer, 0,
                              lvbs, uvbs, prepopulate, max_cuts, gc_freq,
                              multistart_points, 0.0, 0.0, 0, 0)
end

"""
$(TYPEDEF)

The SimplexGPU_ObjAndCons structure is meant to handle optimization problems
with nontrivial constraints as well as a potentially nonlinear objective
function. Note that this struct requires the functions representing the
objective function and constraints to mutate arguments, rather than return
a tuple of results. SimplexGPU_ObjAndCons is not designed to handle mixed-integer
problems; NLPs only.

$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SimplexGPU_ObjAndCons <: ExtendGPU
    "A SCMC-generated or user-defined function taking arguments [cv, lo, [cv_subgrad]..., p...],
    which modifies `cv` to hold the convex relaxation of the objective function, `lo` to hold
    the lower bound of the inclusion monotonic interval extension of the objective function,
    and n instances of `cv_subgrad` that will hold the n subgradients of the convex relaxation
    of the objective function (where n is the dimensionality of the problem), all evaluated at
    points `p`"
    obj_fun
    "A vector of SCMC-generated or user-defined functions, each with the same form as `obj_fun`,
    but with arguments [cv, [cv_subgrad]..., p...], representing all of the LEQ inequality constraints"
    leq_cons
    "A vector of SCMC-generated or user-defined functions, taking arguments [cc, [cc_subgrad]..., p...],
    defined similarly to the objective function and LEQ constraints, representing all of the
    GEQ inequality constraints"
    geq_cons
    "A vector of SCMC-generated or user-defined functions, taking arguments
    [cv, cc, [cv_subgrad]..., [cc_subgrad]..., p...], with terms defined similarly to
    the objective function and inequality constraints, representing all of the equality constraints"
    eq_cons
    "Number of decision variables"
    np::Int
    "The number of nodes to evaluate in parallel (default = 1024)"
    node_limit::Int64 = 1024
    "Lower bound storage to hold calculated lower bounds for multiple nodes."
    lower_bound_storage::Vector{Float64} = Vector{Float64}()
    "Upper bound storage to hold calculated upper bounds for multiple nodes."
    upper_bound_storage::Vector{Float64} = Vector{Float64}()
    "Node storage to hold individual nodes outside of the main stack"
    node_storage::Vector{NodeBB} = Vector{NodeBB}()
    "An internal tracker of nodes in internal storage"
    node_len::Int = 0
    "Variable lower bounds to evaluate"
    # NOTE: Matrix{Float64}() has no zero-argument method, so the previous
    # default would throw a MethodError whenever this field was omitted from
    # keyword construction; an explicit empty 0x0 matrix is the valid spelling.
    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Variable upper bounds to evaluate"
    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
    "Total number of cuts to do on each node"
    max_cuts::Int = 3
    "Frequency of garbage collection (number of iterations)"
    gc_freq::Int = 15
    "(In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
    "Internal diagnostic: accumulated time in relaxation construction (units unverified here)"
    relax_time::Float64 = 0.0
    "Internal diagnostic: accumulated time in the optimization step (units unverified here)"
    opt_time::Float64 = 0.0
    "Internal diagnostic counter (lower-bounding calls, presumably — maintained by the solver routines)"
    lower_counter::Int = 0
    "Internal diagnostic counter (nodes processed, presumably — maintained by the solver routines)"
    node_counter::Int = 0
end

"""
    SimplexGPU_ObjAndCons(obj_fun, var_count::Int; kwargs...)

Construct a `SimplexGPU_ObjAndCons` extension. Constraint function vectors
default to empty; storage is preallocated for `node_limit` nodes.
"""
function SimplexGPU_ObjAndCons(obj_fun, var_count::Int; geq_cons=[], leq_cons=[], eq_cons=[], node_limit::Int = 1024,
                               prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15, multistart_points::Int = 1)
    # Per-node working storage, sized by the parallel node limit
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    # Field order is (obj, leq, geq, eq); trailing zeros initialize the
    # relax_time/opt_time/lower_counter/node_counter diagnostics
    return SimplexGPU_ObjAndCons(obj_fun, leq_cons, geq_cons, eq_cons, var_count, node_limit,
                                 lower_storage, upper_storage, node_buffer, 0,
                                 lvbs, uvbs, prepopulate, max_cuts, gc_freq,
                                 multistart_points, 0.0, 0.0, 0, 0)
end






"""
$(TYPEDEF)

Objective-only SimplexGPU variant whose `obj_fun` mutates preallocated
array arguments rather than returning results (see the `obj_fun` field
docstring for the expected argument layout).

$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SimplexGPU_ObjOnly_Mat <: ExtendGPU
    "A SCMC-generated or user-defined function taking arguments [cv, lo, [cv_subgrad]..., p...],
    which modifies `cv` to hold the convex relaxation of the objective function, `lo` to hold
    the lower bound of the inclusion monotonic interval extension of the objective function,
    and n instances of `cv_subgrad` that will hold the n subgradients of the convex relaxation
    of the objective function (where n is the dimensionality of the problem), all evaluated at
    points `p`"
    obj_fun
    "Number of decision variables"
    np::Int
    "The number of nodes to evaluate in parallel (default = 1024)"
    node_limit::Int64 = 1024
    "Lower bound storage to hold calculated lower bounds for multiple nodes."
    lower_bound_storage::Vector{Float64} = Vector{Float64}()
    "Upper bound storage to hold calculated upper bounds for multiple nodes."
    upper_bound_storage::Vector{Float64} = Vector{Float64}()
    "Node storage to hold individual nodes outside of the main stack"
    node_storage::Vector{NodeBB} = Vector{NodeBB}()
    "An internal tracker of nodes in internal storage"
    node_len::Int = 0
    "Variable lower bounds to evaluate"
    # NOTE: Matrix{Float64}() has no zero-argument method, so the previous
    # default would throw a MethodError whenever this field was omitted from
    # keyword construction; an explicit empty 0x0 matrix is the valid spelling.
    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Variable upper bounds to evaluate"
    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
    "Total number of cuts to do on each node"
    max_cuts::Int = 3
    "Frequency of garbage collection (number of iterations)"
    gc_freq::Int = 15
    "(In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
    "Internal diagnostic: accumulated time in relaxation construction (units unverified here)"
    relax_time::Float64 = 0.0
    "Internal diagnostic: accumulated time in the optimization step (units unverified here)"
    opt_time::Float64 = 0.0
    "Internal diagnostic counter (lower-bounding calls, presumably — maintained by the solver routines)"
    lower_counter::Int = 0
    "Internal diagnostic counter (nodes processed, presumably — maintained by the solver routines)"
    node_counter::Int = 0
end

"""
    SimplexGPU_ObjOnly_Mat(obj_fun, var_count::Int; kwargs...)

Construct a `SimplexGPU_ObjOnly_Mat` extension with storage preallocated for
`node_limit` nodes and diagnostics initialized to zero.
"""
function SimplexGPU_ObjOnly_Mat(obj_fun, var_count::Int; node_limit::Int = 1024,
                                prepopulate::Bool = true, max_cuts::Int = 3, gc_freq::Int = 15,
                                multistart_points::Int = 1)
    # Per-node working storage, sized by the parallel node limit
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    # Trailing zeros initialize relax_time, opt_time, lower_counter, node_counter
    return SimplexGPU_ObjOnly_Mat(obj_fun, var_count, node_limit,
                                  lower_storage, upper_storage, node_buffer, 0,
                                  lvbs, uvbs, prepopulate, max_cuts, gc_freq,
                                  multistart_points, 0.0, 0.0, 0, 0)
end


"""
$(TYPEDEF)

This is a testing method/struct, to see if we can check fewer points per node
when we construct the LPs and still get all the same benefits. The normal
SimplexGPU method uses 2n+1 points, where n is the problem dimensionality.
This method only uses a single point in the center of the node, and can
therefore get away with more simultaneous LPs, since each one is significantly
smaller.

$(TYPEDFIELDS)
"""
Base.@kwdef mutable struct SimplexGPU_Single <: ExtendGPU
    "A user-defined function taking argument `p` and returning evaluations of the
    convex relaxation, its lower bound, and its subgradient ([cv, lo, subgrad])"
    convex_func_and_subgrad
    "Number of decision variables"
    np::Int
    "The number of nodes to evaluate in parallel (default = 2500)"
    node_limit::Int64 = 2500
    "A parameter that changes how far spread out points are. Should be
    in the range (0.0, 1.0]"
    α::Float64 = 0.5
    "Lower bound storage to hold calculated lower bounds for multiple nodes."
    lower_bound_storage::Vector{Float64} = Vector{Float64}()
    "Upper bound storage to hold calculated upper bounds for multiple nodes."
    upper_bound_storage::Vector{Float64} = Vector{Float64}()
    "Node storage to hold individual nodes outside of the main stack"
    node_storage::Vector{NodeBB} = Vector{NodeBB}()
    "An internal tracker of nodes in internal storage"
    node_len::Int = 0
    "Variable lower bounds to evaluate"
    # NOTE: Matrix{Float64}() has no zero-argument method, so the previous
    # default would throw a MethodError whenever this field was omitted from
    # keyword construction; an explicit empty 0x0 matrix is the valid spelling.
    all_lvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Variable upper bounds to evaluate"
    all_uvbs::Matrix{Float64} = Matrix{Float64}(undef, 0, 0)
    "Flag for stack prepopulation. Good if the total number
    of nodes throughout the solve is expected to be large (default = true)"
    prepopulate::Bool = true
    "Total number of cuts to do on each node"
    max_cuts::Int = 3
    "(In development) Number of points to use for multistarting the NLP solver"
    multistart_points::Int = 1
end

"""
    SimplexGPU_Single(convex_func_and_subgrad, var_count::Int; kwargs...)

Construct a `SimplexGPU_Single` extension with storage preallocated for
`node_limit` nodes and `var_count` decision variables.
"""
function SimplexGPU_Single(convex_func_and_subgrad, var_count::Int; alpha::Float64 = 0.01, node_limit::Int = 2500,
                           prepopulate::Bool = true, max_cuts::Int = 3, multistart_points::Int = 1)
    # Per-node working storage, sized by the parallel node limit
    lower_storage = Vector{Float64}(undef, node_limit)
    upper_storage = Vector{Float64}(undef, node_limit)
    node_buffer = Vector{NodeBB}(undef, node_limit)
    lvbs = Matrix{Float64}(undef, node_limit, var_count)
    uvbs = Matrix{Float64}(undef, node_limit, var_count)
    return SimplexGPU_Single(convex_func_and_subgrad, var_count, node_limit, alpha,
                             lower_storage, upper_storage, node_buffer, 0,
                             lvbs, uvbs, prepopulate, max_cuts, multistart_points)
end
Loading

0 comments on commit aafd695

Please sign in to comment.