Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PINNErrorVsTime Benchmark Updates #1159

Open
wants to merge 17 commits into
base: master
Choose a base branch
from
Open
3,157 changes: 2,217 additions & 940 deletions benchmarks/PINNErrorsVsTime/Manifest.toml

Large diffs are not rendered by default.

23 changes: 11 additions & 12 deletions benchmarks/PINNErrorsVsTime/Project.toml
Original file line number Diff line number Diff line change
@@ -1,29 +1,28 @@
[deps]
Cuba = "8a292aeb-7a57-582c-b821-06e4c11590b1"
Cubature = "667455a9-e2ce-5579-9412-b964f529a492"
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Integrals = "de52edbc-65ea-441a-8357-d3a637375a31"
IntegralsCuba = "e00cd5f1-6337-4131-8b37-28b2fe4cd6cb"
IntegralsCubature = "c31f79ba-6e32-46d4-a52f-182a8ac42a54"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
NeuralPDE = "315f7962-48a3-4962-8226-d0f33b1235f0"
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
OptimizationFlux = "253f991c-a7b2-45f8-8852-8b9a9df78a86"
OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
QuasiMonteCarlo = "8a4e6c94-4038-4cdc-81c3-7e6ffdb2a71b"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
SciMLBenchmarks = "31c91b34-3c75-11e9-0341-95557aab0344"

[compat]
DelimitedFiles = "1"
Integrals = "3, 4"
IntegralsCuba = "0.2, 0.3"
IntegralsCubature = "0.2"
Lux = "0.4, 0.5"
ModelingToolkit = "8"
Integrals = "4"
Lux = "1"
ModelingToolkit = "9"
NeuralPDE = "5"
Optimization = "3"
OptimizationFlux = "0.1, 0.2"
OptimizationOptimJL = "0.1, 0.2, 0.3"
Optimization = "4"
OptimizationOptimJL = "0.4"
Plots = "1"
QuasiMonteCarlo = "0.2"
QuasiMonteCarlo = "0.3"
SciMLBenchmarks = "0.1"
211 changes: 119 additions & 92 deletions benchmarks/PINNErrorsVsTime/allen_cahn_et.jmd
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,14 @@ science-guided AI techniques.

```julia
using NeuralPDE
using Integrals, IntegralsCubature, IntegralsCuba
using OptimizationFlux, ModelingToolkit, Optimization, OptimizationOptimJL
using Integrals, Cubature, Cuba
using ModelingToolkit, Optimization, OptimizationOptimJL, OptimizationOptimisers
using Lux, Plots
using DelimitedFiles
using QuasiMonteCarlo
using QuasiMonteCarlo, Random
import ModelingToolkit: Interval, infimum, supremum

function allen_cahn(strategy, minimizer, maxIters)

function allen_cahn(strategy, minimizer, maxIters, time_limit=3600.0) # Added time_limit parameter (default 1 hour in seconds)
## DECLARATIONS
@parameters t x1 x2 x3 x4
@variables u(..)
Expand All @@ -32,19 +31,10 @@ function allen_cahn(strategy, minimizer, maxIters)
Dxx3 = Differential(x3)^2
Dxx4 = Differential(x4)^2


# Discretization
tmax = 1.0
x1width = 1.0
x2width = 1.0
x3width = 1.0
x4width = 1.0

tMeshNum = 10
x1MeshNum = 10
x2MeshNum = 10
x3MeshNum = 10
x4MeshNum = 10
x1width = x2width = x3width = x4width = 1.0
tMeshNum = x1MeshNum = x2MeshNum = x3MeshNum = x4MeshNum = 10

dt = tmax / tMeshNum
dx1 = x1width / x1MeshNum
Expand All @@ -53,89 +43,126 @@ function allen_cahn(strategy, minimizer, maxIters)
dx4 = x4width / x4MeshNum

domains = [t ∈ Interval(0.0, tmax),
x1 ∈ Interval(0.0, x1width),
x2 ∈ Interval(0.0, x2width),
x3 ∈ Interval(0.0, x3width),
x4 ∈ Interval(0.0, x4width)]
x1 ∈ Interval(0.0, x1width),
x2 ∈ Interval(0.0, x2width),
x3 ∈ Interval(0.0, x3width),
x4 ∈ Interval(0.0, x4width)]

# Define the coordinates
ts = 0.0:dt:tmax
x1s = 0.0:dx1:x1width
x2s = 0.0:dx2:x2width
x3s = 0.0:dx3:x3width
x4s = 0.0:dx4:x4width

# Operators
Δu = Dxx1(u(t, x1, x2, x3, x4)) + Dxx2(u(t, x1, x2, x3, x4)) + Dxx3(u(t, x1, x2, x3, x4)) + Dxx4(u(t, x1, x2, x3, x4)) # Laplacian

Δu = Dxx1(u(t,x1,x2,x3,x4)) + Dxx2(u(t,x1,x2,x3,x4)) +
Dxx3(u(t,x1,x2,x3,x4)) + Dxx4(u(t,x1,x2,x3,x4))

# Equation
eq = Dt(u(t, x1, x2, x3, x4)) - Δu - u(t, x1, x2, x3, x4) + u(t, x1, x2, x3, x4) * u(t, x1, x2, x3, x4) * u(t, x1, x2, x3, x4) ~ 0 #ALLEN CAHN EQUATION

initialCondition = 1 / (2 + 0.4 * (x1 * x1 + x2 * x2 + x3 * x3 + x4 * x4)) # see PNAS paper

bcs = [u(0, x1, x2, x3, x4) ~ initialCondition] #from literature

## NEURAL NETWORK
n = 10 #neuron number
chain = Lux.Chain(Lux.Dense(5, n, tanh), Lux.Dense(n, n, tanh), Lux.Dense(n, 1)) #Neural network from OptimizationFlux library

indvars = [t, x1, x2, x3, x4] #physically independent variables
depvars = [u(t, x1, x2, x3, x4)] #dependent (target) variable

dim = length(domains)

losses = []
error = []
times = []

dx_err = 0.2

error_strategy = GridTraining(dx_err)

discretization_ = PhysicsInformedNN(chain, error_strategy)
@named pde_system_ = PDESystem(eq, bcs, domains, indvars, depvars)
prob_ = discretize(pde_system_, discretization_)

function loss_function_(θ, p)
return prob_.f.f(θ, nothing)
end

cb_ = function (p, l)
deltaT_s = time_ns() #Start a clock when the callback begins; this will also measure the computation time of the uniform errors

ctime = time_ns() - startTime - timeCounter #This variable is the time to use for the time benchmark plot
append!(times, ctime / 10^9) #Conversion nanosec to seconds
append!(losses, l)
loss_ = loss_function_(p, nothing)
append!(error, loss_)
timeCounter = timeCounter + time_ns() - deltaT_s #timeCounter sums all delays due to the callback functions of the previous iterations

#if (ctime/10^9 > time) #if I exceed the limit time I stop the training
# return true #Stop the minimizer and continue from line 142
#end

return false
eq = Dt(u(t,x1,x2,x3,x4)) - Δu - u(t,x1,x2,x3,x4) +
u(t,x1,x2,x3,x4)^3 ~ 0

# Initial condition
initialCondition = 1 / (2 + 0.4 * (x1^2 + x2^2 + x3^2 + x4^2))
bcs = [u(0,x1,x2,x3,x4) ~ initialCondition]

# Neural Network setup
n = 10

# Create the neural network with explicit CPU device
chain = Lux.Chain(
Lux.Dense(5 => n, tanh),
Lux.Dense(n => n, tanh),
Lux.Dense(n => 1, identity)
)

# Initialize parameters with random seed
rng = Random.default_rng()
init_params, st = Lux.setup(rng, chain)

training_strategy = NeuralPDE.QuasiRandomTraining(
500; # Number of points for domain
bcs_points = 50, # Number of points for boundary conditions
)

# Create the PINN
discretization = PhysicsInformedNN(
chain,
training_strategy;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shouldn't this be `strategy` (the argument of this function — I assume you want to test the different strategies)?

init_params = init_params,
states = st
)

# Setup PDE system
@named pde_system = PDESystem(
eq,
bcs,
domains,
[t,x1,x2,x3,x4],
[u(t,x1,x2,x3,x4)]
)

# Discretize
prob = discretize(pde_system, discretization)

# Training setup
losses = Float64[]
error = Float64[]
times = Float64[]
timeCounter = 0.0
startTime = time_ns()

# Callback function with time cap
cb = function (p, l)
try
deltaT_s = time_ns()
ctime = time_ns() - startTime - timeCounter

push!(times, ctime / 1e9)
push!(losses, l)
push!(error, l)

timeCounter += time_ns() - deltaT_s

if (ctime / 1e9 > time_limit)
println("Time limit of $time_limit seconds exceeded. Stopping training.")
return true
end

return false
catch e
@warn "Callback error: $e"
return false
end
end

@named pde_system = PDESystem(eq, bcs, domains, indvars, depvars)

discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
prob = NeuralPDE.discretize(pde_system, discretization)

timeCounter = 0.0
startTime = time_ns() #Fix initial time (t=0) before starting the training
res = Optimization.solve(prob, minimizer, callback=cb_, maxiters=maxIters)
# Solve with optimization
res = Optimization.solve(
prob,
minimizer;
callback = cb,
maxiters = maxIters
)

# Generate predictions
phi = discretization.phi

params = res.minimizer

# Model prediction
domain = [ts, x1s, x2s, x3s, x4s]

u_predict = try
[reshape([first(phi([t,x1,x2,x3,x4], res.minimizer))
for x1 in x1s, x2 in x2s, x3 in x3s, x4 in x4s],
(length(x1s), length(x2s), length(x3s), length(x4s)))
for t in ts]
catch e
@warn "Prediction generation failed: $e"
nothing
end

u_predict = [reshape([first(phi([t, x1, x2, x3, x4], res.minimizer)) for x1 in x1s for x2 in x2s for x3 in x3s for x4 in x4s], (length(x1s), length(x2s), length(x3s), length(x4s))) for t in ts] #matrix of model's prediction
# Log the time points (ts)
println("Time points (t) for strategy $strategy and minimizer $minimizer: ", collect(ts))

return [error, params, domain, times, losses]
return [error, res.minimizer, domain, times, losses]
end
```

Expand All @@ -159,7 +186,7 @@ strategies_short_name = ["CubaCuhre",
"StochasticTraining",
"QuasiRandomTraining"]

minimizers = [ADAM(0.005),BFGS()]
minimizers = [Optimisers.ADAM(0.005),BFGS()]
minimizers_short_name = ["ADAM","BFGS"]

# Run models
Expand All @@ -176,15 +203,15 @@ losses_res = Dict()
## Convergence

for min =1:length(minimizers) # minimizer
for strat=1:length(strategies) # strategy
# println(string(strategies_short_name[strat], " ", minimizers_short_name[min]))
res = allen_cahn(strategies[strat], minimizers[min], maxIters[min][strat])
push!(error_res, string(strat,min) => res[1])
push!(params_res, string(strat,min) => res[2])
push!(domains, string(strat,min) => res[3])
push!(times, string(strat,min) => res[4])
push!(losses_res, string(strat,min) => res[5])
end
for strat=1:length(strategies) # strategy
# println(string(strategies_short_name[strat], " ", minimizers_short_name[min]))
res = allen_cahn(strategies[strat], minimizers[min], maxIters[min][strat])
push!(error_res, string(strat,min) => res[1])
push!(params_res, string(strat,min) => res[2])
push!(domains, string(strat,min) => res[3])
push!(times, string(strat,min) => res[4])
push!(losses_res, string(strat,min) => res[5])
end
end
```

Expand Down
Loading