From 239e9bb926218540a4840e13ad381ef7fc0b0b1d Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 9 Aug 2023 17:35:00 +0200 Subject: [PATCH 01/85] enable dependabot for GitHub actions --- .github/dependabot.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..d60f0707f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" # Location of package manifests + schedule: + interval: "monthly" From 32c227af39bdce623133818dc17f2cb15b974b22 Mon Sep 17 00:00:00 2001 From: Alexandre Magueresse Date: Mon, 15 Apr 2024 16:32:38 +1000 Subject: [PATCH 02/85] Fixed constructor of RungeKutta with only one solver, removed symbol FE --- src/ODEs/ODESolvers.jl | 2 +- src/ODEs/ODESolvers/TableausEX.jl | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ODEs/ODESolvers.jl b/src/ODEs/ODESolvers.jl index c17b9d52c..b00fa297b 100644 --- a/src/ODEs/ODESolvers.jl +++ b/src/ODEs/ODESolvers.jl @@ -202,5 +202,5 @@ function RungeKutta( end function RungeKutta(sysslvr_nl::NonlinearSolver, dt::Real, tableau) - RungeKutta(sysslvr_nl, sysslvr_nl, dt, name) + RungeKutta(sysslvr_nl, sysslvr_nl, dt, tableau) end diff --git a/src/ODEs/ODESolvers/TableausEX.jl b/src/ODEs/ODESolvers/TableausEX.jl index b21376580..4dd51b583 100644 --- a/src/ODEs/ODESolvers/TableausEX.jl +++ b/src/ODEs/ODESolvers/TableausEX.jl @@ -10,7 +10,6 @@ end """ EXRK_Euler_1_1 - FE """ struct EXRK_Euler_1_1 <: TableauName end From 0c0ac35181a2284f858ee5fba5b40aeeb69f9103 Mon Sep 17 00:00:00 2001 From: CompatHelper Julia Date: Sat, 18 May 2024 00:13:55 +0000 Subject: [PATCH 03/85] CompatHelper: bump compat for BlockArrays to 1, (keep existing compat) --- Project.toml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 3ee248317..9b71c63e9 100644 --- a/Project.toml +++ b/Project.toml @@ -33,7 +33,7 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" [compat] AbstractTrees = "0.3.3, 0.4" BSON = "0.2.5, 0.3" -BlockArrays = "0.12.12, 0.13, 0.14, 0.15, 0.16" +BlockArrays = "0.12.12, 0.13, 0.14, 0.15, 0.16, 1" Combinatorics = "1.0.0" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" From fdf0b284e0f87c5e5c101cd31fdc61cadf833ab6 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 5 Aug 2024 11:56:29 +1000 Subject: [PATCH 04/85] Added type for traceless symmetric tensors - Added SymTracelessTensorValue (alias QTensorValue) It is a symmetric tensor Multivalue with null trace, the last diagonal value should not be provided in the constructor, the latter computes it and stores it in the data field - Added AbstractSymTensorValue{D} <: Multivalue{Tuple{D,D}} as abstract supertype for SymTensorValue and SymTracelessTensorValue --- src/TensorValues/Indexing.jl | 6 +- src/TensorValues/Operations.jl | 252 ++++++++++-------- src/TensorValues/SymTensorValueTypes.jl | 8 +- .../SymTracelessTensorValueTypes.jl | 176 ++++++++++++ src/TensorValues/TensorValues.jl | 5 + test/TensorValuesTests/IndexingTests.jl | 23 +- test/TensorValuesTests/OperationsTests.jl | 162 ++++++++++- test/TensorValuesTests/ReinterpretTests.jl | 6 + test/TensorValuesTests/TypesTests.jl | 84 +++++- 9 files changed, 599 insertions(+), 123 deletions(-) create mode 100644 src/TensorValues/SymTracelessTensorValueTypes.jl diff --git a/src/TensorValues/Indexing.jl b/src/TensorValues/Indexing.jl index 8f9f996d8..ee47e7f9d 100644 --- a/src/TensorValues/Indexing.jl +++ b/src/TensorValues/Indexing.jl @@ -14,7 +14,7 @@ function getindex(arg::TensorValue{D},i::Integer,j::Integer) where D arg.data[index] end -function getindex(arg::SymTensorValue{D},i::Integer,j::Integer) where D +function 
getindex(arg::AbstractSymTensorValue{D},i::Integer,j::Integer) where D index = _2d_sym_tensor_linear_index(D,i,j) arg.data[index] end @@ -31,7 +31,7 @@ end getindex(arg::VectorValue, ci::CartesianIndex{1}) = getindex(arg,ci[1]) getindex(arg::TensorValue,ci::CartesianIndex{2}) = getindex(arg,ci[1],ci[2]) -getindex(arg::SymTensorValue,ci::CartesianIndex{2}) = getindex(arg,ci[1],ci[2]) +getindex(arg::AbstractSymTensorValue,ci::CartesianIndex{2}) = getindex(arg,ci[1],ci[2]) getindex(arg::ThirdOrderTensorValue,ci::CartesianIndex{3}) = getindex(arg,ci[1],ci[2],ci[3]) getindex(arg::SymFourthOrderTensorValue,ci::CartesianIndex{4}) = getindex(arg,ci[1],ci[2],ci[3],ci[4]) @@ -44,7 +44,7 @@ getindex(arg::ThirdOrderTensorValue, i::Integer) = arg.data[i] data_index(::Type{<:VectorValue},i) = i data_index(::Type{<:TensorValue{D}},i,j) where D = _2d_tensor_linear_index(D,i,j) -data_index(::Type{<:SymTensorValue{D}},i,j) where D = _2d_sym_tensor_linear_index(D,i,j) +data_index(::Type{<:AbstractSymTensorValue{D}},i,j) where D = _2d_sym_tensor_linear_index(D,i,j) data_index(::Type{<:ThirdOrderTensorValue{D1,D2}},i,j,k) where {D1,D2} = _3d_tensor_linear_index(D1,D2,i,j,k) data_index(::Type{<:SymFourthOrderTensorValue{D}},i,j,k,l) where D = _4d_sym_tensor_linear_index(D,i,j,k,l) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index e81d90b55..4f831de65 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -45,6 +45,10 @@ for op in (:+,:-) T(r) end + function ($op)(a::MultiValue,b::MultiValue) + @notimplemented "Not implemented or undefined operation \"$($op)\" on MultiValues of these shapes" + end + function ($op)(a::MultiValue{S},b::MultiValue{S}) where S r = map(($op), a.data, b.data) T = _eltype($op,r,a,b) @@ -52,17 +56,52 @@ for op in (:+,:-) M(r) end - function ($op)(a::TensorValue,b::SymTensorValue) + function ($op)(a::TensorValue{D,D},b::SymTensorValue{D}) where D + map(($op), a, TensorValue(get_array(b))) + end + + 
function ($op)(a::SymTensorValue{D},b::TensorValue{D,D}) where D + map(($op), TensorValue(get_array(a)), b) + end + + function ($op)(a::TensorValue{D,D},b::SymTracelessTensorValue{D}) where D map(($op), a, TensorValue(get_array(b))) end - function ($op)(a::SymTensorValue,b::TensorValue) + function ($op)(a::SymTracelessTensorValue{D},b::TensorValue{D,D}) where D map(($op), TensorValue(get_array(a)), b) end + function ($op)(a::SymTracelessTensorValue{D},b::SymTensorValue{D}) where D + r = map(($op), a.data, b.data) + T = _eltype($op,r,a,b) + M = change_eltype(b,T) + M(r) + end + + function ($op)(a::SymTensorValue{D},b::SymTracelessTensorValue{D}) where D + r = map(($op), a.data, b.data) + T = _eltype($op,r,a,b) + M = change_eltype(a,T) + M(r) + end + + function ($op)(a::SymTracelessTensorValue) + r = map($op, a.data[1:end-1]) + typeof(a)(r) + end + + function ($op)(a::SymTracelessTensorValue{D},b::SymTracelessTensorValue{D}) where D + r = map(($op), a.data[1:end-1], b.data[1:end-1]) + T = _eltype($op,r,a,b) + M = change_eltype(a,T) + M(r) + end + end end + ############################################################### # Matrix Division ############################################################### @@ -98,28 +137,45 @@ end for op in (:+,:-,:*) @eval begin function ($op)(a::MultiValue,b::Number) - r = _bc($op,a.data,b) - T = _eltype($op,r,a,b) - M = change_eltype(a,T) - M(r) + r = _bc($op,a.data,b) + T = _eltype($op,r,a,b) + M = change_eltype(a,T) + M(r) end function ($op)(a::Number,b::MultiValue) - r = _bc($op,a,b.data) - T = _eltype($op,r,a,b) - M = change_eltype(b,T) - M(r) + r = _bc($op,a,b.data) + T = _eltype($op,r,a,b) + M = change_eltype(b,T) + M(r) end end end +function (*)(a::Number,b::SymTracelessTensorValue) + r = _bc(*,a,b.data[1:end-1]) + T = _eltype(*,r,a,b) + M = change_eltype(b,T) + M(r) +end + +function (*)(a::SymTracelessTensorValue,b::Number) + b*a +end + function (/)(a::MultiValue,b::Number) - r = _bc(/,a.data,b) - T = _eltype(/,r,a,b) - P = 
change_eltype(a,T) - P(r) + r = _bc(/,a.data,b) + T = _eltype(/,r,a,b) + P = change_eltype(a,T) + P(r) end +const _err = " with number is undefined for traceless tensors" +function -(::SymTracelessTensorValue,::Number) error("Addition" *_err) end +function +(::SymTracelessTensorValue,::Number) error("Subtraction"*_err) end +function -(::Number,::SymTracelessTensorValue) error("Addition" *_err) end +function +(::Number,::SymTracelessTensorValue) error("Subtraction"*_err) end + @inline function _eltype(op,r,a,b) eltype(r) end @@ -146,18 +202,18 @@ dot(a::MultiValue{Tuple{D}}, b::MultiValue{Tuple{D}}) where D = inner(a,b) dot(a::MultiValue,b::MultiValue) = @notimplemented @generated function dot(a::A,b::B) where {A<:MultiValue{Tuple{D1}},B<:MultiValue{Tuple{D1,D2}}} where {D1,D2} - ss = String[] - for j in 1:D2 - s = "" - for i in 1:D1 - ak = data_index(A,i) - bk = data_index(B,i,j) - s *= "a.data[$ak]*b.data[$bk]+" - end - push!(ss,s[1:(end-1)]*", ") + ss = String[] + for j in 1:D2 + s = "" + for i in 1:D1 + ak = data_index(A,i) + bk = data_index(B,i,j) + s *= "a.data[$ak]*b.data[$bk]+" end - str = join(ss) - Meta.parse("VectorValue{$D2}($str)") + push!(ss,s[1:(end-1)]*", ") + end + str = join(ss) + Meta.parse("VectorValue{$D2}($str)") end function dot(a::A,b::B) where {A<:MultiValue{Tuple{0}},B<:MultiValue{Tuple{0,D2}}} where D2 @@ -166,30 +222,30 @@ function dot(a::A,b::B) where {A<:MultiValue{Tuple{0}},B<:MultiValue{Tuple{0,D2} end @generated function dot(a::A,b::B) where {A<:MultiValue{Tuple{D1,D2}},B<:MultiValue{Tuple{D2}}} where {D1,D2} - ss = String[] - for i in 1:D1 - s = "" - for j in 1:D2 - ak = data_index(A,i,j) - bk = data_index(B,j) - s *= "a.data[$ak]*b.data[$bk]+" - end - push!(ss,s[1:(end-1)]*", ") + ss = String[] + for i in 1:D1 + s = "" + for j in 1:D2 + ak = data_index(A,i,j) + bk = data_index(B,j) + s *= "a.data[$ak]*b.data[$bk]+" end - str = join(ss) - Meta.parse("VectorValue{$D1}($str)") + push!(ss,s[1:(end-1)]*", ") + end + str = join(ss) + 
Meta.parse("VectorValue{$D1}($str)") end @generated function dot(a::MultiValue{Tuple{D1,D3}}, b::MultiValue{Tuple{D3,D2}}) where {D1,D2,D3} - ss = String[] - for j in 1:D2 - for i in 1:D1 - s = join([ "a[$i,$k]*b[$k,$j]+" for k in 1:D3]) - push!(ss,s[1:(end-1)]*", ") - end + ss = String[] + for j in 1:D2 + for i in 1:D1 + s = join([ "a[$i,$k]*b[$k,$j]+" for k in 1:D3]) + push!(ss,s[1:(end-1)]*", ") end - str = join(ss) - Meta.parse("TensorValue{$D1,$D2}(($str))") + end + str = join(ss) + Meta.parse("TensorValue{$D1,$D2}(($str))") end # a_ij = b_ijk*c_k @@ -261,34 +317,35 @@ function inner(a::MultiValue, b::MultiValue) end @generated function inner(a::MultiValue{S}, b::MultiValue{S}) where S - str = join([" a[$i]*b[$i] +" for i in 1:length(a) ]) - Meta.parse(str[1:(end-1)]) + str = join([" a[$i]*b[$i] +" for i in 1:length(a) ]) + Meta.parse(str[1:(end-1)]) end -@generated function inner(a::SymTensorValue{D}, b::SymTensorValue{D}) where D +@generated function inner(a::AbstractSymTensorValue{D}, b::AbstractSymTensorValue{D}) where D str = "" for i in 1:D - for j in 1:D - k = data_index(a,i,j) - str *= " a.data[$k]*b.data[$k] +" + str *= "+ a[$i,$i]*b[$i,$i]" + end + str *= " + 2*(" + for i in 1:D + for j in i+1:D + str *= "+ a[$i,$j]*b[$i,$j]" end end - Meta.parse(str[1:(end-1)]) + str *= ")" + Meta.parse(str) end -@generated function inner(a::SymFourthOrderTensorValue{D}, b::SymTensorValue{D}) where D +@generated function inner(a::SymFourthOrderTensorValue{D}, b::AbstractSymTensorValue{D}) where D str = "" for i in 1:D for j in i:D - s = "" for k in 1:D for l in 1:D - ak = data_index(a,i,j,k,l) - bk = data_index(b,k,l) - s *= " a.data[$ak]*b.data[$bk] +" + str *= "+ a[$i,$j,$k,$l]*b[$k,$l]" end end - str *= s[1:(end-1)]*", " + str *= ", " end end Meta.parse("SymTensorValue{D}($str)") @@ -388,9 +445,9 @@ const ⋅² = double_contraction ############################################################### for op in (:sum,:maximum,:minimum) - @eval begin - $op(a::MultiValue) = 
$op(a.data) - end + @eval begin + $op(a::MultiValue) = $op(a.data) + end end # Outer product (aka dyadic product) @@ -426,15 +483,13 @@ end Meta.parse("ThirdOrderTensorValue{D,D1,D2}($str)") end -@generated function outer(a::SymTensorValue{D},b::SymTensorValue{D}) where D +@generated function outer(a::AbstractSymTensorValue{D},b::AbstractSymTensorValue{D}) where D str = "" for i in 1:D for j in i:D - ak = data_index(a,i,j) for k in 1:D for l in k:D - bk = data_index(b,k,l) - str *= "a.data[$ak]*b.data[$bk], " + str *= "a[$i,$j]*b[$k,$l], " end end end @@ -481,6 +536,9 @@ function det(a::MultiValue{Tuple{3,3}}) end inv(a::MultiValue{Tuple{D1,D2}}) where {D1,D2} = TensorValue(inv(get_array(a))) +# those still have better perf than the D=2,3 specialization below +inv(a::AbstractSymTensorValue{D}) where D = SymTensorValue(inv(get_array(a))) +inv(a::SymTracelessTensorValue{2}) = SymTracelessTensorValue(inv(get_array(a))) function inv(a::MultiValue{Tuple{1,1}}) r = 1/a[1] @@ -520,7 +578,8 @@ end """ meas(a::MultiValue{Tuple{D}}) where D = sqrt(inner(a,a)) meas(a::MultiValue{Tuple{D,D}}) where D = abs(det(a)) -meas(a::TensorValue{0,D,T}) where {T,D} = one(T) +#meas( ::TensorValue{0,D,T}) where {T,D} = one(T) +#meas( ::MultiValue{Tuple{0,0},T}) where {T} = one(T) function meas(v::MultiValue{Tuple{1,D}}) where D t = VectorValue(v.data) @@ -553,14 +612,20 @@ function conj(a::T) where {T<:MultiValue} T(r) end +function conj(a::SymTracelessTensorValue) + r = map(conj, a.data) + SymTracelessTensorValue(r[1:end-1]) +end + ############################################################### # Trace ############################################################### @generated function tr(v::MultiValue{Tuple{D,D}}) where D - str = join([" v[$i,$i] +" for i in 1:D ]) - Meta.parse(str[1:(end-1)]) + str = join([" v[$i,$i] +" for i in 1:D ]) + Meta.parse(str[1:(end-1)]) end +tr(::SymTracelessTensorValue{D,T}) where {D,T} = zero(T) @generated function tr(v::MultiValue{Tuple{A,A,B}}) where {A,B} 
lis = LinearIndices((A,A,B)) @@ -596,7 +661,7 @@ transpose(a::MultiValue{Tuple{D,D}}) where D = @notimplemented Meta.parse("TensorValue{D2,D1}($str)") end -@generated function transpose(a::TensorValue{D1,D2}) where {D1,D2} +@generated function transpose(a::TensorValue{D1,D2,T}) where {D1,D2,T} str = "" for i in 1:D1 for j in 1:D2 @@ -604,18 +669,18 @@ end str *= "a.data[$k], " end end - Meta.parse("TensorValue{D2,D1}($str)") + Meta.parse("TensorValue{D2,D1,T}($str)") end @inline function adjoint(a::TensorValue{D1,D2,T}) where {D1,D2,T<:Real} transpose(a) end -adjoint(a::SymTensorValue) = conj(a) +adjoint(a::AbstractSymTensorValue) = conj(a) -@inline adjoint(a::SymTensorValue{D,T} where {D,T<:Real}) = transpose(a) +@inline adjoint(a::AbstractSymTensorValue{D,T} where {D,T<:Real}) = transpose(a) -transpose(a::SymTensorValue) = a +transpose(a::AbstractSymTensorValue) = a ############################################################### # Symmetric part @@ -634,24 +699,14 @@ transpose(a::SymTensorValue) = a Meta.parse("SymTensorValue{D}($str)") end +symmetric_part(v::AbstractSymTensorValue) = v + ############################################################### # diag ############################################################### -function LinearAlgebra.diag(a::TensorValue{1,1}) - VectorValue(a.data[1]) -end - -function LinearAlgebra.diag(a::TensorValue{2,2}) - VectorValue(a.data[1],a.data[4]) -end - -function LinearAlgebra.diag(a::TensorValue{3,3}) - VectorValue(a.data[1],a.data[5],a.data[9]) -end - -function LinearAlgebra.diag(a::TensorValue) - @notimplemented +function LinearAlgebra.diag(a::MultiValue{Tuple{D,D},T}) where {D,T} + VectorValue((a[i,i] for i in 1:D)...) 
end ############################################################### @@ -667,25 +722,6 @@ function Base.broadcasted(f,a::TensorValue,b::TensorValue) TensorValue(map(f,a.data,b.data)) end -############################################################### -# Define new operations for Gridap types -############################################################### - -#for op in (:symmetric_part,) -# @eval begin -# ($op)(a::GridapType) = operate($op,a) -# end -#end -# -#for op in (:inner,:outer,:double_contraction)#,:(:)) -# @eval begin -# ($op)(a::GridapType,b::GridapType) = operate($op,a,b) -# ($op)(a::GridapType,b::Number) = operate($op,a,b) -# ($op)(a::Number, b::GridapType) = operate($op,a,b) -# ($op)(a::GridapType,b::Function) = operate($op,a,b) -# ($op)(a::Function, b::GridapType) = operate($op,a,b) -# end -#end - - - +function Base.broadcasted(f,a::AbstractSymTensorValue,b::AbstractSymTensorValue) + SymTensorValue(map(f,a.data,b.data)) +end diff --git a/src/TensorValues/SymTensorValueTypes.jl b/src/TensorValues/SymTensorValueTypes.jl index 1f9412aef..3382f23cb 100644 --- a/src/TensorValues/SymTensorValueTypes.jl +++ b/src/TensorValues/SymTensorValueTypes.jl @@ -1,11 +1,15 @@ ############################################################### # SymTensorValue Type ############################################################### +""" +Abstract type representing any symmetric second-order tensor +""" +abstract type AbstractSymTensorValue{D,T,L} <: MultiValue{Tuple{D,D},T,2,L} end """ -Type representing a symmetric second-order tensor +Type representing a symmetric second-order tensor (with D(D-1)/2 independant components) """ -struct SymTensorValue{D,T,L} <: MultiValue{Tuple{D,D},T,2,L} +struct SymTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} data::NTuple{L,T} function SymTensorValue{D,T}(data::NTuple{L,T}) where {D,T,L} @check L == D*(D+1)÷2 diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl new file mode 
100644 index 000000000..4bd847e3a --- /dev/null +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -0,0 +1,176 @@ +############################################################### +# SymTracelessTensorValue Type +############################################################### + +""" +Type representing a traceless symmetric second-order tensor, +used to model the Q tensor in nematic liquid cristals + +The last diagonal value is determined by minus the sum of the other and musn't be provided +""" +struct SymTracelessTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} + data::NTuple{L,T} + function SymTracelessTensorValue{D,T}(data::NTuple{L,T}) where {D,T,L} + @check L == D*(D+1)÷2-1 + new{D,T,L+1}( (data..., _minus_trace(data,Val(D))) ) + end + function SymTracelessTensorValue{0,T}(data::NTuple{0,T}) where {T} + new{0,T,0}(data) + end + function SymTracelessTensorValue{1,T}(::NTuple{0,T}) where {T} + new{1,T,1}( (zero(T),) ) + end +end + +@generated function _minus_trace(data::NTuple{L,T},::Val{D}) where {D,T,L} + str = "" + for i in 1:D-1 + k = _2d_sym_tensor_linear_index(D,i,i) + str *= "- data[$k]" + end + Meta.parse("($str)") +end + +const QTensorValue = SymTracelessTensorValue + +############################################################### +# Constructors (SymTracelessTensorValue) +############################################################### + +# Empty SymTracelessTensorValue constructor + +SymTracelessTensorValue() = SymTracelessTensorValue{0,Int}(NTuple{0,Int}()) +SymTracelessTensorValue{0}() = SymTracelessTensorValue{0,Int}(NTuple{0,Int}()) +SymTracelessTensorValue{0,T}() where {T} = SymTracelessTensorValue{0,T}(NTuple{0,T}()) +SymTracelessTensorValue(data::NTuple{0}) = SymTracelessTensorValue{0,Int}(data) +SymTracelessTensorValue{0}(data::NTuple{0}) = SymTracelessTensorValue{0,Int}(data) + +# 1D SymTracelessTensorValue missing constructor + +SymTracelessTensorValue{1}() = SymTracelessTensorValue{1,Int}(NTuple{0,Int}()) 
+SymTracelessTensorValue{1}(data::NTuple{0}) = SymTracelessTensorValue{1,Int}(data) + +# SymTracelessTensorValue single NTuple argument constructor + +@generated function SymTracelessTensorValue(data::NTuple{L,T}) where {L,T} + msg = "Invalid number of scalar arguments in SymTracelessTensorValue constructor" + V = (sqrt(9+8*L)-1)/2 + @check floor(Int,V) == ceil(Int,V) msg + D = Int(V) + quote + SymTracelessTensorValue{$D,T}(data) + end +end +SymTracelessTensorValue{D}(data::NTuple{L,T}) where {D,L,T} = SymTracelessTensorValue{D,T}(data) +SymTracelessTensorValue{D,T1}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymTracelessTensorValue{D,T1}(NTuple{L,T1}(data)) +SymTracelessTensorValue{D,T1,L}(data::NTuple{Lm,T2}) where {D,L,Lm,T1,T2} = SymTracelessTensorValue{D,T1}(NTuple{Lm,T1}(data)) + +# SymTracelessTensorValue single Tuple argument constructor + +SymTracelessTensorValue(data::Tuple) = SymTracelessTensorValue(promote(data...)) +SymTracelessTensorValue{D}(data::Tuple) where {D} = SymTracelessTensorValue{D}(promote(data...)) +SymTracelessTensorValue{D,T1}(data::Tuple) where {D,T1} = SymTracelessTensorValue{D,T1}(NTuple{length(data),T1}(data)) +SymTracelessTensorValue{D,T1,L}(data::Tuple) where {D,T1,L} = SymTracelessTensorValue{D,T1}(NTuple{L-1,T1}(data)) + +# SymTracelessTensorValue Vararg constructor + +SymTracelessTensorValue(data::Number...) = SymTracelessTensorValue(data) +SymTracelessTensorValue{D}(data::Number...) where {D} = SymTracelessTensorValue{D}(data) +SymTracelessTensorValue{D,T1}(data::Number...) where {D,T1} = SymTracelessTensorValue{D,T1}(data) +SymTracelessTensorValue{D,T1,L}(data::Number...) 
where {D,T1,L} = SymTracelessTensorValue{D,T1}(data) + +# SymTracelessTensorValue single AbstractMatrix argument constructor + +#From Square Matrices +@generated function _flatten_upper_triangle_traceless(data::AbstractArray,::Val{D}) where D + str = "" + for i in 1:D-1 + for j in i:D + str *= "data[$i,$j], " + end + end + Meta.parse("($str)") +end + +SymTracelessTensorValue(data::AbstractMatrix{T}) where {T} = ((D1,D2)=size(data); SymTracelessTensorValue{D1}(data)) +SymTracelessTensorValue{D}(data::AbstractMatrix{T}) where {D,T} = SymTracelessTensorValue{D,T}(_flatten_upper_triangle_traceless(data,Val{D}())) +SymTracelessTensorValue{D,T1}(data::AbstractMatrix{T2}) where {D,T1,T2} = SymTracelessTensorValue{D,T1}(_flatten_upper_triangle_traceless(data,Val{D}())) +SymTracelessTensorValue{D,T1,L}(data::AbstractMatrix{T2}) where {D,T1,T2,L} = SymTracelessTensorValue{D,T1,L}(_flatten_upper_triangle_traceless(data,Val{D}())) + +############################################################### +# Conversions (SymTracelessTensorValue) +############################################################### + +@generated function _SymTracelessTensorValue_to_array(arg::SymTracelessTensorValue{D,T,L}) where {D,T,L} + str = "" + for j in 1:D + for i in 1:D + str *= "arg[$i,$j], " + end + end + Meta.parse("SMatrix{D,D,T}(($str))") +end + +# Direct conversion +convert(::Type{<:SymTracelessTensorValue{D,T}}, arg::AbstractArray) where {D,T} = SymTracelessTensorValue{D,T}(arg) +convert(::Type{<:SymTracelessTensorValue{D,T}}, arg::Tuple) where {D,T} = SymTracelessTensorValue{D,T}(arg) + +# Inverse conversion +convert(::Type{<:MMatrix{D,D,T}}, arg::SymTracelessTensorValue) where {D,T} = MMatrix{D,D,T}(_SymTracelessTensorValue_to_array(arg)) +convert(::Type{<:SMatrix{D,D,T}}, arg::SymTracelessTensorValue) where {D,T} = _SymTracelessTensorValue_to_array(arg) +convert(::Type{<:NTuple{L,T}}, arg::SymTracelessTensorValue) where {L,T} = NTuple{L,T}(Tuple(arg)) + +# Internal conversion 
+convert(::Type{<:SymTracelessTensorValue{D,T}}, arg::SymTracelessTensorValue{D}) where {D,T} = SymTracelessTensorValue{D,T}(Tuple(arg)) +convert(::Type{<:SymTracelessTensorValue{D,T}}, arg::SymTracelessTensorValue{D,T}) where {D,T} = arg + +############################################################### +# Other constructors and conversions (SymTracelessTensorValue) +############################################################### + +zero(::Type{<:SymTracelessTensorValue{0,T}}) where {T} = SymTracelessTensorValue{0,T}() +@generated function zero(::Type{<:SymTracelessTensorValue{D,T}}) where {D,T} + L=D*(D+1)÷2-1 + quote + SymTracelessTensorValue{D,T}(tfill(zero(T),Val{$L}())) + end +end + +zero(::Type{<:SymTracelessTensorValue{D,T,L}}) where {D,T,L} = SymTracelessTensorValue{D,T}(tfill(zero(T),Val{L}())) +zero(::SymTracelessTensorValue{D,T,L}) where {D,T,L} = zero(SymTracelessTensorValue{D,T,L}) + +rand(::AbstractRNG, ::Random.SamplerType{<:SymTracelessTensorValue{0,T}}) where {T} = SymTracelessTensorValue{0,T}() +@generated function rand(rng::AbstractRNG, + ::Random.SamplerType{<:SymTracelessTensorValue{D,T}}) where {D,T} + L=D*(D+1)÷2 + quote + rand(rng, SymTracelessTensorValue{D,T,$L}) + end +end +rand(rng::AbstractRNG,::Random.SamplerType{<:SymTracelessTensorValue{D,T,L}}) where {D,T,L} = + SymTracelessTensorValue{D,T}(Tuple(rand(rng, SVector{L-1,T}))) + +Mutable(::Type{<:SymTracelessTensorValue{D,T}}) where {D,T} = MMatrix{D,D,T} +Mutable(::SymTracelessTensorValue{D,T}) where {D,T} = Mutable(SymTracelessTensorValue{D,T}) +mutable(a::SymTracelessTensorValue{D}) where D = MMatrix{D,D}(Tuple(get_array(a))) + +change_eltype(::Type{SymTracelessTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymTracelessTensorValue{D,T2,L} +change_eltype(::SymTracelessTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymTracelessTensorValue{D,T1,L},T2) + +get_array(arg::SymTracelessTensorValue{D,T,L}) where {D,T,L} = convert(SMatrix{D,D,T}, arg) + 
+############################################################### +# Introspection (SymTracelessTensorValue) +############################################################### + +eltype(::Type{<:SymTracelessTensorValue{D,T}}) where {D,T} = T +eltype(::SymTracelessTensorValue{D,T}) where {D,T} = eltype(SymTracelessTensorValue{D,T}) + +size(::Type{<:SymTracelessTensorValue{D}}) where {D} = (D,D) +size(::SymTracelessTensorValue{D}) where {D} = size(SymTracelessTensorValue{D}) + +length(::Type{<:SymTracelessTensorValue{D}}) where {D} = D*D +length(::SymTracelessTensorValue{D}) where {D} = length(SymTracelessTensorValue{D}) + +num_components(::Type{<:SymTracelessTensorValue{D}}) where {D} = length(SymTracelessTensorValue{D}) +num_components(::SymTracelessTensorValue{D}) where {D} = num_components(SymTracelessTensorValue{D}) diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index 513d046a0..747e21d71 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -41,7 +41,10 @@ using Random export MultiValue export VectorValue export TensorValue +export AbstractSymTensorValue export SymTensorValue +export SymTracelessTensorValue +export QTensorValue export SymFourthOrderTensorValue export ThirdOrderTensorValue @@ -89,6 +92,8 @@ include("TensorValueTypes.jl") include("SymTensorValueTypes.jl") +include("SymTracelessTensorValueTypes.jl") + include("SymFourthOrderTensorValueTypes.jl") include("ThirdOrderTensorValueTypes.jl") diff --git a/test/TensorValuesTests/IndexingTests.jl b/test/TensorValuesTests/IndexingTests.jl index 5f6951076..378c23054 100644 --- a/test/TensorValuesTests/IndexingTests.jl +++ b/test/TensorValuesTests/IndexingTests.jl @@ -40,24 +40,33 @@ for (k,ti) in enumerate(t) end s = SymTensorValue{2}(11,21,22) +q = SymTracelessTensorValue{2}(11,21) t = TensorValue(convert(SMatrix{2,2,Int},s)) +p = TensorValue(convert(SMatrix{2,2,Int},q)) -@test size(s) == (2,2) -@test length(s) == 4 -@test lastindex(s) == 
length(s) -@test s[end] == 22 +@test size(s) == size(q) == (2,2) +@test length(s) == length(q) == 4 +@test lastindex(s) == lastindex(q) == length(s) +@test s[end] == 22 +@test q[end] == -11 for (k,i) in enumerate(eachindex(t)) @test s[i] == t[k] end +for (k,i) in enumerate(eachindex(p)) + @test q[i] == p[k] +end -@test s[2,1] == 21 - -@test s[2] == 21 +@test s[2,1] == q[2,1] == 21 +@test s[2] == q[2] == 21 +@test q[1] == -q[4] for (k,si) in enumerate(t) @test si == s[k] end +for (k,qi) in enumerate(p) + @test qi == q[k] +end v = @SMatrix zeros(2,3) w = TensorValue(v) diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index d7fd0b4c3..6c93d91a3 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -104,20 +104,93 @@ c = a + b r = SymTensorValue(6,8,10) @test c==r +c = b - a +r = SymTensorValue(4,4,4) +@test c==r + a = TensorValue(1,2,3,4) b = SymTensorValue(5,6,7) c = a + b +d = b + a r = TensorValue(6,8,9,11) @test c==r +@test d==r + +c = a - b +r = TensorValue(-4,-4,-3,-3) +@test c==r + +c = b - a +r = TensorValue(4,4,3,3) +@test c==r + +a = SymTracelessTensorValue(1,2) +b = SymTracelessTensorValue(5,6) + +c = -a +r = SymTracelessTensorValue(-1,-2) +@test c==r + +c = a + b +r = SymTracelessTensorValue(6,8) +@test c==r + +c = a - b +r = SymTracelessTensorValue(-4,-4) +@test c==r + +a = SymTensorValue(1,2,3) +b = SymTracelessTensorValue(5,6) + +c = a + b +d = b + a +r = SymTensorValue(6,8,-2) +@test c==r +@test d==r + +c = a - b +r = SymTensorValue(-4,-4,8) +@test c==r + +c = b - a +r = SymTensorValue(4,4,-8) +@test c==r a = SymTensorValue(5,6,7) b = TensorValue(1,2,3,4) +c = a + b +d = b + a +r = TensorValue(6,8,9,11) +@test c==r +@test d==r + c = a - b r = TensorValue(4,4,3,3) @test c==r +c = b - a +r = TensorValue(-4,-4,-3,-3) +@test c==r + +a = SymTracelessTensorValue(5,6) +b = TensorValue(1,2,3,4) + +c = a + b +d = b + a +r = TensorValue(6,8,9,-1) +@test c==r 
+@test d==r + +c = a - b +r = TensorValue(4,4,3,-9) +@test c==r + +c = b - a +r = TensorValue(-4,-4,-3,9) +@test c==r + # Matrix Division a = VectorValue(1,2,3) @@ -134,6 +207,7 @@ c = st\a t = TensorValue(1,2,3,4,5,6,7,8,9) st = SymTensorValue(1,2,3,5,6,9) +qt = SymTracelessTensorValue(1,2,3,5,6) s4ot = one(SymFourthOrderTensorValue{2,Int}) a = VectorValue(1,2,3) @@ -192,6 +266,18 @@ c = st + 2 r = SymTensorValue(3,4,5,7,8,11) @test c == r + +c = 2 * qt +@test isa(c,SymTracelessTensorValue{3}) +r = SymTracelessTensorValue(2,4,6,10,12) +@test c == r + +c = qt * 2 +@test isa(c,SymTracelessTensorValue{3}) +r = SymTracelessTensorValue(2,4,6,10,12) +@test c == r + + c = 2 * s4ot @test isa(c,SymFourthOrderTensorValue{2}) r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) @@ -214,8 +300,10 @@ b = VectorValue(2,1,6) t = TensorValue(1,2,3,4,5,6,7,8,9) s = TensorValue(9,8,3,4,5,6,7,2,1) -st = SymTensorValue(1,2,3,5,6,9) +st = SymTensorValue(1,2,3,5,6,9) st2 = SymTensorValue(9,6,5,3,2,1) +qt = SymTracelessTensorValue(1,2,3,5,6) +qt2 = SymTracelessTensorValue(9,6,5,3,2) c = a ⋅ b @test isa(c,Int) @@ -231,6 +319,11 @@ c = st ⋅ a r = VectorValue(14,30,42) @test c == r +c = qt ⋅ a +@test isa(c,VectorValue{3,Int}) +r = VectorValue(14,30,-3) +@test c == r + c = s ⋅ t @test isa(c,TensorValue{3,3,Int}) r = TensorValue(38,24,18,98,69,48,158,114,78) @@ -241,11 +334,31 @@ c = st ⋅ st2 r = TensorValue(36, 78, 108, 18, 39, 54, 12, 26, 36) @test c == r +c = qt ⋅ qt2 +@test isa(c,TensorValue{3,3,Int}) +r = TensorValue(36, 78, 33, 18, 39, 24, -27, -52, 99) +@test c == r + +c = st ⋅ qt2 +@test isa(c,TensorValue{3,3,Int}) +r = TensorValue(36, 78, 108, 18, 39, 54, -27, -52, -81) +@test c == r + +c = qt2 ⋅ st +@test isa(c,TensorValue{3,3,Int}) +r = TensorValue(36, 18, -27, 78, 39, -52, 108, 54, -81) +@test c == r + c = a ⋅ st @test isa(c,VectorValue{3,Int}) r = VectorValue(14,30,42) @test c == r +c = a ⋅ qt +@test isa(c,VectorValue{3,Int}) +r = VectorValue(14,30,-3) +@test c == r + a1 = 
VectorValue(1,0) b1 = VectorValue(1,2) @@ -307,6 +420,11 @@ c = st ⊙ st2 @test isa(c,Int) @test c == inner(TensorValue(get_array(st)),TensorValue(get_array(st2))) +c = inner(qt,qt2) +c = qt ⊙ qt2 +@test isa(c,Int) +@test c == inner(TensorValue(get_array(qt)),TensorValue(get_array(qt2))) + # Reductions a = VectorValue(1,2,3) @@ -389,7 +507,11 @@ c = inv(t) st = SymTensorValue(9,8,7,5,4,1) @test det(st) == det(TensorValue(get_array(st))) -@test inv(st) == inv(TensorValue(get_array(st))) +@test inv(st) ≈ inv(TensorValue(get_array(st))) + +qt = SymTracelessTensorValue(9,8,7,5,4) +@test det(qt) == det(TensorValue(get_array(qt))) +@test inv(qt) ≈ inv(TensorValue(get_array(qt))) t = TensorValue(10) @test det(t) == 10 @@ -412,6 +534,9 @@ c = meas(t) st = SymTensorValue(1,2,3,5,6,9) @test meas(st) == meas(TensorValue(get_array(st))) +qt = SymTracelessTensorValue(1,2,3,5,6) +@test meas(qt) == meas(TensorValue(get_array(qt))) + v = TensorValue{1,2}(10,20) @test meas(v) == sqrt(500) @@ -465,8 +590,14 @@ t = TensorValue(1,2,3,4,5,6,7,8,9) st = SymTensorValue(1,2,3,5,6,9) @test tr(st) == tr(TensorValue(get_array(st))) +qt = SymTracelessTensorValue(1,2,3,5,6) +@test tr(qt) == tr(TensorValue(get_array(qt))) + @test get_array(symmetric_part(t)) == get_array(TensorValue(1.0, 3.0, 5.0, 3.0, 5.0, 7.0, 5.0, 7.0, 9.0)) @test symmetric_part(st) == symmetric_part(TensorValue(get_array(st))) +@test symmetric_part(st) === st +@test symmetric_part(qt) == symmetric_part(TensorValue(get_array(qt))) +@test symmetric_part(qt) === qt a = TensorValue(1,2,3,4) b = a' @@ -566,6 +697,15 @@ odot_contraction_array = 1*a[:,1,1] + 2*a[:,1,2] + 3*a[:,1,3] + 2*a[:,2,1] + 4*a[:,2,2] + 5*a[:,2,3] + 3*a[:,3,1] + 5*a[:,3,2] + 6*a[:,3,3] @test odot_contraction == odot_contraction_array +a = reshape(Vector(1:27),(3,3,3)) +a_tensor = ThirdOrderTensorValue(a...) +b_tensor = SymTracelessTensorValue((1:5)...) 
+b = Matrix(get_array(b_tensor))
+odot_contraction = Vector(get_array(a_tensor ⋅² b_tensor))
+odot_contraction_array = 1*a[:,1,1] + 2*a[:,1,2] + 3*a[:,1,3] + 2*a[:,2,1] +
+ 4*a[:,2,2] + 5*a[:,2,3] + 3*a[:,3,1] + 5*a[:,3,2] + (-5)*a[:,3,3]
+@test odot_contraction == odot_contraction_array
+
 # double Contractions w/ products
 Sym4TensorIndexing = [1111, 1121, 1131, 1122, 1132, 1133, 2111, 2121, 2131, 2122, 2132, 2133,
 3111, 3121, 3131, 3122, 3132, 3133, 2211, 2221, 2231, 2222, 2232, 2233,
@@ -738,4 +878,22 @@ c = a .* b
 
 @test diag(a) == VectorValue(1,4)
 
+a = SymTensorValue(1,2,4)
+b = SymTensorValue(1.,2.,4.)
+c = a .* b
+@test isa(c,SymTensorValue)
+@test c.data == map(*,a.data,b.data)
+
+@test diag(a) == VectorValue(1,4)
+
+
+# Component-wise operations on sym. traceless tensors yield sym. tensors
+a = SymTracelessTensorValue(1,2)
+b = SymTracelessTensorValue(1.,2.)
+c = a .* b
+@test isa(c,SymTensorValue)
+@test c.data == map(*,a.data,b.data)
+
+@test diag(a) == VectorValue(1,-1)
+
 end # module OperationsTests
diff --git a/test/TensorValuesTests/ReinterpretTests.jl b/test/TensorValuesTests/ReinterpretTests.jl
index bba4228af..23cf505a0 100644
--- a/test/TensorValuesTests/ReinterpretTests.jl
+++ b/test/TensorValuesTests/ReinterpretTests.jl
@@ -27,4 +27,10 @@ A = reinterpret(V)
 R = [1 1 1 1 1; 2 2 2 2 2; 3 3 3 3 3]
 @test A == R
 
+v = SymTracelessTensorValue(1,2)
+V = fill(v,5)
+A = reinterpret(V)
+R = [1 1 1 1 1; 2 2 2 2 2; -1 -1 -1 -1 -1]
+@test A == R
+
 end # module ReinterpretTests
diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl
index c86d8391e..900d22231 100644
--- a/test/TensorValuesTests/TypesTests.jl
+++ b/test/TensorValuesTests/TypesTests.jl
@@ -103,6 +103,60 @@ s = SymTensorValue{2,Int}(11,21.0,22)
 @test isa(s,SymTensorValue{2,Int})
 @test convert(SMatrix{2,2,Int},s) == [11.0 21.0;21.0 22.0]
 
+# Constructors (SymTracelessTensorValue)
+
+q = SymTracelessTensorValue( (11,21) )
+@test isa(q,SymTracelessTensorValue{2,Int})
+@test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] + +q = SymTracelessTensorValue(11,21) +@test isa(q,SymTracelessTensorValue{2,Int}) +@test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] + +q = SymTracelessTensorValue{2}( (11,21) ) +@test isa(q,SymTracelessTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] + +q = SymTracelessTensorValue{2}(11,21) +@test isa(q,SymTracelessTensorValue{2,Int}) +@test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] + +q = SymTracelessTensorValue{2,Int}( (11,21) ) +@test isa(q,SymTracelessTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] + +q = SymTracelessTensorValue{2,Float64}(11,21) +@test isa(q,SymTracelessTensorValue{2,Float64}) +@test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] + +q = SymTracelessTensorValue{0,Int}( () ) +@test isa(q,SymTracelessTensorValue{0,Int}) +@test convert(SMatrix{0,0,Int},q) == Array{Any,2}(undef,0,0) + +q = SymTracelessTensorValue{0,Int}() +@test isa(q,SymTracelessTensorValue{0,Int}) +@test convert(SMatrix{0,0,Int},q) == Array{Any,2}(undef,0,0) + +q = SymTracelessTensorValue{1,Int}( () ) +@test isa(q,SymTracelessTensorValue{1,Int}) +@test convert(SMatrix{1,1,Int},q) == [0;;] + +q = SymTracelessTensorValue{1,Int}() +@test isa(q,SymTracelessTensorValue{1,Int}) +@test convert(SMatrix{1,1,Int},q) == [0;;] + +q = SymTracelessTensorValue(11,21.0) +@test isa(q,SymTracelessTensorValue{2,Float64}) +@test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] + +q = SymTracelessTensorValue{2}(11,21.0) +@test isa(q,SymTracelessTensorValue{2,Float64}) +@test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] + +q = SymTracelessTensorValue{2,Int}(11,21.0) +@test isa(q,SymTracelessTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},q) == [11.0 21.0;21.0 -11.0] + # Constructors (SymFourthOrderTensorValue) s = SymFourthOrderTensorValue( (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) ) @@ -241,6 +295,10 @@ z = 
zero(SymTensorValue{3,Int}) @test isa(z,SymTensorValue{3,Int,6}) @test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) +z = zero(SymTracelessTensorValue{3,Int}) +@test isa(z,SymTracelessTensorValue{3,Int,6}) +@test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) + z = zero(ThirdOrderTensorValue{3,3,3,Int,27}) @test isa(z,ThirdOrderTensorValue{3,3,3,Int,27}) @test Tuple(z) == Tuple(zeros(Int,(27))) @@ -279,6 +337,10 @@ r = rand(SymTensorValue{3,Int}) @test isa(r,SymTensorValue{3,Int,6}) @test r ≠ rand(typeof(r)) +r = rand(SymTracelessTensorValue{3,Int}) +@test isa(r,SymTracelessTensorValue{3,Int,6}) +@test r ≠ rand(typeof(r)) + r = rand(SymFourthOrderTensorValue{3,Int}) @test isa(r,SymFourthOrderTensorValue{3,Int,36}) @test r ≠ rand(typeof(r)) @@ -311,6 +373,13 @@ b = convert(V,a) b = V[a,a,a,] @test isa(b,Vector{V}) +a = (11,21) +V = SymTracelessTensorValue{2,Int,3} +b = convert(V,a) +@test isa(b,V) +b = V[a,a,a] +@test isa(b,Vector{V}) + a = (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) V = SymFourthOrderTensorValue{2,Int,9} b = convert(V,a) @@ -403,6 +472,7 @@ v = VectorValue(m) @test num_components(VectorValue(1,2,3)) == 3 @test num_components(TensorValue(1,2,3,4)) == 4 @test num_components(SymTensorValue(1,2,3)) == 4 +@test num_components(SymTracelessTensorValue(1,2)) == 4 @test num_components(SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222)) == 16 a = VectorValue(1,2,3,4) @@ -434,9 +504,21 @@ b[1,1] = a[1,1] b[1,2] = a[1,2] b[2,1] = a[2,1] b[2,2] = a[2,2] +a = SymTensorValue(11,21,22) bt = SymTensorValue{2,Int64}(b) -@test bt .== a +@test all(bt .== a) +a = SymTracelessTensorValue(11,21) +@test change_eltype(a,Float64) == SymTracelessTensorValue{2,Float64,3} +@test isa(Tuple(a),Tuple) +@test Tuple(a) == a.data +b = Matrix{Int64}(undef,2,2) +b[1,1] = a[1,1] +b[1,2] = a[1,2] +b[2,1] = a[2,1] +b[2,2] = a[2,2] +bt = SymTracelessTensorValue{2,Int64}(b) +@test all(bt .== a) a = SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 
2211,2221,2222) @test change_eltype(a,Float64) == SymFourthOrderTensorValue{2,Float64,9} From 7f606327946d6cb7edca084f8bd3639025f50b7f Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 5 Aug 2024 11:56:45 +1000 Subject: [PATCH 05/85] Fixed some comments --- .../SymFourthOrderTensorValueTypes.jl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl index d36c2cbd7..7bdd63aea 100644 --- a/src/TensorValues/SymFourthOrderTensorValueTypes.jl +++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl @@ -1,5 +1,5 @@ ############################################################### -# SymTensorValue Type +# SymFourthOrderTensorValue Type ############################################################### """ @@ -14,10 +14,10 @@ struct SymFourthOrderTensorValue{D,T,L} <: MultiValue{Tuple{D,D,D,D},T,4,L} end ############################################################### -# Constructors (SymTensorValue) +# Constructors (SymFourthOrderTensorValue) ############################################################### -# Empty SymTensorValue constructor +# Empty SymFourthOrderTensorValue constructor SymFourthOrderTensorValue() = SymFourthOrderTensorValue{0,Int}(NTuple{0,Int}()) SymFourthOrderTensorValue{0}() = SymFourthOrderTensorValue{0,Int}(NTuple{0,Int}()) @@ -25,7 +25,7 @@ SymFourthOrderTensorValue{0,T}() where {T} = SymFourthOrderTensorValue{0,T}(N SymFourthOrderTensorValue(data::NTuple{0}) = SymFourthOrderTensorValue{0,Int}(data) SymFourthOrderTensorValue{0}(data::NTuple{0}) = SymFourthOrderTensorValue{0,Int}(data) -# SymTensorValue single NTuple argument constructor +# SymFourthOrderTensorValue single NTuple argument constructor @generated function SymFourthOrderTensorValue(data::NTuple{L,T}) where {L,T} msg = "Invalid number of scalar arguments in SymFourthOrderTensorValue constructor" @@ -40,20 +40,20 @@ 
SymFourthOrderTensorValue{D}(data::NTuple{L,T}) where {D,L,T} = SymFou SymFourthOrderTensorValue{D,T1}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymFourthOrderTensorValue{D,T1}(NTuple{L,T1}(data)) SymFourthOrderTensorValue{D,T1,L}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymFourthOrderTensorValue{D,T1}(NTuple{L,T1}(data)) -# SymTensorValue single Tuple argument constructor +# SymFourthOrderTensorValue single Tuple argument constructor SymFourthOrderTensorValue(data::Tuple) = SymFourthOrderTensorValue(promote(data...)) SymFourthOrderTensorValue{D}(data::Tuple) where {D} = SymFourthOrderTensorValue{D}(promote(data...)) SymFourthOrderTensorValue{D,T1}(data::Tuple) where {D,T1} = SymFourthOrderTensorValue{D,T1}(NTuple{length(data),T1}(data)) -# SymTensorValue Vararg constructor +# SymFourthOrderTensorValue Vararg constructor SymFourthOrderTensorValue(data::Number...) = SymFourthOrderTensorValue(data) SymFourthOrderTensorValue{D}(data::Number...) where {D} = SymFourthOrderTensorValue{D}(data) SymFourthOrderTensorValue{D,T1}(data::Number...) 
where {D,T1} = SymFourthOrderTensorValue{D,T1}(data) ############################################################### -# Conversions (SymTensorValue) +# Conversions (SymFourthOrderTensorValue) ############################################################### # Direct conversion @@ -67,7 +67,7 @@ convert(::Type{<:SymFourthOrderTensorValue{D,T}}, arg::SymFourthOrderTensorValue convert(::Type{<:SymFourthOrderTensorValue{D,T}}, arg::SymFourthOrderTensorValue{D,T}) where {D,T} = arg ############################################################### -# Other constructors and conversions (SymTensorValue) +# Other constructors and conversions (SymFourthOrderTensorValue) ############################################################### @generated function zero(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} @@ -101,7 +101,7 @@ change_eltype(::Type{SymFourthOrderTensorValue{D,T1,L}},::Type{T2}) where {D,T1, change_eltype(::SymFourthOrderTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymFourthOrderTensorValue{D,T1,L},T2) ############################################################### -# Introspection (SymTensorValue) +# Introspection (SymFourthOrderTensorValue) ############################################################### eltype(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} = T From fbc4948947d31b67695301036bb17fa45f1f2467 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 5 Aug 2024 15:44:25 +1000 Subject: [PATCH 06/85] Fix zero(::SymTracelessTensorValueTypes) --- src/TensorValues/SymTracelessTensorValueTypes.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl index 4bd847e3a..d2f410bab 100644 --- a/src/TensorValues/SymTracelessTensorValueTypes.jl +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -136,7 +136,7 @@ zero(::Type{<:SymTracelessTensorValue{0,T}}) where {T} = SymTracelessTensorValue end end 
-zero(::Type{<:SymTracelessTensorValue{D,T,L}}) where {D,T,L} = SymTracelessTensorValue{D,T}(tfill(zero(T),Val{L}())) +zero(::Type{<:SymTracelessTensorValue{D,T,L}}) where {D,T,L} = SymTracelessTensorValue{D,T}(tfill(zero(T),Val{L-1}())) zero(::SymTracelessTensorValue{D,T,L}) where {D,T,L} = zero(SymTracelessTensorValue{D,T,L}) rand(::AbstractRNG, ::Random.SamplerType{<:SymTracelessTensorValue{0,T}}) where {T} = SymTracelessTensorValue{0,T}() From 5172f81b05e5a94909b9f87c2fb1928c19211c98 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 5 Aug 2024 17:20:51 +1000 Subject: [PATCH 07/85] fix change_eltype for Tensor Multivalues Added change_eltype method without L type parameter, because it prevents from using change_eltype on partial definition that dont specify L (problem in TensorValued FESpace construction for which L was not specified, e.g. : FESpace(model, TensorValue{2,2,Float64}, 1) --- src/TensorValues/SymFourthOrderTensorValueTypes.jl | 1 + src/TensorValues/SymTensorValueTypes.jl | 1 + src/TensorValues/SymTracelessTensorValueTypes.jl | 1 + src/TensorValues/TensorValueTypes.jl | 3 ++- src/TensorValues/ThirdOrderTensorValueTypes.jl | 1 + test/TensorValuesTests/TypesTests.jl | 4 ++++ 6 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl index 7bdd63aea..0a2d521f3 100644 --- a/src/TensorValues/SymFourthOrderTensorValueTypes.jl +++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl @@ -97,6 +97,7 @@ end rand(rng::AbstractRNG,::Random.SamplerType{<:SymFourthOrderTensorValue{D,T,L}}) where {D,T,L} = SymFourthOrderTensorValue{D,T}(Tuple(rand(rng, SVector{L,T}))) +change_eltype(::Type{SymFourthOrderTensorValue{D,T1}},::Type{T2}) where {D,T1,T2} = SymFourthOrderTensorValue{D,T2} change_eltype(::Type{SymFourthOrderTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymFourthOrderTensorValue{D,T2,L} 
change_eltype(::SymFourthOrderTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymFourthOrderTensorValue{D,T1,L},T2) diff --git a/src/TensorValues/SymTensorValueTypes.jl b/src/TensorValues/SymTensorValueTypes.jl index 3382f23cb..e0ed17565 100644 --- a/src/TensorValues/SymTensorValueTypes.jl +++ b/src/TensorValues/SymTensorValueTypes.jl @@ -138,6 +138,7 @@ Mutable(::Type{<:SymTensorValue{D,T}}) where {D,T} = MMatrix{D,D,T} Mutable(::SymTensorValue{D,T}) where {D,T} = Mutable(SymTensorValue{D,T}) mutable(a::SymTensorValue{D}) where D = MMatrix{D,D}(Tuple(get_array(a))) +change_eltype(::Type{SymTensorValue{D,T1}},::Type{T2}) where {D,T1,T2} = SymTensorValue{D,T2} change_eltype(::Type{SymTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymTensorValue{D,T2,L} change_eltype(::SymTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymTensorValue{D,T1,L},T2) diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl index d2f410bab..acf5b8cad 100644 --- a/src/TensorValues/SymTracelessTensorValueTypes.jl +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -154,6 +154,7 @@ Mutable(::Type{<:SymTracelessTensorValue{D,T}}) where {D,T} = MMatrix{D,D,T} Mutable(::SymTracelessTensorValue{D,T}) where {D,T} = Mutable(SymTracelessTensorValue{D,T}) mutable(a::SymTracelessTensorValue{D}) where D = MMatrix{D,D}(Tuple(get_array(a))) +change_eltype(::Type{SymTracelessTensorValue{D,T1}},::Type{T2}) where {D,T1,T2} = SymTracelessTensorValue{D,T2} change_eltype(::Type{SymTracelessTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymTracelessTensorValue{D,T2,L} change_eltype(::SymTracelessTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymTracelessTensorValue{D,T1,L},T2) diff --git a/src/TensorValues/TensorValueTypes.jl b/src/TensorValues/TensorValueTypes.jl index 327211c6f..5490d897c 100644 --- a/src/TensorValues/TensorValueTypes.jl +++ b/src/TensorValues/TensorValueTypes.jl @@ 
-14,7 +14,7 @@ struct TensorValue{D1,D2,T,L} <: MultiValue{Tuple{D1,D2},T,2,L} end ############################################################### -# Constructors +# Constructors ############################################################### # Empty TensorValue constructor @@ -111,6 +111,7 @@ Mutable(::Type{<:TensorValue{D1,D2,T}}) where {D1,D2,T} = MMatrix{D1,D2,T} Mutable(::TensorValue{D1,D2,T}) where {D1,D2,T} = Mutable(TensorValue{D1,D2,T}) mutable(a::TensorValue{D1,D2}) where {D1,D2} = MMatrix{D1,D2}(a.data) +change_eltype(::Type{TensorValue{D1,D2,T1}},::Type{T2}) where {D1,D2,T1,T2} = TensorValue{D1,D2,T2} change_eltype(::Type{TensorValue{D1,D2,T1,L}},::Type{T2}) where {D1,D2,T1,T2,L} = TensorValue{D1,D2,T2,L} change_eltype(::TensorValue{D1,D2,T1,L},::Type{T2}) where {D1,D2,T1,T2,L} = change_eltype(TensorValue{D1,D2,T1,L},T2) diff --git a/src/TensorValues/ThirdOrderTensorValueTypes.jl b/src/TensorValues/ThirdOrderTensorValueTypes.jl index 8b9b6b2c3..bfba87fb8 100644 --- a/src/TensorValues/ThirdOrderTensorValueTypes.jl +++ b/src/TensorValues/ThirdOrderTensorValueTypes.jl @@ -74,6 +74,7 @@ convert(::Type{<:ThirdOrderTensorValue{D1,D2,D3,T}}, arg::ThirdOrderTensorValue{ # other +change_eltype(::Type{ThirdOrderTensorValue{D1,D2,D3,T1}},::Type{T2}) where {D1,D2,D3,T1,T2} = ThirdOrderTensorValue{D1,D2,D3,T2} change_eltype(::Type{ThirdOrderTensorValue{D1,D2,D3,T1,L}},::Type{T2}) where {D1,D2,D3,T1,T2,L} = ThirdOrderTensorValue{D1,D2,D3,T2,L} change_eltype(::T,::Type{T2}) where {T<:ThirdOrderTensorValue,T2} = change_eltype(T,T2) diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index 900d22231..4fe97f2de 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -480,6 +480,7 @@ a = VectorValue(1,2,3,4) a = TensorValue(1,2,3,4) @test change_eltype(a,Float64) == TensorValue{2,2,Float64,4} +@test change_eltype(TensorValue{2,2,Float64},Int) == TensorValue{2,2,Int} @test change_eltype(1,Float64) 
== Float64 @@ -497,6 +498,7 @@ t = diagonal_tensor(p) a = SymTensorValue(11,21,22) @test change_eltype(a,Float64) == SymTensorValue{2,Float64,3} +@test change_eltype(SymTensorValue{2,Float64},Int) == SymTensorValue{2,Int} @test isa(Tuple(a),Tuple) @test Tuple(a) == a.data b = Matrix{Int64}(undef,2,2) @@ -510,6 +512,7 @@ bt = SymTensorValue{2,Int64}(b) a = SymTracelessTensorValue(11,21) @test change_eltype(a,Float64) == SymTracelessTensorValue{2,Float64,3} +@test change_eltype(SymTracelessTensorValue{2,Float64},Int) == SymTracelessTensorValue{2,Int} @test isa(Tuple(a),Tuple) @test Tuple(a) == a.data b = Matrix{Int64}(undef,2,2) @@ -522,6 +525,7 @@ bt = SymTracelessTensorValue{2,Int64}(b) a = SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222) @test change_eltype(a,Float64) == SymFourthOrderTensorValue{2,Float64,9} +@test change_eltype(SymFourthOrderTensorValue{2,Float64},Int) == SymFourthOrderTensorValue{2,Int} @test isa(Tuple(a),Tuple) @test Tuple(a) == a.data From 5ee6efa7f835a5ca3fbbc6be0a57ffd469ec5e3f Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 6 Aug 2024 15:29:27 +1000 Subject: [PATCH 08/85] add num_indep_components(::MultiValue) The Dof basis builder needs to know the number of free/independent components of a <:MultiValue valued FESpace, which is different from the number of components for e.g. symmetric tensors. 
---
 src/TensorValues/MultiValueTypes.jl | 5 +++
 .../SymFourthOrderTensorValueTypes.jl | 4 ++
 src/TensorValues/SymTensorValueTypes.jl | 5 +++
 .../SymTracelessTensorValueTypes.jl | 6 +++
 src/TensorValues/TensorValueTypes.jl | 3 +-
 src/TensorValues/TensorValues.jl | 1 +
 .../ThirdOrderTensorValueTypes.jl | 2 +-
 src/TensorValues/VectorValueTypes.jl | 1 +
 test/TensorValuesTests/TypesTests.jl | 39 +++++++++++++++++++
 9 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl
index 7f7ed8e49..c038f5649 100644
--- a/src/TensorValues/MultiValueTypes.jl
+++ b/src/TensorValues/MultiValueTypes.jl
@@ -29,6 +29,11 @@ change_eltype(::Number,::Type{T2}) where {T2} = change_eltype(Number,T2)
 num_components(::Type{<:Number}) = 1
 num_components(::Number) = num_components(Number)
 
+num_components(T::Type{<:MultiValue}) = @unreachable "$T type is too abstract to count its components, provide a (parametric) concrete type"
+
+"Number of independent components, that is num_components(::Number) minus the number of components determined by symmetries or constraints"
+num_indep_components(::Type{T}) where T<:Number = num_components(T)
+num_indep_components(::T) where T<:Number = num_indep_components(T)
 
 function n_components(a)
   msg = "Function n_components has been removed, use num_components instead"
diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl
index 0a2d521f3..a48f783ac 100644
--- a/src/TensorValues/SymFourthOrderTensorValueTypes.jl
+++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl
@@ -114,6 +114,10 @@ size(::SymFourthOrderTensorValue{D}) where {D} = size(SymFourthOrderTensorValue{
 length(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = D*D*D*D
 length(::SymFourthOrderTensorValue{D}) where {D} = length(SymFourthOrderTensorValue{D})
 
+num_components(::Type{<:SymFourthOrderTensorValue}) = @unreachable "The dimension is needed to count 
components" num_components(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = length(SymFourthOrderTensorValue{D}) num_components(::SymFourthOrderTensorValue{D}) where {D} = num_components(SymFourthOrderTensorValue{D}) +num_indep_components(::Type{<:SymFourthOrderTensorValue}) = num_components(SymFourthOrderTensorValue) +num_indep_components(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = (D*(D+1)÷2)^2 +num_indep_components(::SymFourthOrderTensorValue{D}) where {D} = num_indep_components(SymFourthOrderTensorValue{D}) diff --git a/src/TensorValues/SymTensorValueTypes.jl b/src/TensorValues/SymTensorValueTypes.jl index e0ed17565..1511fb026 100644 --- a/src/TensorValues/SymTensorValueTypes.jl +++ b/src/TensorValues/SymTensorValueTypes.jl @@ -157,5 +157,10 @@ size(::SymTensorValue{D}) where {D} = size(SymTensorValue{D}) length(::Type{<:SymTensorValue{D}}) where {D} = D*D length(::SymTensorValue{D}) where {D} = length(SymTensorValue{D}) +num_components(::Type{<:SymTensorValue}) = @unreachable "The dimension is needed to count components" num_components(::Type{<:SymTensorValue{D}}) where {D} = length(SymTensorValue{D}) num_components(::SymTensorValue{D}) where {D} = num_components(SymTensorValue{D}) + +num_indep_components(::Type{<:SymTensorValue}) = num_components(SymTensorValue) +num_indep_components(::Type{<:SymTensorValue{D}}) where {D} = D*(D+1)÷2 +num_indep_components(::SymTensorValue{D}) where {D} = num_indep_components(SymTensorValue{D}) diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl index acf5b8cad..8295b2b57 100644 --- a/src/TensorValues/SymTracelessTensorValueTypes.jl +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -173,5 +173,11 @@ size(::SymTracelessTensorValue{D}) where {D} = size(SymTracelessTensorValue{D}) length(::Type{<:SymTracelessTensorValue{D}}) where {D} = D*D length(::SymTracelessTensorValue{D}) where {D} = length(SymTracelessTensorValue{D}) 
+num_components(::Type{<:SymTracelessTensorValue}) = @unreachable "The dimension is needed to count components" num_components(::Type{<:SymTracelessTensorValue{D}}) where {D} = length(SymTracelessTensorValue{D}) num_components(::SymTracelessTensorValue{D}) where {D} = num_components(SymTracelessTensorValue{D}) + +num_indep_components(::Type{<:SymTracelessTensorValue}) = num_components(SymTracelessTensorValue) +num_indep_components(::Type{SymTracelessTensorValue{0}}) = 0 +num_indep_components(::Type{<:SymTracelessTensorValue{D}}) where {D} = D*(D+1)÷2-1 +num_indep_components(::SymTracelessTensorValue{D}) where {D} = num_indep_components(SymTracelessTensorValue{D}) diff --git a/src/TensorValues/TensorValueTypes.jl b/src/TensorValues/TensorValueTypes.jl index 5490d897c..c58d1262a 100644 --- a/src/TensorValues/TensorValueTypes.jl +++ b/src/TensorValues/TensorValueTypes.jl @@ -142,7 +142,8 @@ length(::Type{<:TensorValue{D}}) where {D} = length(TensorValue{D,D}) length(::Type{<:TensorValue{D1,D2}}) where {D1,D2} = D1*D2 length(::TensorValue{D1,D2}) where {D1,D2} = length(TensorValue{D1,D2}) -num_components(::Type{<:TensorValue{D}}) where {D} = length(TensorValue{D,D}) +num_components(::Type{<:TensorValue}) = @unreachable "All two size dimensions are needed to count components" +num_components(::Type{<:TensorValue{D,D}}) where {D} = length(TensorValue{D,D}) num_components(::Type{<:TensorValue{D1,D2}}) where {D1,D2} = length(TensorValue{D1,D2}) num_components(::TensorValue{D1,D2}) where {D1,D2} = num_components(TensorValue{D1,D2}) diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index 747e21d71..1ac4ee2d8 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -54,6 +54,7 @@ export Mutable export symmetric_part export n_components export num_components +export num_indep_components export change_eltype export diagonal_tensor export ⊙ diff --git a/src/TensorValues/ThirdOrderTensorValueTypes.jl 
b/src/TensorValues/ThirdOrderTensorValueTypes.jl index bfba87fb8..62ec7eaeb 100644 --- a/src/TensorValues/ThirdOrderTensorValueTypes.jl +++ b/src/TensorValues/ThirdOrderTensorValueTypes.jl @@ -110,6 +110,6 @@ size(::ThirdOrderTensorValue{D1,D2,D3}) where {D1,D2,D3} = size(ThirdOrderTensor length(::Type{<:ThirdOrderTensorValue{D1,D2,D3}}) where {D1,D2,D3} = D1*D2*D3 length(::ThirdOrderTensorValue{D1,D2,D3}) where {D1,D2,D3} = length(ThirdOrderTensorValue{D1,D2,D3}) +num_components(::Type{<:ThirdOrderTensorValue}) = @unreachable "All three size dimensions are needed to count components" num_components(::Type{<:ThirdOrderTensorValue{D1,D2,D3}}) where {D1,D2,D3} = length(ThirdOrderTensorValue{D1,D2,D3}) num_components(::ThirdOrderTensorValue{D1,D2,D3}) where {D1,D2,D3} = num_components(ThirdOrderTensorValue{D1,D2,D3}) - diff --git a/src/TensorValues/VectorValueTypes.jl b/src/TensorValues/VectorValueTypes.jl index a142afc13..841d6e046 100644 --- a/src/TensorValues/VectorValueTypes.jl +++ b/src/TensorValues/VectorValueTypes.jl @@ -108,5 +108,6 @@ size(::VectorValue{D}) where {D} = size(VectorValue{D}) length(::Type{<:VectorValue{D}}) where {D} = D length(::VectorValue{D}) where {D} = length(VectorValue{D}) +num_components(::Type{<:VectorValue}) = @unreachable "The dimension is needed to count components" num_components(::Type{<:VectorValue{D}}) where {D} = length(VectorValue{D}) num_components(::VectorValue{D}) where {D} = num_components(VectorValue{D}) diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index 4fe97f2de..3c0a9be57 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -473,8 +473,47 @@ v = VectorValue(m) @test num_components(TensorValue(1,2,3,4)) == 4 @test num_components(SymTensorValue(1,2,3)) == 4 @test num_components(SymTracelessTensorValue(1,2)) == 4 +@test num_components(ThirdOrderTensorValue(111,112,121,122,211,212,221,222)) == 8 @test 
num_components(SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222)) == 16 +@test num_indep_components(Int) == 1 +@test num_indep_components(Float64) == 1 +@test num_indep_components(1.0) == 1 +@test num_indep_components(1) == 1 +@test num_indep_components(VectorValue{0}) == 0 +@test num_indep_components(VectorValue{3}) == 3 +@test num_indep_components(VectorValue(1,2,3)) == 3 +@test num_indep_components(TensorValue{2,2}) == 4 +@test num_indep_components(TensorValue(1,2,3,4)) == 4 +@test num_indep_components(SymTensorValue{0}) == 0 +@test num_indep_components(SymTensorValue{2}) == 3 +@test num_indep_components(SymTensorValue(1,2,3)) == 3 +@test num_indep_components(SymTracelessTensorValue{0}) == 0 +@test num_indep_components(SymTracelessTensorValue{1}) == 0 +@test num_indep_components(SymTracelessTensorValue{2}) == 2 +@test num_indep_components(SymTracelessTensorValue(1,2)) == 2 +@test num_indep_components(ThirdOrderTensorValue{2,2,2}) == 8 +@test num_indep_components(ThirdOrderTensorValue(111,112,121,122,211,212,221,222)) == 8 +@test num_indep_components(SymFourthOrderTensorValue{2}) == 9 +@test num_indep_components(SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222)) == 9 + +@test_throws ErrorException num_components(VectorValue) +@test_throws ErrorException num_components(TensorValue) +@test_throws ErrorException num_components(TensorValue{2}) +@test_throws ErrorException num_components(AbstractSymTensorValue{2}) +@test_throws ErrorException num_components(SymTensorValue) +@test_throws ErrorException num_components(SymTracelessTensorValue) +@test_throws ErrorException num_components(ThirdOrderTensorValue{2,2}) +@test_throws ErrorException num_components(SymFourthOrderTensorValue) +@test_throws ErrorException num_indep_components(VectorValue) +@test_throws ErrorException num_indep_components(TensorValue) +@test_throws ErrorException num_indep_components(TensorValue{2}) +@test_throws ErrorException 
num_indep_components(AbstractSymTensorValue{2}) +@test_throws ErrorException num_indep_components(SymTensorValue) +@test_throws ErrorException num_indep_components(SymTracelessTensorValue) +@test_throws ErrorException num_indep_components(ThirdOrderTensorValue{2,2}) +@test_throws ErrorException num_indep_components(SymFourthOrderTensorValue) + a = VectorValue(1,2,3,4) @test change_eltype(a,Float64) == VectorValue{4,Float64} From 7769a3ea1fffc27e4c8e274901d2491fdde5bc3c Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Wed, 7 Aug 2024 16:29:59 +1000 Subject: [PATCH 09/85] fix ConstantFESpaces.jl indentations --- src/FESpaces/ConstantFESpaces.jl | 111 +++++++++++++++---------------- 1 file changed, 52 insertions(+), 59 deletions(-) diff --git a/src/FESpaces/ConstantFESpaces.jl b/src/FESpaces/ConstantFESpaces.jl index d8251f341..d938cc9e0 100644 --- a/src/FESpaces/ConstantFESpaces.jl +++ b/src/FESpaces/ConstantFESpaces.jl @@ -5,46 +5,42 @@ struct ConstantFESpace <: SingleFieldFESpace end """ struct ConstantFESpace{V,T,A,B,C} <: SingleFieldFESpace -model::DiscreteModel -cell_basis::A -cell_dof_basis::B -cell_dof_ids::C -function ConstantFESpace(model; - vector_type::Type{V}=Vector{Float64}, - field_type::Type{T}=Float64) where {V,T} -function setup_cell_reffe(model::DiscreteModel, - reffe::Tuple{<:ReferenceFEName,Any,Any}; kwargs...) - basis, reffe_args,reffe_kwargs = reffe - cell_reffe = ReferenceFE(model,basis,reffe_args...;reffe_kwargs...) 
-end -reffe=ReferenceFE(lagrangian,T,0) -cell_reffe = setup_cell_reffe(model,reffe) -cell_basis_array=lazy_map(get_shapefuns,cell_reffe) - -cell_basis=SingleFieldFEBasis( - cell_basis_array, - Triangulation(model), - TestBasis(), - ReferenceDomain()) - -cell_dof_basis_array=lazy_map(get_dof_basis,cell_reffe) -cell_dof_basis=CellDof(cell_dof_basis_array,Triangulation(model),ReferenceDomain()) - -cell_dof_ids=Fill(Int32(1):Int32(num_components(field_type)),num_cells(model)) -A=typeof(cell_basis) -B=typeof(cell_dof_basis) -C=typeof(cell_dof_ids) -new{V,T,A,B,C}(model, - cell_basis, - cell_dof_basis, - cell_dof_ids) -end + model::DiscreteModel + cell_basis::A + cell_dof_basis::B + cell_dof_ids::C + + function ConstantFESpace(model; + vector_type::Type{V}=Vector{Float64}, + field_type::Type{T}=Float64) where {V,T} + function setup_cell_reffe(model::DiscreteModel, + reffe::Tuple{<:ReferenceFEName,Any,Any}; kwargs...) + basis, reffe_args,reffe_kwargs = reffe + cell_reffe = ReferenceFE(model,basis,reffe_args...;reffe_kwargs...) 
+ end + + reffe = ReferenceFE(lagrangian,T,0) + cell_reffe = setup_cell_reffe(model,reffe) + cell_basis_array = lazy_map(get_shapefuns,cell_reffe) + + cell_basis = SingleFieldFEBasis( + cell_basis_array, + Triangulation(model), + TestBasis(), + ReferenceDomain()) + + cell_dof_basis_array = lazy_map(get_dof_basis,cell_reffe) + cell_dof_basis = CellDof(cell_dof_basis_array,Triangulation(model),ReferenceDomain()) + + cell_dof_ids = Fill(Int32(1):Int32(num_components(field_type)),num_cells(model)) + A = typeof(cell_basis) + B = typeof(cell_dof_basis) + C = typeof(cell_dof_ids) + new{V,T,A,B,C}(model, cell_basis, cell_dof_basis, cell_dof_ids) + end end -# Genuine functions -function TrialFESpace(f::ConstantFESpace) -f -end +TrialFESpace(f::ConstantFESpace) = f # Delegated functions get_triangulation(f::ConstantFESpace) = Triangulation(f.model) @@ -70,27 +66,24 @@ num_dirichlet_tags(f::ConstantFESpace) = 0 get_dirichlet_dof_tag(f::ConstantFESpace) = Int8[] function scatter_free_and_dirichlet_values(f::ConstantFESpace,fv,dv) -cell_dof_ids = get_cell_dof_ids(f) -lazy_map(Broadcasting(PosNegReindex(fv,dv)),cell_dof_ids) + cell_dof_ids = get_cell_dof_ids(f) + lazy_map(Broadcasting(PosNegReindex(fv,dv)),cell_dof_ids) end -function gather_free_and_dirichlet_values!(free_vals, - dirichlet_vals, - f::ConstantFESpace, - cell_vals) -cell_dofs = get_cell_dof_ids(f) -cache_vals = array_cache(cell_vals) -cache_dofs = array_cache(cell_dofs) -cells = 1:length(cell_vals) - -_free_and_dirichlet_values_fill!( -free_vals, -dirichlet_vals, -cache_vals, -cache_dofs, -cell_vals, -cell_dofs, -cells) - -(free_vals,dirichlet_vals) +function gather_free_and_dirichlet_values!(free_vals, dirichlet_vals, f::ConstantFESpace, cell_vals) + cell_dofs = get_cell_dof_ids(f) + cache_vals = array_cache(cell_vals) + cache_dofs = array_cache(cell_dofs) + cells = 1:length(cell_vals) + + _free_and_dirichlet_values_fill!( + free_vals, + dirichlet_vals, + cache_vals, + cache_dofs, + cell_vals, + cell_dofs, + 
cells) + + (free_vals,dirichlet_vals) end From 1543f7e7c5a81e1fd6b610e8a036d88d3a10e9ca Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Wed, 7 Aug 2024 19:17:01 +1000 Subject: [PATCH 10/85] [wip] added indep_comp_getindex and fix FESpaces of value with linked components. Indeed, the code previously made the assumption that all the components of the Number unknown of the FESpaces are unlinked to the other, which is false for the symmetric tensor types. A new getter for the independent components of the Number is added and used in the MultiValue'd FESpaces machinery. Also, the number of DoFs of the ReferenceFEs are given by num_indep_components(::Number). fixes #923 #908 --- src/Adaptivity/FineToCoarseReferenceFEs.jl | 10 +-- src/Exports.jl | 1 + src/FESpaces/CLagrangianFESpaces.jl | 14 ++-- src/FESpaces/ConstantFESpaces.jl | 2 +- src/Polynomials/JacobiPolynomialBases.jl | 16 ++--- src/Polynomials/ModalC0Bases.jl | 26 ++++--- src/Polynomials/MonomialBases.jl | 27 +++---- src/ReferenceFEs/LagrangianDofBases.jl | 20 +++--- src/ReferenceFEs/LagrangianRefFEs.jl | 4 +- src/TensorValues/MultiValueTypes.jl | 30 +++++++- src/TensorValues/TensorValues.jl | 1 + .../CLagrangianRefFEsTests.jl | 23 ++++++ .../LagrangianDofBasesTests.jl | 71 +++++++++++++++++++ 13 files changed, 188 insertions(+), 57 deletions(-) diff --git a/src/Adaptivity/FineToCoarseReferenceFEs.jl b/src/Adaptivity/FineToCoarseReferenceFEs.jl index e921d479e..1899d38a0 100644 --- a/src/Adaptivity/FineToCoarseReferenceFEs.jl +++ b/src/Adaptivity/FineToCoarseReferenceFEs.jl @@ -45,8 +45,8 @@ function Arrays.evaluate!(cache,s::FineToCoarseDofBasis{T,<:LagrangianDofBasis}, vals = evaluate!(cf,field,b.nodes,s.child_ids) ndofs = length(b.dof_to_node) T2 = eltype(vals) - ncomps = num_components(T2) - @check ncomps == num_components(eltype(b.node_and_comp_to_dof)) """\n + ncomps = num_indep_components(T2) # use num_indep_components ? 
+ @check ncomps == num_indep_components(eltype(b.node_and_comp_to_dof)) """\n Unable to evaluate LagrangianDofBasis. The number of components of the given Field does not match with the LagrangianDofBasis. @@ -80,8 +80,8 @@ end """ - Wrapper for a ReferenceFE which is specialised for - efficiently evaluating FineToCoarseFields. + Wrapper for a ReferenceFE which is specialised for + efficiently evaluating FineToCoarseFields. """ struct FineToCoarseRefFE{T,D,A} <: ReferenceFE{D} reffe :: T @@ -132,4 +132,4 @@ function FESpaces.TestFESpace(model::DiscreteModel,rrules::AbstractVector{<:Refi basis, reffe_args, reffe_kwargs = reffe reffes = lazy_map(rr -> ReferenceFE(get_polytope(rr),rr,basis,reffe_args...;reffe_kwargs...),rrules) return TestFESpace(model,reffes;kwargs...) -end \ No newline at end of file +end diff --git a/src/Exports.jl b/src/Exports.jl index 782625bba..27dbe467d 100644 --- a/src/Exports.jl +++ b/src/Exports.jl @@ -41,6 +41,7 @@ using Gridap.Arrays: ∑; export ∑ @publish TensorValues outer @publish TensorValues diagonal_tensor @publish TensorValues num_components +@publish TensorValues num_indep_components using Gridap.TensorValues: ⊙; export ⊙ using Gridap.TensorValues: ⊗; export ⊗ diff --git a/src/FESpaces/CLagrangianFESpaces.jl b/src/FESpaces/CLagrangianFESpaces.jl index c3ad07e1a..f90e58691 100644 --- a/src/FESpaces/CLagrangianFESpaces.jl +++ b/src/FESpaces/CLagrangianFESpaces.jl @@ -119,7 +119,7 @@ end # Helpers _default_mask(::Type) = true -_default_mask(::Type{T}) where T <: MultiValue = ntuple(i->true,Val{length(T)}()) +_default_mask(::Type{T}) where T <: MultiValue = ntuple(i->true,Val{num_indep_components(T)}()) _dof_type(::Type{T}) where T = T _dof_type(::Type{T}) where T<:MultiValue = eltype(T) @@ -193,7 +193,7 @@ function _generate_node_to_dof_glue_component_major( z::MultiValue,node_to_tag,tag_to_masks) nfree_dofs = 0 ndiri_dofs = 0 - ncomps = length(z) + ncomps = num_indep_components(z) @check length(testitem(tag_to_masks)) == ncomps for 
(node,tag) in enumerate(node_to_tag) if tag == UNSET @@ -218,7 +218,7 @@ function _generate_node_to_dof_glue_component_major( node_and_comp_to_dof = zeros(T,nnodes) nfree_dofs = 0 ndiri_dofs = 0 - m = zero(Mutable(T)) + m = zeros(Int32, ncomps) for (node,tag) in enumerate(node_to_tag) if tag == UNSET for comp in 1:ncomps @@ -245,7 +245,7 @@ function _generate_node_to_dof_glue_component_major( end end end - node_and_comp_to_dof[node] = m + node_and_comp_to_dof[node] = T(m...) end glue = NodeToDofGlue( free_dof_to_node, @@ -301,7 +301,7 @@ function _generate_cell_dofs_clagrangian( cell_to_ctype, node_and_comp_to_dof) - ncomps = num_components(z) + ncomps = num_indep_components(z) ctype_to_lnode_to_comp_to_ldof = map(get_node_and_comp_to_dof,ctype_to_reffe) ctype_to_num_ldofs = map(num_dofs,ctype_to_reffe) @@ -353,8 +353,8 @@ function _fill_cell_dofs_clagrangian!( p = cell_to_dofs.ptrs[cell]-1 for (lnode, node) in enumerate(nodes) for comp in 1:ncomps - ldof = lnode_and_comp_to_ldof[lnode][comp] - dof = node_and_comp_to_dof[node][comp] + ldof = indep_comp_getindex(lnode_and_comp_to_ldof[lnode], comp) + dof = indep_comp_getindex(node_and_comp_to_dof[node], comp) cell_to_dofs.data[p+ldof] = dof end end diff --git a/src/FESpaces/ConstantFESpaces.jl b/src/FESpaces/ConstantFESpaces.jl index d938cc9e0..f7115f46c 100644 --- a/src/FESpaces/ConstantFESpaces.jl +++ b/src/FESpaces/ConstantFESpaces.jl @@ -32,7 +32,7 @@ struct ConstantFESpace{V,T,A,B,C} <: SingleFieldFESpace cell_dof_basis_array = lazy_map(get_dof_basis,cell_reffe) cell_dof_basis = CellDof(cell_dof_basis_array,Triangulation(model),ReferenceDomain()) - cell_dof_ids = Fill(Int32(1):Int32(num_components(field_type)),num_cells(model)) + cell_dof_ids = Fill(Int32(1):Int32(num_indep_components(field_type)),num_cells(model)) A = typeof(cell_basis) B = typeof(cell_dof_basis) C = typeof(cell_dof_ids) diff --git a/src/Polynomials/JacobiPolynomialBases.jl b/src/Polynomials/JacobiPolynomialBases.jl index 2b615d9c3..896fc9c2d 
100644 --- a/src/Polynomials/JacobiPolynomialBases.jl +++ b/src/Polynomials/JacobiPolynomialBases.jl @@ -9,7 +9,7 @@ struct JacobiPolynomialBasis{D,T} <: AbstractVector{JacobiPolynomial} end end -@inline Base.size(a::JacobiPolynomialBasis{D,T}) where {D,T} = (length(a.terms)*num_components(T),) +@inline Base.size(a::JacobiPolynomialBasis{D,T}) where {D,T} = (length(a.terms)*num_indep_components(T),) @inline Base.getindex(a::JacobiPolynomialBasis,i::Integer) = JacobiPolynomial() @inline Base.IndexStyle(::JacobiPolynomialBasis) = IndexLinear() @@ -49,7 +49,7 @@ return_type(::JacobiPolynomialBasis{D,T}) where {D,T} = T function return_cache(f::JacobiPolynomialBasis{D,T},x::AbstractVector{<:Point}) where {D,T} @check D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) r = CachedArray(zeros(T,(np,ndof))) v = CachedArray(zeros(T,(ndof,))) @@ -60,7 +60,7 @@ end function evaluate!(cache,f::JacobiPolynomialBasis{D,T},x::AbstractVector{<:Point}) where {D,T} r, v, c = cache np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -82,7 +82,7 @@ function return_cache( f = fg.fa @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) xi = testitem(x) T = gradient_type(V,xi) n = 1 + _maximum(f.orders) @@ -101,7 +101,7 @@ function evaluate!( f = fg.fa r, v, c, g = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -124,7 +124,7 @@ function return_cache( f = fg.fa @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) xi = testitem(x) T = gradient_type(gradient_type(V,xi),xi) n = 1 + 
_maximum(f.orders) @@ -144,7 +144,7 @@ function evaluate!( f = fg.fa r, v, c, g, h = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -164,7 +164,7 @@ end # Optimizing evaluation at a single point function return_cache(f::JacobiPolynomialBasis{D,T},x::Point) where {D,T} - ndof = length(f.terms)*num_components(T) + ndof = length(f) r = CachedArray(zeros(T,(ndof,))) xs = [x] cf = return_cache(f,xs) diff --git a/src/Polynomials/ModalC0Bases.jl b/src/Polynomials/ModalC0Bases.jl index 9c3a6b2e9..834b742cc 100644 --- a/src/Polynomials/ModalC0Bases.jl +++ b/src/Polynomials/ModalC0Bases.jl @@ -11,11 +11,12 @@ struct ModalC0Basis{D,T,V} <: AbstractVector{ModalC0BasisFunction} terms::Vector{CartesianIndex{D}}, a::Vector{Point{D,V}}, b::Vector{Point{D,V}}) where {D,T,V} + new{D,T,V}(orders,terms,a,b) end end -@inline Base.size(a::ModalC0Basis{D,T,V}) where {D,T,V} = (length(a.terms)*num_components(T),) +@inline Base.size(a::ModalC0Basis{D,T,V}) where {D,T,V} = (length(a.terms)*num_indep_components(T),) @inline Base.getindex(a::ModalC0Basis,i::Integer) = ModalC0BasisFunction() @inline Base.IndexStyle(::ModalC0Basis) = IndexLinear() @@ -101,7 +102,7 @@ return_type(::ModalC0Basis{D,T,V}) where {D,T,V} = T function return_cache(f::ModalC0Basis{D,T,V},x::AbstractVector{<:Point}) where {D,T,V} @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) r = CachedArray(zeros(T,(np,ndof))) v = CachedArray(zeros(T,(ndof,))) @@ -112,7 +113,7 @@ end function evaluate!(cache,f::ModalC0Basis{D,T,V},x::AbstractVector{<:Point}) where {D,T,V} r, v, c = cache np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -134,7 +135,7 @@ function return_cache( f = fg.fa @assert D == 
length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) xi = testitem(x) T = gradient_type(V,xi) n = 1 + _maximum(f.orders) @@ -153,7 +154,7 @@ function evaluate!( f = fg.fa r, v, c, g = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -176,7 +177,7 @@ function return_cache( f = fg.fa @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) xi = testitem(x) T = gradient_type(gradient_type(V,xi),xi) n = 1 + _maximum(f.orders) @@ -196,7 +197,7 @@ function evaluate!( f = fg.fa r, v, c, g, h = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -391,16 +392,17 @@ function _evaluate_nd_mc0!( end @inline function _set_value_mc0!(v::AbstractVector{V},s::T,k,l) where {V,T} - m = zero(Mutable(V)) + ncomp = num_indep_components(V) + m = zeros(T,ncomp) z = zero(T) - js = eachindex(m) + js = 1:ncomp for j in js for i in js @inbounds m[i] = z end @inbounds m[j] = s i = k+l*(j-1) - @inbounds v[i] = m + @inbounds v[i] = V(m...) 
end k+1 end @@ -461,8 +463,12 @@ end k+1 end +# Indexing and m definition should be fixed if G contains symmetries, that is +# if the code is optimized for symmetric tensor V valued FESpaces +# (if gradient_type(V) returned a symmetric higher order tensor type G) @inline function _set_gradient_mc0!( v::AbstractVector{G},s,k,l,::Type{V}) where {V,G} + @notimplementedif num_indep_components(G) != num_components(G) "Not implemented for symmetric Jacobian or Hessian" T = eltype(s) m = zero(Mutable(G)) diff --git a/src/Polynomials/MonomialBases.jl b/src/Polynomials/MonomialBases.jl index 4f5238983..261ffb809 100644 --- a/src/Polynomials/MonomialBases.jl +++ b/src/Polynomials/MonomialBases.jl @@ -18,7 +18,7 @@ struct MonomialBasis{D,T} <: AbstractVector{Monomial} end end -Base.size(a::MonomialBasis{D,T}) where {D,T} = (length(a.terms)*num_components(T),) +Base.size(a::MonomialBasis{D,T}) where {D,T} = (length(a.terms)*num_indep_components(T),) # @santiagobadia : Not sure we want to create the monomial machinery Base.getindex(a::MonomialBasis,i::Integer) = Monomial() Base.IndexStyle(::MonomialBasis) = IndexLinear() @@ -122,7 +122,7 @@ function return_cache(f::MonomialBasis{D,T},x::AbstractVector{<:Point}) where {D zxi = zero(eltype(eltype(x))) Tp = typeof( zT*zxi*zxi + zT*zxi*zxi ) np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) r = CachedArray(zeros(Tp,(np,ndof))) v = CachedArray(zeros(Tp,(ndof,))) @@ -133,7 +133,7 @@ end function evaluate!(cache,f::MonomialBasis{D,T},x::AbstractVector{<:Point}) where {D,T} r, v, c = cache np = length(x) - ndof = length(f.terms)*num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -157,7 +157,7 @@ function _return_cache( f = fg.fa @check D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) n = 1 + _maximum(f.orders) r = 
CachedArray(zeros(T,(np,ndof))) v = CachedArray(zeros(T,(ndof,))) @@ -173,7 +173,7 @@ function _return_cache( TisbitsType::Val{false}) where {D,V,T} cache = _return_cache(fg,x,T,Val{true}()) - z = CachedArray(zeros(eltype(T),D)) + z = CachedArray(zeros(eltype(T),D)) (cache...,z) end @@ -197,7 +197,7 @@ function _evaluate!( r, v, c, g = cache z = zero(Mutable(VectorValue{D,eltype(T)})) np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -222,7 +222,7 @@ function _evaluate!( f = fg.fa r, v, c, g, z = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -255,7 +255,7 @@ function return_cache( f = fg.fa @check D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*num_components(V) + ndof = length(f) xi = testitem(x) T = gradient_type(gradient_type(V,xi),xi) n = 1 + _maximum(f.orders) @@ -275,7 +275,7 @@ function evaluate!( f = fg.fa r, v, c, g, h = cache np = length(x) - ndof = length(f.terms) * num_components(T) + ndof = length(f) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -406,15 +406,16 @@ function _evaluate_nd!( end function _set_value!(v::AbstractVector{V},s::T,k) where {V,T} - m = zero(Mutable(V)) + ncomp = num_indep_components(V) + m = zeros(T,ncomp) z = zero(T) - js = eachindex(m) + js = 1:ncomp for j in js for i in js @inbounds m[i] = z end m[j] = s - v[k] = m + v[k] = V(m...) 
k += 1 end k @@ -440,7 +441,7 @@ function _gradient_nd!( _evaluate_1d!(c,x,orders[d],d) _gradient_1d!(g,x,orders[d],d) end - + o = one(T) k = 1 diff --git a/src/ReferenceFEs/LagrangianDofBases.jl b/src/ReferenceFEs/LagrangianDofBases.jl index 8573d222c..07b79e94e 100644 --- a/src/ReferenceFEs/LagrangianDofBases.jl +++ b/src/ReferenceFEs/LagrangianDofBases.jl @@ -68,13 +68,13 @@ end # Node major implementation function _generate_dof_layout_node_major(::Type{T},nnodes::Integer) where T<:MultiValue - ncomps = num_components(T) V = change_eltype(T,Int) + ncomps = num_indep_components(T) ndofs = ncomps*nnodes dof_to_comp = zeros(Int,ndofs) dof_to_node = zeros(Int,ndofs) - node_and_comp_to_dof = zeros(V,nnodes) - m = zero(Mutable(V)) + node_and_comp_to_dof = Vector{V}(undef,nnodes) + m = zeros(Int,ncomps) for node in 1:nnodes for comp in 1:ncomps o = nnodes*(comp-1) @@ -83,7 +83,7 @@ function _generate_dof_layout_node_major(::Type{T},nnodes::Integer) where T<:Mul dof_to_node[dof] = node m[comp] = dof end - node_and_comp_to_dof[node] = m + node_and_comp_to_dof[node] = V(m...) end (dof_to_node, dof_to_comp, node_and_comp_to_dof) end @@ -113,8 +113,8 @@ function evaluate!(cache,b::LagrangianDofBasis,field) vals = evaluate!(cf,field,b.nodes) ndofs = length(b.dof_to_node) T = eltype(vals) - ncomps = num_components(T) - @check ncomps == num_components(eltype(b.node_and_comp_to_dof)) """\n + ncomps = num_indep_components(T) + @check ncomps == num_indep_components(eltype(b.node_and_comp_to_dof)) """\n Unable to evaluate LagrangianDofBasis. The number of components of the given Field does not match with the LagrangianDofBasis. 
@@ -135,8 +135,8 @@ function _evaluate_lagr_dof!(c::AbstractVector,node_comp_to_val,node_and_comp_to comp_to_dof = node_and_comp_to_dof[node] comp_to_val = node_comp_to_val[node] for comp in 1:ncomps - dof = comp_to_dof[comp] - val = comp_to_val[comp] + dof = indep_comp_getindex(comp_to_dof,comp) + val = indep_comp_getindex(comp_to_val,comp) r[dof] = val end end @@ -152,8 +152,8 @@ function _evaluate_lagr_dof!(c::AbstractMatrix,node_pdof_comp_to_val,node_and_co for pdof in 1:npdofs comp_to_val = node_pdof_comp_to_val[node,pdof] for comp in 1:ncomps - dof = comp_to_dof[comp] - val = comp_to_val[comp] + dof = indep_comp_getindex(comp_to_dof,comp) + val = indep_comp_getindex(comp_to_val,comp) r[dof,pdof] = val end end diff --git a/src/ReferenceFEs/LagrangianRefFEs.jl b/src/ReferenceFEs/LagrangianRefFEs.jl index 2a2cb10b5..af14e2cc8 100644 --- a/src/ReferenceFEs/LagrangianRefFEs.jl +++ b/src/ReferenceFEs/LagrangianRefFEs.jl @@ -222,7 +222,7 @@ end function _generate_face_own_dofs(face_own_nodes, node_and_comp_to_dof) faces = 1:length(face_own_nodes) T = eltype(node_and_comp_to_dof) - comps = 1:num_components(T) + comps = 1:num_indep_components(T) face_own_dofs = [Int[] for i in faces] for face in faces nodes = face_own_nodes[face] @@ -254,7 +254,7 @@ function _generate_face_own_dofs_permutations( face_own_nodes_permutations, node_and_comp_to_dof, face_own_nodes, face_own_dofs) T = eltype(node_and_comp_to_dof) - ncomps = num_components(T) + ncomps = num_indep_components(T) face_own_dofs_permutations = Vector{Vector{Int}}[] for (face, pindex_to_inode_to_pinode) in enumerate(face_own_nodes_permutations) diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl index c038f5649..c59571246 100644 --- a/src/TensorValues/MultiValueTypes.jl +++ b/src/TensorValues/MultiValueTypes.jl @@ -31,7 +31,10 @@ num_components(::Type{<:Number}) = 1 num_components(::Number) = num_components(Number) num_components(T::Type{<:MultiValue}) = @unreachable "$T type is 
too abstract to count its components, provide a (parametric) concrete type" -"Number of independant components, that is num_component(::Number) minus the number of components determined by symetries or constraints" +""" +Number of independant components, that is `num_components(::Type{T})` minus the +number of components determined from others by symmetries or constraints. +""" num_indep_components(::Type{T}) where T<:Number = num_components(T) num_indep_components(::T) where T<:Number = num_indep_components(T) @@ -40,6 +43,31 @@ function n_components(a) error(msg) end +# This should probably not be exported, as (accessing) the data field of +# MultiValue is not a public api function data_index(::Type{<:MultiValue},i...) @abstractmethod end + +""" + indep_comp_getindex(a::Number,i) + +Get the ith independent component of `a`. It only differs from `getindex(a,i)` +when the components of `a` are linked, see [`num_indep_components`](@ref), and +`i` should be in `1:num_indep_components(a)`. 
+""" +function indep_comp_getindex(a::Number,i) + @check 1 <= i <= num_indep_components(Number) + a[i] +end + +function indep_comp_getindex(a::T,i) where {T<:MultiValue} + @check 1 <= i <= num_indep_components(T) + _get_data(a,i) +end + +# abstraction of Multivalue data access in case subtypes of MultiValue don't +# store its data in a data field +function _get_data(a::MultiValue,i) + a.data[i] +end diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index 1ac4ee2d8..7a9fa4bf3 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -63,6 +63,7 @@ export ⋅¹ export ⋅² export double_contraction export data_index +export indep_comp_getindex import Base: show import Base: zero, one diff --git a/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl b/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl index 02855bb38..1723b85ab 100644 --- a/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl +++ b/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl @@ -35,6 +35,20 @@ dofs = LagrangianDofBasis(VectorValue{3,Float64},TET,1) @test dofs.nodes == Point{3,Float64}[(0,0,0), (1,0,0), (0,1,0), (0,0,1)] @test dofs.node_and_comp_to_dof == VectorValue{3,Int}[(1,5,9), (2,6,10), (3,7,11), (4,8,12)] +dofs = LagrangianDofBasis(TensorValue{2,2,Float64},TET,1) +@test dofs.nodes == Point{3,Float64}[(0,0,0), (1,0,0), (0,1,0), (0,0,1)] +@test dofs.node_and_comp_to_dof == TensorValue{2,2,Int}[(1,5,9,13), (2,6,10,14), (3,7,11,15), (4,8,12,16)] + +dofs = LagrangianDofBasis(SymTensorValue{2,Float64},TET,1) +@test dofs.nodes == Point{3,Float64}[(0,0,0), (1,0,0), (0,1,0), (0,0,1)] +@test dofs.node_and_comp_to_dof == SymTensorValue{2,Int}[(1,5,9), (2,6,10), (3,7,11), (4,8,12)] + +# For SymTracelessTensorValue, the last index in data should not be accessed, +# as it is minus the sum of the D-1 diagonal value, so is not free/independent +dofs = LagrangianDofBasis(SymTracelessTensorValue{2,Float64},TET,1) +@test dofs.nodes == Point{3,Float64}[(0,0,0), 
(1,0,0), (0,1,0), (0,0,1)] +@test dofs.node_and_comp_to_dof == SymTracelessTensorValue{2,Int}[(1,5), (2,6), (3,7), (4,8)] + dofs = LagrangianDofBasis(Float64,WEDGE,(2,2,2)) r = Point{3,Float64}[ (0.0,0.0,0.0),(1.0,0.0,0.0),(0.0,1.0,0.0), @@ -48,6 +62,15 @@ r = Point{3,Float64}[ dofs = LagrangianDofBasis(VectorValue{2,Int},VERTEX,()) @test dofs.node_and_comp_to_dof == VectorValue{2,Int}[(1,2)] +dofs = LagrangianDofBasis(TensorValue{2,2,Int},VERTEX,()) +@test dofs.node_and_comp_to_dof == TensorValue{2,2,Int}[(1,2,3,4)] + +dofs = LagrangianDofBasis(SymTensorValue{2,Int},VERTEX,()) +@test dofs.node_and_comp_to_dof == SymTensorValue{2,Int}[(1,2,3)] + +dofs = LagrangianDofBasis(SymTracelessTensorValue{2,Int},VERTEX,()) +@test dofs.node_and_comp_to_dof == SymTracelessTensorValue{2,Int}[(1,2)] + b = MonomialBasis(VectorValue{2,Int},VERTEX,()) @test length(b) == 2 @test evaluate(b,Point{0,Int}[(),()]) == VectorValue{2,Int}[(1, 0) (0, 1); (1, 0) (0, 1)] diff --git a/test/ReferenceFEsTests/LagrangianDofBasesTests.jl b/test/ReferenceFEsTests/LagrangianDofBasesTests.jl index 82b41d107..55c071467 100644 --- a/test/ReferenceFEsTests/LagrangianDofBasesTests.jl +++ b/test/ReferenceFEsTests/LagrangianDofBasesTests.jl @@ -52,4 +52,75 @@ dbb = [ test_dof_array(db,b,dbb) +T = TensorValue{2,2,Float64} +db = LagrangianDofBasis(T,x) +@test db.nodes === x +@test db.node_and_comp_to_dof == TensorValue{2,2,Int}[(1,5,9,13), (2,6,10,14), (3,7,11,15), (4,8,12,16)] +@test db.dof_to_node == [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] +@test db.dof_to_comp == [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4] + +v = TensorValue(1,2,3,4) +f = GenericField(x->v*x[1]) +dbf = [0, 1, 0, 1, 0, 2, 0, 2, 0, 3, 0, 3, 0, 4, 0, 4] + +test_dof_array(db,f,dbf) + +ndof = 16 +b = fill(f,ndof) +bx = evaluate(b,x) +dbb = [ + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1; 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1; + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2 2 2 2 
2 2 2 2 2; 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2; + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3; 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3; + 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4; 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0; 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4;] + +test_dof_array(db,b,dbb) + + +T = SymTensorValue{2,Float64} +db = LagrangianDofBasis(T,x) +@test db.nodes === x +@test db.node_and_comp_to_dof == SymTensorValue{2,Int}[(1,5,9), (2,6,10), (3,7,11), (4,8,12)] +@test db.dof_to_node == [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] +@test db.dof_to_comp == [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3] + +v = SymTensorValue(1,2,3) +f = GenericField(x->v*x[1]) +dbf = [0, 1, 0, 1, 0, 2, 0, 2, 0, 3, 0, 3] + +test_dof_array(db,f,dbf) + +ndof = 12 +b = fill(f,ndof) +bx = evaluate(b,x) +dbb = [ + 0 0 0 0 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1 1 1 1 1; 0 0 0 0 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1 1 1 1 1; + 0 0 0 0 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2 2 2 2 2; 0 0 0 0 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2 2 2 2 2; + 0 0 0 0 0 0 0 0 0 0 0 0; 3 3 3 3 3 3 3 3 3 3 3 3; 0 0 0 0 0 0 0 0 0 0 0 0; 3 3 3 3 3 3 3 3 3 3 3 3;] + +test_dof_array(db,b,dbb) + + +T = SymTracelessTensorValue{2,Float64} +db = LagrangianDofBasis(T,x) +@test db.nodes === x +@test db.node_and_comp_to_dof == SymTracelessTensorValue{2,Int}[(1,5), (2,6), (3,7), (4,8)] +@test db.dof_to_node == [1, 2, 3, 4, 1, 2, 3, 4] +@test db.dof_to_comp == [1, 1, 1, 1, 2, 2, 2, 2] + +v = SymTracelessTensorValue(1,2) +f = GenericField(x->v*x[1]) +dbf = [0, 1, 0, 1, 0, 2, 0, 2] + +test_dof_array(db,f,dbf) + +ndof = 8 +b = fill(f,ndof) +bx = evaluate(b,x) +dbb = [ + 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1; 0 0 0 0 0 0 0 0; 1 1 1 1 1 1 1 1; + 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2; 0 0 0 0 0 0 0 0; 2 2 2 2 2 2 2 2;] + +test_dof_array(db,b,dbb) + end # module From 98fbe44b17699935edf9b8f4978027bbfeb877ec Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 8 Aug 2024 16:22:52 +1000 
Subject: [PATCH 11/85] small addition for SymTracelessTensorValue --- src/Fields/AutoDiff.jl | 4 ++-- test/VisualizationTests/VtkTests.jl | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Fields/AutoDiff.jl b/src/Fields/AutoDiff.jl index 5c6ada102..6bfa9d12d 100644 --- a/src/Fields/AutoDiff.jl +++ b/src/Fields/AutoDiff.jl @@ -94,7 +94,7 @@ function divergence(f::Function,x::Point,fx::TensorValue{3,3}) ) end -function divergence(f::Function,x::Point,fx::SymTensorValue{2}) +function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{2}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -103,7 +103,7 @@ function divergence(f::Function,x::Point,fx::SymTensorValue{2}) ) end -function divergence(f::Function,x::Point,fx::SymTensorValue{3}) +function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{3}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( diff --git a/test/VisualizationTests/VtkTests.jl b/test/VisualizationTests/VtkTests.jl index 055da65eb..af9ad91c4 100644 --- a/test/VisualizationTests/VtkTests.jl +++ b/test/VisualizationTests/VtkTests.jl @@ -96,6 +96,7 @@ writevtk(trian,f,nsubcells=10, cellfields=[ "v2"=>x->VectorValue(1,2), "v"=>x->VectorValue(1,2,3), "s"=>x->SymTensorValue(1.0,2.0,3.0), + "q"=>x->SymTracelessTensorValue(1.0,2.0), "c"=>x->SymFourthOrderTensorValue(1,2,3, 1,2,3, 1,2,3), "t"=>x->TensorValue(1,2,3,4),]) From 8020086044c35448cbb926edc8f17085c4aa2153 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Wed, 14 Aug 2024 12:49:50 +1000 Subject: [PATCH 12/85] fix ThirdOrderTensor S/M convert and add get_array --- src/TensorValues/ThirdOrderTensorValueTypes.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/TensorValues/ThirdOrderTensorValueTypes.jl b/src/TensorValues/ThirdOrderTensorValueTypes.jl index 62ec7eaeb..3d91b2f34 100644 --- a/src/TensorValues/ThirdOrderTensorValueTypes.jl +++ 
b/src/TensorValues/ThirdOrderTensorValueTypes.jl @@ -64,8 +64,8 @@ convert(::Type{<:ThirdOrderTensorValue{D1,D2,D3,T}}, arg::AbstractArray) where { convert(::Type{<:ThirdOrderTensorValue{D1,D2,D3,T}}, arg::Tuple) where {D1,D2,D3,T} = ThirdOrderTensorValue{D1,D2,D3,T}(arg) # Inverse conversion -convert(::Type{<:SMatrix{D1,D2,D3,T}}, arg::ThirdOrderTensorValue) where {D1,D2,D3,T} = SMatrix{D1,D2,D3,T}(Tuple(arg)) -convert(::Type{<:MMatrix{D1,D2,D3,T}}, arg::ThirdOrderTensorValue) where {D1,D2,D3,T} = MMatrix{D1,D2,D3,T}(Tuple(arg)) +convert(::Type{<:SArray{Tuple{D1,D2,D3},T}}, arg::ThirdOrderTensorValue) where {D1,D2,D3,T} = SArray{Tuple{D1,D2,D3},T}(Tuple(arg)) +convert(::Type{<:MArray{Tuple{D1,D2,D3},T}}, arg::ThirdOrderTensorValue) where {D1,D2,D3,T} = MArray{Tuple{D1,D2,D3},T}(Tuple(arg)) convert(::Type{<:NTuple{L,T1}}, arg::ThirdOrderTensorValue) where {L,T1} = NTuple{L,T1}(Tuple(arg)) # Internal conversion @@ -78,6 +78,8 @@ change_eltype(::Type{ThirdOrderTensorValue{D1,D2,D3,T1}},::Type{T2}) where {D1,D change_eltype(::Type{ThirdOrderTensorValue{D1,D2,D3,T1,L}},::Type{T2}) where {D1,D2,D3,T1,T2,L} = ThirdOrderTensorValue{D1,D2,D3,T2,L} change_eltype(::T,::Type{T2}) where {T<:ThirdOrderTensorValue,T2} = change_eltype(T,T2) +get_array(arg::ThirdOrderTensorValue{D1,D2,D3,T}) where {D1,D2,D3,T} = convert(SArray{Tuple{D1,D2,D3},T},arg) + zero(::Type{<:ThirdOrderTensorValue{D1,D2,D3,T}}) where {D1,D2,D3,T} = ThirdOrderTensorValue{D1,D2,D3,T}(tfill(zero(T),Val{D1*D2*D3}())) zero(::ThirdOrderTensorValue{D1,D2,D3,T}) where {D1,D2,D3,T} = zero(ThirdOrderTensorValue{D1,D2,D3,T}) From 50392e53552e3e92b1e552fff42706333d00e761 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 15 Aug 2024 16:16:05 +1000 Subject: [PATCH 13/85] Fields Autodiff improvments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Implementation of gradient, divergence and laplacian for tensor valued functions - Prevents calling divergence on inconsistent 
sizes - Accelerate multidim laplacian by calling the second ForwardDiff.jacobian in place A SymThirdOrderTensorValue could be implemented to hold the gradient of u::AbstractSymTensorValue, to enable (∇⋅∇(u)) to be of the type of Δu (that of u), if divergence(::SymThirdOrderTensorValue) is appropriately specialized --- src/Fields/AutoDiff.jl | 55 +++++++++++++++++++++----- test/FieldsTests/DiffOperatorsTests.jl | 49 +++++++++++++++++++---- 2 files changed, 86 insertions(+), 18 deletions(-) diff --git a/src/Fields/AutoDiff.jl b/src/Fields/AutoDiff.jl index 6bfa9d12d..1aadf2d09 100644 --- a/src/Fields/AutoDiff.jl +++ b/src/Fields/AutoDiff.jl @@ -59,6 +59,13 @@ function gradient(f::Function,x::Point,fx::VectorValue) TensorValue(transpose(ForwardDiff.jacobian(y->get_array(f(y)),get_array(x)))) end +# Implementation for all second order tensor values +# Does not exploit possible symmetries +function gradient(f::Function,x::Point{A},fx::S) where S<:MultiValue{Tuple{B,C}} where {A,B,C} + a = transpose(ForwardDiff.jacobian(y->get_array(f(y)),get_array(x))) + ThirdOrderTensorValue(reshape(a, (A,B,C))) +end + function gradient(f::Function,x::Point,fx::MultiValue) @notimplemented end @@ -71,11 +78,21 @@ function divergence(f::Function,x::Point) divergence(f,x,return_value(f,x)) end -function divergence(f::Function,x::Point,fx) +function divergence(f::Function,x::Point{D},fx::VectorValue{D}) where D tr(gradient(f,x,fx)) end -function divergence(f::Function,x::Point,fx::TensorValue{2,2}) +function divergence(f::Function,x::Point{D},fx::S) where S<:MultiValue{Tuple{D,A},T} where {D,A,T} + a = ForwardDiff.jacobian(y->get_array(f(y)),get_array(x)) + VectorValue{A,T}( ntuple(k -> sum(i-> a[(k-1)*D+i,i], 1:D),A) ) +end + +function divergence(f::Function,x::Point{D},fx::S) where S<:MultiValue{Tuple{D,A,B},T} where {D,A,B,T} + a = ForwardDiff.jacobian(y->get_array(f(y)),get_array(x)) + TensorValue{A,B,T}( ntuple(k -> sum(i-> a[(k-1)*D+i,i], 1:D),A*B) ) +end + +function 
divergence(f::Function,x::Point{2},fx::TensorValue{2,2}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -84,7 +101,7 @@ function divergence(f::Function,x::Point,fx::TensorValue{2,2}) ) end -function divergence(f::Function,x::Point,fx::TensorValue{3,3}) +function divergence(f::Function,x::Point{3},fx::TensorValue{3,3}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -94,7 +111,7 @@ function divergence(f::Function,x::Point,fx::TensorValue{3,3}) ) end -function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{2}) +function divergence(f::Function,x::Point{2},fx::AbstractSymTensorValue{2}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -103,7 +120,7 @@ function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{2}) ) end -function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{3}) +function divergence(f::Function,x::Point{3},fx::AbstractSymTensorValue{3}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -113,6 +130,10 @@ function divergence(f::Function,x::Point,fx::AbstractSymTensorValue{3}) ) end +function divergence(f::Function,x::Point,fx::MultiValue) + @notimplemented +end + function curl(f::Function,x::Point) grad2curl(gradient(f,x)) end @@ -125,11 +146,25 @@ function laplacian(f::Function,x::Point,fx::Number) tr(ForwardDiff.jacobian(y->ForwardDiff.gradient(f,y), get_array(x))) end -function laplacian(f::Function,x::Point,fx::VectorValue) - A = length(x) - B = length(fx) - a = ForwardDiff.jacobian(y->transpose(ForwardDiff.jacobian(z->get_array(f(z)),y)), get_array(x)) - tr(ThirdOrderTensorValue{A,A,B}(Tuple(transpose(a)))) +function laplacian(f::Function,x::Point{A},fx::VectorValue{B,T}) where {A,B,T} + a = MMatrix{A*B,A,T}(undef) + ForwardDiff.jacobian!(a, y->ForwardDiff.jacobian(z->get_array(f(z)),y), get_array(x)) + VectorValue{B,T}( ntuple(k -> sum(i-> a[(i-1)*B+k,i], 1:A),B) ) +end + 
+function laplacian(f::Function,x::Point{A},fx::S) where S<:MultiValue{Tuple{B,C},T} where {A,B,C,T} + a = MMatrix{A*B*C,A,T}(undef) + ForwardDiff.jacobian!(a, y->ForwardDiff.jacobian(z->get_array(f(z)),y), get_array(x)) + t = ntuple(k -> sum(i-> a[(i-1)*B*C+k,i], 1:A),B*C) + S(SMatrix{B,C}(t)) #Necessary cast to build e.g. symmetric tensor values +end + +# Implementation for any third order tensor values +function laplacian(f::Function,x::Point{A},fx::S) where S<:MultiValue{Tuple{B,C,D},T} where {A,B,C,D,T} + a = MMatrix{A*B*C*D,A,T}(undef) + ForwardDiff.jacobian!(a, y->ForwardDiff.jacobian(z->get_array(f(z)),y), get_array(x)) + t = ntuple(k -> sum(i-> a[(i-1)*B*C*D+k,i], 1:A),B*C*D) + S(SArray{Tuple{B,C,D}}(t)) end function laplacian(f::Function,x::Point,fx::MultiValue) diff --git a/test/FieldsTests/DiffOperatorsTests.jl b/test/FieldsTests/DiffOperatorsTests.jl index 98975c69a..35f262de5 100644 --- a/test/FieldsTests/DiffOperatorsTests.jl +++ b/test/FieldsTests/DiffOperatorsTests.jl @@ -74,13 +74,46 @@ u_vec(x) = VectorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2 ) Δu_vec(x) = VectorValue( 2, -2 ) εu_vec(x) = SymTensorValue( 2*x[1], 0.5*(one(x[2])+4*one(x[1])), - 2*x[2] ) +u_ten(x) = TensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, -x[1]^2 - x[2], -4*x[1] + x[2]^2 ) +∇u_ten(x) = ThirdOrderTensorValue( 2*x[1], one(x[2]), 4*one(x[1]), - 2*x[2], + -2*x[1],-one(x[2]),-4*one(x[1]), + 2*x[2] ) +Δu_ten(x) = TensorValue( 2, -2, -2, 2 ) + +u_sten(x) = SymTensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, -4*x[1] + x[2]^2 ) +∇u_sten(x) = ThirdOrderTensorValue( 2*x[1], one(x[2]), 4*one(x[1]), - 2*x[2], + 4*one(x[1]),-2*x[2], -4*one(x[1]), + 2*x[2] ) +Δu_sten(x) = SymTensorValue( 2, -2, 2 ) + +u_qten(x) = SymTracelessTensorValue( x[1]^3 + 2x[2]^3, 5*x[1]^3 - 7x[2]^3) +∇u_qten(x) = ThirdOrderTensorValue( 3x[1]^2, 6x[2]^2, 15x[1]^2, -21x[2]^2, + 15x[1]^2,-21x[2]^2, -3x[1]^2, -6x[2]^2) +Δu_qten(x) = SymTracelessTensorValue( 6x[1] + 12x[2], 30x[1] - 42x[2] ) + xs = [ Point(1.,1.), Point(2.,0.), 
Point(0.,3.), Point(-1.,3.)] for x in xs - @test ∇(u_scal)(x) == ∇u_scal(x) - @test Δ(u_scal)(x) == Δu_scal(x) - @test ∇(u_vec)(x) == ∇u_vec(x) - @test Δ(u_vec)(x) == Δu_vec(x) - @test ε(u_vec)(x) == εu_vec(x) + @test ∇(u_scal)(x) == ∇u_scal(x) + @test Δ(u_scal)(x) == Δu_scal(x) + + @test ∇(u_vec)(x) == ∇u_vec(x) + @test Δ(u_vec)(x) == Δu_vec(x) + @test ε(u_vec)(x) == εu_vec(x) + + @test ∇(u_ten)(x) == ∇u_ten(x) + @test Δ(u_ten)(x) == Δu_ten(x) + @test (∇⋅u_ten)(x) == tr(∇u_ten(x)) + @test Δ(u_ten)(x) == (∇⋅∇u_ten)(x) + + @test ∇(u_sten)(x) == ∇u_sten(x) + @test Δ(u_sten)(x) == Δu_sten(x) + @test (∇⋅u_sten)(x) == tr(∇u_sten(x)) + #@test Δ(u_sten)(x) == (∇⋅∇u_sten)(x) # Would work if ∇(u_sten) is implemented to return a SymThirdOrderTensorValue + @test get_array(Δ(u_sten)(x)) == get_array((∇⋅∇u_sten)(x)) + + @test ∇(u_qten)(x) == ∇u_qten(x) + @test Δ(u_qten)(x) == Δu_qten(x) + @test (∇⋅u_qten)(x) == tr(∇u_qten(x)) + #@test Δ(u_qten)(x) == (∇⋅∇u_qten)(x) + @test get_array(Δ(u_qten)(x)) == get_array((∇⋅∇u_qten)(x)) end u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) @@ -89,7 +122,7 @@ u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) εu(x) = SymTensorValue( 2*x[1], 2*x[2]-x[1], zero(x[1]) ) for x in xs - @test (∇⋅u)(x) == tr(∇u(x)) + @test (∇⋅u)(x) == tr(∇u(x)) @test (∇×u)(x) == grad2curl(∇u(x)) @test Δ(u)(x) == Δu(x) @test ε(u)(x) == εu(x) @@ -104,7 +137,7 @@ u(x) = VectorValue( x[1]^2 + 2*x[2]^2, 0 ) εu(x) = SymTensorValue( 2*x[1], 2*x[2], 0 ) for x in xs - @test (∇⋅u)(x) == tr(∇u(x)) + @test (∇⋅u)(x) == tr(∇u(x)) @test (∇×u)(x) == grad2curl(∇u(x)) @test Δ(u)(x) == Δu(x) @test ε(u)(x) == εu(x) @@ -120,7 +153,7 @@ u(x) = VectorValue( x[1]^2 + 2*x[3]^2, -x[1]^2, -x[2]^2 + x[3]^2 ) xs = [ Point(1.,1.,2.0), Point(2.,0.,1.), Point(0.,3.,0.), Point(-1.,3.,2.)] for x in xs @test ∇(u)(x) == ∇u(x) - @test (∇⋅u)(x) == tr(∇u(x)) + @test (∇⋅u)(x) == tr(∇u(x)) @test (∇×u)(x) == grad2curl(∇u(x)) @test Δ(u)(x) == Δu(x) @test ε(u)(x) == εu(x) From 
36ee4d2f19d4d38146e63a72300e9f48f91d9e7d Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 16 Aug 2024 15:53:52 +1000 Subject: [PATCH 14/85] [wip] added indep_comp_getindex and fix FESpaces --- src/FESpaces/CLagrangianFESpaces.jl | 2 +- src/Polynomials/MonomialBases.jl | 34 ++++++++++++++++++++++++++++ src/ReferenceFEs/LagrangianRefFEs.jl | 6 ++--- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/src/FESpaces/CLagrangianFESpaces.jl b/src/FESpaces/CLagrangianFESpaces.jl index f90e58691..a631c6916 100644 --- a/src/FESpaces/CLagrangianFESpaces.jl +++ b/src/FESpaces/CLagrangianFESpaces.jl @@ -215,7 +215,7 @@ function _generate_node_to_dof_glue_component_major( diri_dof_to_tag = ones(Int8,ndiri_dofs) T = change_eltype(z,Int32) nnodes = length(node_to_tag) - node_and_comp_to_dof = zeros(T,nnodes) + node_and_comp_to_dof = Vector{T}(undef,nnodes) nfree_dofs = 0 ndiri_dofs = 0 m = zeros(Int32, ncomps) diff --git a/src/Polynomials/MonomialBases.jl b/src/Polynomials/MonomialBases.jl index 261ffb809..502568ba0 100644 --- a/src/Polynomials/MonomialBases.jl +++ b/src/Polynomials/MonomialBases.jl @@ -494,6 +494,40 @@ function _set_gradient!( k end +# Specialization for SymTensorValue and SymTracelessTensorValue, +# necessary as long as outer(Point, V<:AbstractSymTensorValue)::G does not +# return a tensor type that implements the appropriate symmetries of the +# gradient (and hessian) +function _set_gradient!( + v::AbstractVector{G},s,k,::Type{V}) where {V<:AbstractSymTensorValue{D},G} where D + + T = eltype(s) + m = zero(Mutable(G)) + z = zero(T) + + is_traceless = V <: SymTracelessTensorValue + skip_last_diagval = is_traceless ? 
1 : 0 # Skid V_DD if traceless + + for c in 1:(D-skip_last_diagval) # Go over cols + for r in c:D # Go over lower triangle, current col + for i in CartesianIndices(m) + @inbounds m[i] = z + end + for i in CartesianIndices(s) + @inbounds m[i,r,c] = s[i] + if (r!=c) + @inbounds m[i,c,r] = s[i] + elseif is_traceless # V_rr contributes negatively to V_DD (tracelessness) + @inbounds m[i,D,D] = -s[i] + end + end + @inbounds v[k] = m + k += 1 + end + end + k +end + function _hessian_nd!( v::AbstractVector{G}, x, diff --git a/src/ReferenceFEs/LagrangianRefFEs.jl b/src/ReferenceFEs/LagrangianRefFEs.jl index af14e2cc8..0c6b738ba 100644 --- a/src/ReferenceFEs/LagrangianRefFEs.jl +++ b/src/ReferenceFEs/LagrangianRefFEs.jl @@ -230,7 +230,7 @@ function _generate_face_own_dofs(face_own_nodes, node_and_comp_to_dof) for comp in comps for node in nodes comp_to_dofs = node_and_comp_to_dof[node] - dof = comp_to_dofs[comp] + dof = indep_comp_getindex(comp_to_dofs,comp) push!(face_own_dofs[face],dof) end end @@ -274,8 +274,8 @@ function _generate_face_own_dofs_permutations( comp_to_pdof = node_and_comp_to_dof[pnode] comp_to_dof = node_and_comp_to_dof[node] for comp in 1:ncomps - dof = comp_to_dof[comp] - pdof = comp_to_pdof[comp] + dof = indep_comp_getindex(comp_to_dof,comp) + pdof = indep_comp_getindex(comp_to_pdof,comp) idof = findfirst(i->i==dof,idof_to_dof) ipdof = findfirst(i->i==pdof,idof_to_dof) idof_to_pidof[idof] = ipdof From 8faade5e7664bcc9f6890faf57d1e810c7befeec Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 16 Aug 2024 15:58:54 +1000 Subject: [PATCH 15/85] Test [Sym][Traceless]TensorValue'd FESpaces for Poisson --- test/GridapTests/PoissonTests.jl | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/test/GridapTests/PoissonTests.jl b/test/GridapTests/PoissonTests.jl index bea1fe375..5ae7fa2eb 100644 --- a/test/GridapTests/PoissonTests.jl +++ b/test/GridapTests/PoissonTests.jl @@ -2,6 +2,7 @@ module PoissonTests using 
Test using Gridap +using Gridap.TensorValues import Gridap: ∇ #using LinearAlgebra @@ -60,7 +61,30 @@ vector_data[:valuetype] = VectorValue{2,Float64} vector_data[:u] = u_vec vector_data[:f] = f_vec -for data in [ vector_data, scalar_data ] +u_ten(x) = TensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, 2x[2]^2 - 3x[1], -.5x[1]^2 + x[2] ) +f_ten(x) = - Δ(u_ten)(x) +tensor_data = Dict{Symbol,Any}() +tensor_data[:valuetype] = TensorValue{2,2,Float64} +tensor_data[:u] = u_ten +tensor_data[:f] = f_ten + + +u_sten(x) = SymTensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, 2x[2]^2 - 3x[1]) +f_sten(x) = - Δ(u_sten)(x) +stensor_data = Dict{Symbol,Any}() +stensor_data[:valuetype] = SymTensorValue{2,Float64} +stensor_data[:u] = u_sten +stensor_data[:f] = f_sten + + +u_qten(x) = SymTracelessTensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2) +f_qten(x) = - Δ(u_qten)(x) +qtensor_data = Dict{Symbol,Any}() +qtensor_data[:valuetype] = SymTracelessTensorValue{2,Float64} +qtensor_data[:u] = u_qten +qtensor_data[:f] = f_qten + +for data in [scalar_data, vector_data, tensor_data, stensor_data, qtensor_data] T = data[:valuetype] u = data[:u] @@ -81,7 +105,7 @@ for data in [ vector_data, scalar_data ] l(v) = ∫( v⊙f )*dΩ + ∫( v⊙(nn⋅∇(uh)) )*dΓn + - ∫( (γ/h)*v⊙uh - (nd⋅∇(v))⊙u )*dΓd + ∫( (γ/h)*v⊙uh - (nd⋅∇(v))⊙uh )*dΓd op = AffineFEOperator(a,l,U,V) uh = solve(op) From b5afd0c839ed4f34a262d21dfa18e32d319f65c3 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Wed, 28 Aug 2024 16:26:04 +1000 Subject: [PATCH 16/85] fix QTensorValue * method ambiguity --- src/TensorValues/Operations.jl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 4f831de65..112157c96 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -197,6 +197,13 @@ function (*)(a::MultiValue, b::MultiValue) #dot(a,b) end +# Resolution of silly method ambiguity +const _msg = "Use use simple contraction dot aka ⋅ (\\cdot) or full contraction 
inner aka ⊙ (\\odot)" +function *(::MultiValue,::SymTracelessTensorValue) @unreachable _msg end +function *(::SymTracelessTensorValue,::MultiValue) @unreachable _msg end +function *(::SymTracelessTensorValue,::AbstractSymTensorValue) @unreachable _msg end +function *(::SymTracelessTensorValue,::SymTracelessTensorValue) @unreachable _msg end + dot(a::MultiValue{Tuple{D}}, b::MultiValue{Tuple{D}}) where D = inner(a,b) dot(a::MultiValue,b::MultiValue) = @notimplemented From 5bce78fdb32392343ad9357274e2004f5d9ab7db Mon Sep 17 00:00:00 2001 From: CompatHelper Julia Date: Mon, 9 Sep 2024 00:17:11 +0000 Subject: [PATCH 17/85] CompatHelper: bump compat for JLD2 to 0.5, (keep existing compat) --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index ed525ebbd..540d53294 100644 --- a/Project.toml +++ b/Project.toml @@ -42,7 +42,7 @@ FastGaussQuadrature = "0.4.2, 1" FileIO = "1.2.2, 1.3, 1.4" FillArrays = "0.8.4, 0.9, 0.10, 0.11, 0.12, 0.13, 1" ForwardDiff = "0.10.10" -JLD2 = "0.1.11, 0.3, 0.4" +JLD2 = "0.1.11, 0.3, 0.4, 0.5" JSON = "0.21.0" LineSearches = "7.0.1" NLsolve = "4.3.0" From 78fcbf1ac92f64549b71f36cbfd382b4662f0582 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 17 Sep 2024 18:26:08 +1000 Subject: [PATCH 18/85] fix division of SymTracelessTensorValue by scalar --- src/TensorValues/Operations.jl | 7 +++++++ test/TensorValuesTests/OperationsTests.jl | 23 +++++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 112157c96..1642f2af6 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -170,6 +170,13 @@ function (/)(a::MultiValue,b::Number) P(r) end +function (/)(a::SymTracelessTensorValue,b::Number) + r = _bc(/,a.data[1:end-1],b) + T = _eltype(/,r,a,b) + M = change_eltype(a,T) + M(r) +end + const _err = " with number is undefined for traceless tensors" function 
-(::SymTracelessTensorValue,::Number) error("Addition" *_err) end function +(::SymTracelessTensorValue,::Number) error("Subtraction"*_err) end diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 6c93d91a3..d5f98b57d 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -245,6 +245,11 @@ c = t * 2 r = TensorValue(2, 4, 6, 8, 10, 12, 14, 16, 18) @test c == r +c = t / 2 +@test isa(c,TensorValue{3}) +r = TensorValue(.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5) +@test c == r + c = t + 2 @test isa(c,TensorValue{3,3,Int}) r = TensorValue(3, 4, 5, 6, 7, 8, 9, 10, 11) @@ -261,6 +266,11 @@ c = st * 2 r = SymTensorValue(2,4,6,10,12,18) @test c == r +c = st / 2 +@test isa(c,SymTensorValue{3}) +r = SymTensorValue(.5,1,1.5,2.5,3,4.5) +@test c == r + c = st + 2 @test isa(c,SymTensorValue{3}) r = SymTensorValue(3,4,5,7,8,11) @@ -277,6 +287,10 @@ c = qt * 2 r = SymTracelessTensorValue(2,4,6,10,12) @test c == r +c = qt / 2 +@test isa(c,SymTracelessTensorValue{3}) +r = SymTracelessTensorValue(.5,1,1.5,2.5,3) +@test c == r c = 2 * s4ot @test isa(c,SymFourthOrderTensorValue{2}) @@ -288,9 +302,14 @@ c = s4ot * 2 r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) @test c == r -c = c + 0 +c = s4ot / 2 @test isa(c,SymFourthOrderTensorValue{2}) -r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) +r = SymFourthOrderTensorValue(.5,0,0, 0,.25,0, 0,0,.5) +@test c == r + +c = s4ot + 0 +@test isa(c,SymFourthOrderTensorValue{2}) +r = SymFourthOrderTensorValue(1,0,0, 0,.5,0, 0,0,1) @test c == r # Dot product (simple contraction) From 9f6eb951802c96adc2b78a76f862cb023ac75207 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 19 Sep 2024 17:09:08 +1000 Subject: [PATCH 19/85] Fix time derivative of SymTensorValues --- src/ODEs/TimeDerivatives.jl | 17 +++++++----- test/ODEsTests/TimeDerivativesTests.jl | 36 ++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git 
a/src/ODEs/TimeDerivatives.jl b/src/ODEs/TimeDerivatives.jl index d14885900..06d7b65f9 100644 --- a/src/ODEs/TimeDerivatives.jl +++ b/src/ODEs/TimeDerivatives.jl @@ -121,15 +121,20 @@ function _time_derivative(T::Type{<:Real}, f, t, x) ForwardDiff.derivative(partial, t) end -function _time_derivative(T::Type{<:VectorValue}, f, t, x) +function _time_derivative(T::Type{<:MultiValue}, f, t, x) partial(t) = get_array(f(t)(x)) - VectorValue(ForwardDiff.derivative(partial, t)) + T(ForwardDiff.derivative(partial, t)) end -function _time_derivative(T::Type{<:TensorValue}, f, t, x) - partial(t) = get_array(f(t)(x)) - TensorValue(ForwardDiff.derivative(partial, t)) -end +#function _time_derivative(T::Type{<:VectorValue}, f, t, x) +# partial(t) = get_array(f(t)(x)) +# VectorValue(ForwardDiff.derivative(partial, t)) +#end +# +#function _time_derivative(T::Type{<:TensorValue}, f, t, x) +# partial(t) = get_array(f(t)(x)) +# TensorValue(ForwardDiff.derivative(partial, t)) +#end ########################################## # Specialisation for `TimeSpaceFunction` # diff --git a/test/ODEsTests/TimeDerivativesTests.jl b/test/ODEsTests/TimeDerivativesTests.jl index 612970dbd..28c80f6d6 100644 --- a/test/ODEsTests/TimeDerivativesTests.jl +++ b/test/ODEsTests/TimeDerivativesTests.jl @@ -6,6 +6,7 @@ using ForwardDiff using Gridap using Gridap.ODEs +using Gridap.TensorValues # First time derivative, scalar-valued f1(t) = x -> 5 * x[1] * x[2] + x[2]^2 * t^3 @@ -73,6 +74,41 @@ for (f, ∂tf) in ((f1, ∂tf1),) @test ∂t(F)(tv)(xv) ≈ ∂tf(tv)(xv) end +# First time derivative, symmetric tensor-valued +f1(t) = x -> SymTensorValue(x[1] * t, x[1] * x[2], x[2] * t^2) +∂tf1(t) = x -> SymTensorValue(x[1], zero(x[1]), 2 * x[2] * t) + +for (f, ∂tf) in ((f1, ∂tf1),) + dtf(t) = x -> SymTensorValue(ForwardDiff.derivative(t -> get_array(f(t)(x)), t)) + + tv = rand(Float64) + xv = Point(rand(Float64, 2)...) 
+ @test ∂t(f)(tv)(xv) ≈ ∂tf(tv)(xv) + @test ∂t(f)(tv)(xv) ≈ dtf(tv)(xv) + + F = TimeSpaceFunction(f) + @test F(tv)(xv) ≈ f(tv)(xv) + @test ∂t(F)(tv)(xv) ≈ ∂tf(tv)(xv) +end + +# First time derivative, symmetric traceless tensor-valued +f1(t) = x -> SymTracelessTensorValue(x[1] * t, x[2] * t^2) +∂tf1(t) = x -> SymTracelessTensorValue(x[1], 2 * x[2] * t) + +for (f, ∂tf) in ((f1, ∂tf1),) + dtf(t) = x -> SymTensorValue(ForwardDiff.derivative(t -> get_array(f(t)(x)), t)) + + tv = rand(Float64) + xv = Point(rand(Float64, 2)...) + @test ∂t(f)(tv)(xv) ≈ ∂tf(tv)(xv) + @test ∂t(f)(tv)(xv) ≈ dtf(tv)(xv) + + F = TimeSpaceFunction(f) + @test F(tv)(xv) ≈ f(tv)(xv) + @test ∂t(F)(tv)(xv) ≈ ∂tf(tv)(xv) +end + + # Spatial derivatives ft(t) = x -> x[1]^2 * t + x[2] f = TimeSpaceFunction(ft) From 645dab6d7aef389abfff9af9c1842844711ec72c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Sun, 13 Oct 2024 00:46:06 +1100 Subject: [PATCH 20/85] Started exploring benchmarks --- benchmark/Project.toml | 4 ++++ benchmark/benchmarks.jl | 28 ++++++++++++++++++++++++++++ benchmark/drivers.jl | 20 ++++++++++++++++++++ benchmark/run_benchmarks.jl | 15 +++++++++++++++ 4 files changed, 67 insertions(+) create mode 100644 benchmark/Project.toml create mode 100644 benchmark/benchmarks.jl create mode 100644 benchmark/drivers.jl create mode 100644 benchmark/run_benchmarks.jl diff --git a/benchmark/Project.toml b/benchmark/Project.toml new file mode 100644 index 000000000..03356cbf5 --- /dev/null +++ b/benchmark/Project.toml @@ -0,0 +1,4 @@ +[deps] +BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" +Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" +PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d" diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl new file mode 100644 index 000000000..5f47843fd --- /dev/null +++ b/benchmark/benchmarks.jl @@ -0,0 +1,28 @@ +using BenchmarkTools +using Gridap + +include("drivers.jl") + +const SUITE = BenchmarkGroup() + +ncells = 40 +for D in [2,3] + for 
order in [1,2,3] + basis_cases = [ + ("lagrangian",lagrangian,Float64,order), + ("vector_lagragian",lagrangian,VectorValue{D,Float64},order), + ("raviart_thomas",raviart_thomas,Float64,order-1), + ] + for (basis_name,basis,T,degree) in basis_cases + biform_cases = [ + ("mass",mass,2*order), + ("laplacian",laplacian,2*(order-1)), + ] + for (biform_name,biform,qdegree) in biform_cases + reffe = ReferenceFE(basis, T, degree) + name = "assembly_$(D)D_$(basis_name)_$(biform_name)_$(order)" + SUITE[name] = @benchmarkable bm_matrix_assembly(D,ncells,reffe,qdegree,biform) + end + end + end +end diff --git a/benchmark/drivers.jl b/benchmark/drivers.jl new file mode 100644 index 000000000..e62a14bfb --- /dev/null +++ b/benchmark/drivers.jl @@ -0,0 +1,20 @@ + +mass(u,v,dΩ) = ∫(u⋅v)dΩ +laplacian(u,v,dΩ) = ∫(∇(u)⊙∇(v))dΩ + +function bm_matrix_assembly( + D :: Integer, + n :: Integer, + reffe :: Tuple, + qdegree :: Integer, + biform :: Function +) + domain = Tuple(repeat((0,1), D)...) + partition = Tuple(repeat(n, D)...) 
+ model = UnstructuredDiscreteModel(CartesianDiscreteModel(domain, partition)) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + V = TestFESpace(model, reffe) + a(u,v) = biform(u,v,dΩ) + A = assemble_matrix(a, V, V) +end diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl new file mode 100644 index 000000000..6c85739b3 --- /dev/null +++ b/benchmark/run_benchmarks.jl @@ -0,0 +1,15 @@ + +using Gridap +using PkgBenchmark +using DrWatson + +target = "raviart_thomas" + +results = judge( + Gridap, + BenchmarkConfig(juliacmd = `julia -O3`, id = target), + BenchmarkConfig(juliacmd = `julia -O3`, id = "master") +) + +outfile = projectdir("benchmark/results_$(target).json") +export_markdown(outfile,results) From 6ff1c1db0d0011d720e66ca712acc939cd841bc6 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 15 Oct 2024 11:10:17 +1100 Subject: [PATCH 21/85] Minor fixes --- benchmark/benchmarks.jl | 4 +++- benchmark/drivers.jl | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 5f47843fd..440d84166 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -21,7 +21,9 @@ for D in [2,3] for (biform_name,biform,qdegree) in biform_cases reffe = ReferenceFE(basis, T, degree) name = "assembly_$(D)D_$(basis_name)_$(biform_name)_$(order)" - SUITE[name] = @benchmarkable bm_matrix_assembly(D,ncells,reffe,qdegree,biform) + SUITE[name] = @benchmarkable bm_matrix_assembly( + $(D),$(ncells),$(reffe),$(qdegree),$(biform) + ) end end end diff --git a/benchmark/drivers.jl b/benchmark/drivers.jl index e62a14bfb..6ac5bf6a5 100644 --- a/benchmark/drivers.jl +++ b/benchmark/drivers.jl @@ -1,4 +1,6 @@ +using Gridap.Geometry + mass(u,v,dΩ) = ∫(u⋅v)dΩ laplacian(u,v,dΩ) = ∫(∇(u)⊙∇(v))dΩ @@ -9,8 +11,8 @@ function bm_matrix_assembly( qdegree :: Integer, biform :: Function ) - domain = Tuple(repeat((0,1), D)...) - partition = Tuple(repeat(n, D)...) 
+ domain = Tuple([repeat([0,1], D)...]) + partition = Tuple(fill(n, D)) model = UnstructuredDiscreteModel(CartesianDiscreteModel(domain, partition)) Ω = Triangulation(model) dΩ = Measure(Ω,qdegree) From 7c1891df01cfb2048c5244a4ed513c52cf371199 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Oct 2024 16:57:16 +1100 Subject: [PATCH 22/85] Minor --- benchmark/run_benchmarks.jl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 6c85739b3..05c82c694 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,3 +1,8 @@ +using Pkg + +Pkg.activate(@__DIR__) +Pkg.develop(PackageSpec(path = dirname(@__DIR__))) +Pkg.instantiate() using Gridap using PkgBenchmark From 7fd75de835c15cea21ecb392f439b9e004df92cc Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Oct 2024 17:26:26 +1100 Subject: [PATCH 23/85] Minor --- benchmark/run_benchmarks.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 05c82c694..fec741470 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -8,7 +8,7 @@ using Gridap using PkgBenchmark using DrWatson -target = "raviart_thomas" +target = "raviart-thomas" results = judge( Gridap, From 668ef36cef340f3d344da78493c57b98db6b0fad Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Oct 2024 17:34:16 +1100 Subject: [PATCH 24/85] Minor --- benchmark/benchmarks.jl | 3 +-- benchmark/run_benchmarks.jl | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 440d84166..e14106e0a 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -5,8 +5,7 @@ include("drivers.jl") const SUITE = BenchmarkGroup() -ncells = 40 -for D in [2,3] +for (D,ncells) in [(2,20),(3,8)] for order in [1,2,3] basis_cases = [ ("lagrangian",lagrangian,Float64,order), diff --git 
a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index fec741470..dcd3f1570 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -8,13 +8,11 @@ using Gridap using PkgBenchmark using DrWatson -target = "raviart-thomas" - results = judge( Gridap, - BenchmarkConfig(juliacmd = `julia -O3`, id = target), + BenchmarkConfig(juliacmd = `julia -O3`), # target -> current branch BenchmarkConfig(juliacmd = `julia -O3`, id = "master") ) -outfile = projectdir("benchmark/results_$(target).json") +outfile = normpath(@__DIR__,"results_$(target).json") export_markdown(outfile,results) From a3bf057df0272855206ea5978dc3c51815fb32eb Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Oct 2024 18:46:34 +1100 Subject: [PATCH 25/85] Added github workflow --- .github/workflows/benchmark.yml | 56 +++++++++++++++++++++++++++++++++ benchmark/run_benchmarks.jl | 22 +++++++++---- 2 files changed, 72 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 000000000..e7c29485a --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,56 @@ +name: Benchmarks + +on: + workflow_dispatch: + inputs: + target: + description: 'Target branch' + required: true + type: string + base: + description: 'Base branch' + required: true + default: 'master' + type: string + +jobs: + benchmark: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - ubuntu-latest + version: + - '1.10' + arch: + - x64 + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: ${{ matrix.version }} + arch: ${{ matrix.arch }} + - uses: actions/cache@v4 + env: + cache-name: cache-artifacts + with: + path: ~/.julia/artifacts + key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} + restore-keys: | + ${{ runner.os }}-test-${{ env.cache-name }}- + ${{ runner.os }}-test- + ${{ runner.os }}- + - 
uses: julia-actions/julia-buildpkg@v1 + - name: Install Gridap in main environment + run: julia -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' + - name: Install dependencies + run: julia --project=benchmark/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' + - name: Run benchmarks + run: julia --project=benchmark/ --color=yes benchmark/run_benchmarks.jl + env: + BM_BASE: ${{ github.event.inputs.base }} + BM_TARGET: ${{ github.event.inputs.target }} + - uses: actions/upload-artifact@v4 + with: + name: benchmarks + path: benchmark/benchmark_results.md \ No newline at end of file diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index dcd3f1570..5f939cb1a 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -6,13 +6,23 @@ Pkg.instantiate() using Gridap using PkgBenchmark -using DrWatson -results = judge( - Gridap, - BenchmarkConfig(juliacmd = `julia -O3`), # target -> current branch - BenchmarkConfig(juliacmd = `julia -O3`, id = "master") +config_kwargs = (; + juliacmd = `julia -O3`, ) -outfile = normpath(@__DIR__,"results_$(target).json") +if haskey(ENV,"BM_TARGET") + target = BenchmarkConfig(config_kwargs..., id = ENV["BM_TARGET"]) +else + target = BenchmarkConfig(config_kwargs...) 
+end + +if haskey(ENV,"BM_BASE") + base = BenchmarkConfig(config_kwargs..., id = ENV["BM_BASE"]) +else + base = BenchmarkConfig(config_kwargs..., id = "master") +end + +results = judge(Gridap, target, base) +outfile = normpath(@__DIR__,"benchmark_results.md") export_markdown(outfile,results) From 3c00c10c06841763a6006c1781af128772664ad1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Oct 2024 18:52:08 +1100 Subject: [PATCH 26/85] Minor --- benchmark/run_benchmarks.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 5f939cb1a..db3cc5fa3 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -11,15 +11,15 @@ config_kwargs = (; juliacmd = `julia -O3`, ) -if haskey(ENV,"BM_TARGET") +if haskey(ENV,"BM_TARGET") # Provided by CI workflow target = BenchmarkConfig(config_kwargs..., id = ENV["BM_TARGET"]) -else +else # Default to the current commit target = BenchmarkConfig(config_kwargs...) 
end -if haskey(ENV,"BM_BASE") +if haskey(ENV,"BM_BASE") # Provided by CI workflow base = BenchmarkConfig(config_kwargs..., id = ENV["BM_BASE"]) -else +else # Default to master base = BenchmarkConfig(config_kwargs..., id = "master") end From 386b52ca43ca16b95cf03fe21f0005db8996cb95 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 18 Oct 2024 12:41:48 +1100 Subject: [PATCH 27/85] clean real and imag implementation on MultiValues --- src/TensorValues/Operations.jl | 20 ++++++++++++-------- src/TensorValues/VectorValueTypes.jl | 3 --- test/TensorValuesTests/OperationsTests.jl | 6 ++++++ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 1642f2af6..d6965b381 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -618,17 +618,21 @@ end @inline norm(u::MultiValue{Tuple{0},T}) where T = sqrt(zero(T)) ############################################################### -# conj +# conj, real, imag ############################################################### -function conj(a::T) where {T<:MultiValue} - r = map(conj, a.data) - T(r) -end +for op in (:conj,:real,:imag) + @eval begin + function ($op)(a::T) where {T<:MultiValue} + r = map($op, a.data) + T(r) + end -function conj(a::SymTracelessTensorValue) - r = map(conj, a.data) - SymTracelessTensorValue(r[1:end-1]) + function ($op)(a::SymTracelessTensorValue) + r = map($op, a.data) + SymTracelessTensorValue(r[1:end-1]) + end + end end ############################################################### diff --git a/src/TensorValues/VectorValueTypes.jl b/src/TensorValues/VectorValueTypes.jl index f727f3d09..8c6edab83 100644 --- a/src/TensorValues/VectorValueTypes.jl +++ b/src/TensorValues/VectorValueTypes.jl @@ -92,9 +92,6 @@ change_eltype(::VectorValue{D,T1},::Type{T2}) where {D,T1,T2} = change_eltype(Ve get_array(arg::VectorValue{D,T}) where {D,T} = convert(SVector{D,T}, arg) -real(x::VectorValue{D,<:Complex}) where 
{D} = VectorValue{D}(real.(x.data)) -imag(x::VectorValue{D,<:Complex}) where {D} = VectorValue{D}(imag.(x.data)) - ############################################################### # Introspection (VectorValue) ############################################################### diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index d5f98b57d..f7f8e98db 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -600,6 +600,8 @@ v = VectorValue(1,0) @test v == v' @test conj(v) == v' +# tr + t = TensorValue(1,2,3,4) @test tr(t) == 5 @@ -876,10 +878,14 @@ b = 4.0 - 3.0*im @test real(VectorValue(1+1im)) == VectorValue(1) @test real(VectorValue(1+1im, 1+1im)) == VectorValue(1, 1) @test real(VectorValue(1+1im, 1+1im, 1+1im)) == VectorValue(1, 1, 1) +@test real(TensorValue(1+1im, 1+1im, 1+1im, 1+1im)) == TensorValue(1, 1, 1, 1) +@test real(SymTracelessTensorValue(1+1im, 1+1im)) == SymTracelessTensorValue(1, 1) @test imag(VectorValue(1+1im)) == VectorValue(1) @test imag(VectorValue(1+1im, 1+1im)) == VectorValue(1, 1) @test imag(VectorValue(1+1im, 1+1im, 1+1im)) == VectorValue(1, 1, 1) +@test imag(TensorValue(1+1im, 1+1im, 1+1im, 1+1im)) == TensorValue(1, 1, 1, 1) +@test imag(SymTracelessTensorValue(1+1im, 1+1im)) == SymTracelessTensorValue(1, 1) # Broadcast a = VectorValue(1,2,3) From a49beeefd86fa37b97d42f20ad84e33a877228b6 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 18 Oct 2024 15:22:19 +1100 Subject: [PATCH 28/85] fix MultiValue determinent det would always crash on non square tensors. 
--- src/TensorValues/Operations.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index d6965b381..aa09a52c3 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -531,7 +531,7 @@ cross(a::MultiValue,b::MultiValue) = error("Cross product only defined for R2 an # Linear Algebra ############################################################### -det(a::MultiValue{Tuple{D1,D2}}) where {D1,D2} = det(get_array(a)) +det(a::MultiValue{Tuple{D,D}}) where {D} = det(get_array(a)) det(a::MultiValue{Tuple{1,1}}) = a[1] From 4b5127c5599ccebb427caa14375d63826ea824c1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 18 Oct 2024 16:18:02 +1100 Subject: [PATCH 29/85] Added support for Aqua.jl --- NEWS.md | 1 + Project.toml | 4 +++- test/Aqua.jl | 8 ++++++++ test/runtests.jl | 2 ++ 4 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 test/Aqua.jl diff --git a/NEWS.md b/NEWS.md index 98d2d211f..d3a2e5c18 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added MacroFElements. These are defined as having the basis/dof-basis of a FESpace created on top of a RefinementRule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). - Added Barycentric refinement rule in 2D and 3D. Added Simplexify refinement rule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). +- Added support for benchmarking, through PkgBenchmark.jl. Since PR[#1039](https://github.com/gridap/Gridap.jl/pull/1039). 
## [0.18.6] - 2024-08-29 diff --git a/Project.toml b/Project.toml index 540d53294..49303019f 100644 --- a/Project.toml +++ b/Project.toml @@ -33,6 +33,7 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" [compat] AbstractTrees = "0.3.3, 0.4" +Aqua = "0.8" BSON = "0.2.5, 0.3" BlockArrays = "0.12.12, 0.13, 0.14, 0.15, 0.16" Combinatorics = "1.0.0" @@ -56,7 +57,8 @@ WriteVTK = "1.12.0" julia = "1.3" [extras] +Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Test"] +test = ["Aqua","Test"] diff --git a/test/Aqua.jl b/test/Aqua.jl new file mode 100644 index 000000000..38fd18af5 --- /dev/null +++ b/test/Aqua.jl @@ -0,0 +1,8 @@ + +using Gridap +using Aqua + +Aqua.test_all( + Gridap, + ambiguities = false, +) diff --git a/test/runtests.jl b/test/runtests.jl index 98342027a..1963646a7 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -34,4 +34,6 @@ using Test @time @testset "Adaptivity" begin include("AdaptivityTests/runtests.jl") end +@time @testset "Aqua" begin include("Aqua.jl") end + end # module From cc22ec0c61da4f3ec0154f44e90f3763c92cc4d1 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 11:45:25 +1100 Subject: [PATCH 30/85] better API for double_contraction of tensors --- src/TensorValues/Operations.jl | 115 +++++++++++++++++----- test/TensorValuesTests/OperationsTests.jl | 69 +++++++++++-- 2 files changed, 149 insertions(+), 35 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index aa09a52c3..3e0404979 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -350,19 +350,12 @@ end Meta.parse(str) end -@generated function inner(a::SymFourthOrderTensorValue{D}, b::AbstractSymTensorValue{D}) where D - str = "" - for i in 1:D - for j in i:D - for k in 1:D - for l in 1:D - str *= "+ a[$i,$j,$k,$l]*b[$k,$l]" - end - end - str *= ", " - end - end - Meta.parse("SymTensorValue{D}($str)") +function 
inner(a::SymFourthOrderTensorValue{D}, b::AbstractSymTensorValue{D}) where D + double_contraction(a,b) +end + +function inner(a::AbstractSymTensorValue{D}, b::SymFourthOrderTensorValue{D}) where D + double_contraction(a,b) end function inner(a::SymFourthOrderTensorValue{D},b::MultiValue{Tuple{D,D}}) where D @@ -375,7 +368,25 @@ const ⊙ = inner # Double Contractions w/ products ############################################################### -# a_i = b_ijk*c_jk +function double_contraction(a::MultiValue{S1}, b::MultiValue{S2}) where {S1<:Tuple,S2<:Tuple} + L1, L2 = length(S1.types), length(S2.types) + if L1<2 || L2<2 + @unreachable "Double contraction is only define for tensors of order more than 2, got $L1 and $L2." + end + + D1, E1, D2, E2 = S1.types[end-1], S1.types[end], S2.types[1], S2.types[2] + if D1 != D2 || E1 != E2 + throw(DimensionMismatch("the last two dimensions of the first argument must match the first two of the second argument, got ($D1,$E1) ≠ ($D2,$E2).")) + end + @notimplemented +end + +# c_i = a_ij*b_ij +function double_contraction(a::MultiValue{S}, b::MultiValue{S}) where {S<:Tuple{D1,D2}} where {D1,D2} + inner(a,b) +end + +# c_i = a_ijk*b_jk @generated function double_contraction(a::A, b::B) where {A<:MultiValue{Tuple{D1,D2,D3}},B<:MultiValue{Tuple{D2,D3}}} where {D1,D2,D3} ss = String[] for i in 1:D1 @@ -386,7 +397,18 @@ const ⊙ = inner Meta.parse("VectorValue{$D1}(($str))") end -# a_ijpm = b_ijkl*c_klpm (3D) +# c_k = a_ij*b_ijk +@generated function double_contraction(a::A, b::B) where {A<:MultiValue{Tuple{D1,D2}},B<:MultiValue{Tuple{D1,D2,D3}}} where {D1,D2,D3} + ss = String[] + for k in 1:D3 + s = join([ "a[$i,$j]*b[$i,$j,$k]+" for i in 1:D1 for j in 1:D2]) + push!(ss,s[1:(end-1)]*", ") + end + str = join(ss) + Meta.parse("VectorValue{$D3}(($str))") +end + +# c_ijpm = a_ijkl*b_klpm (3D) @generated function double_contraction(a::A, b::B) where {A<:SymFourthOrderTensorValue{3},B<:SymFourthOrderTensorValue{3}} Sym4TensorIndexing = [1111, 1121, 
1131, 1122, 1132, 1133, 2111, 2121, 2131, 2122, 2132, 2133, @@ -403,7 +425,7 @@ end Meta.parse("SymFourthOrderTensorValue{3}($str)") end -# a_ijpm = b_ijkl*c_klpm (general case) +# c_ijpm = a_ijkl*b_klpm (general case) @generated function double_contraction(a::SymFourthOrderTensorValue{D}, b::SymFourthOrderTensorValue{D}) where D str = "" for j in 1:D @@ -424,32 +446,73 @@ end Meta.parse("SymFourthOrderTensorValue{D}($str)") end -# a_ilm = b_ijk*c_jklm -@generated function double_contraction(a::A,b::B) where {A<:ThirdOrderTensorValue{D},B<:SymFourthOrderTensorValue{D}} where D +# c_ilm = a_ijk*b_jklm +@generated function double_contraction(a::ThirdOrderTensorValue{D1,D,D},b::SymFourthOrderTensorValue{D}) where {D1,D} ss = String[] for m in 1:D for l in 1:D - for i in 1:D + for i in 1:D1 s = join([ "a[$i,$j,$k]*b[$j,$k,$l,$m]+" for j in 1:D for k in 1:D]) push!(ss,s[1:(end-1)]*", ") end end end str = join(ss) - Meta.parse("ThirdOrderTensorValue{$D}($str)") + Meta.parse("ThirdOrderTensorValue{$D1,$D,$D}($str)") end -# a_il = b_ijk*c_jkl -@generated function double_contraction(a::A,b::B) where {A<:ThirdOrderTensorValue{D},B<:ThirdOrderTensorValue{D}} where D +# c_ij = a_ijkl*b_kl +@generated function double_contraction(a::SymFourthOrderTensorValue{D}, b::AbstractSymTensorValue{D}) where D + str = "" + for i in 1:D + for j in i:D + for k in 1:D + str *= "+ a[$i,$j,$k,$k]*b[$k,$k]" + end + str *= " + 2*(" + for k in 1:D + for l in k+1:D + str *= "+ a[$i,$j,$k,$l]*b[$k,$l]" + end + end + str *= "), " + end + end + Meta.parse("SymTensorValue{D}($str)") +end + +# c_kl = a_ij*b_ijkl +@generated function double_contraction(a::AbstractSymTensorValue{D}, b::SymFourthOrderTensorValue{D}) where D + str = "" + for k in 1:D + for l in k:D + for i in 1:D + str *= "+ a[$i,$i]*b[$i,$i,$k,$l]" + end + str *= " + 2*(" + for i in 1:D + for j in i+1:D + str *= "+ a[$i,$j]*b[$i,$j,$k,$l]" + end + end + str *= "), " + end + end + Meta.parse("SymTensorValue{D}($str)") +end + + +# c_il = 
a_ijk*b_jkl +@generated function double_contraction(a::ThirdOrderTensorValue{D1,D,E},b::ThirdOrderTensorValue{D,E,D2}) where {D1,D,E,D2} ss = String[] - for l in 1:D - for i in 1:D - s = join([ "a[$i,$j,$k]*b[$j,$k,$l]+" for j in 1:D for k in 1:D]) + for l in 1:D2 + for i in 1:D1 + s = join([ "a[$i,$j,$k]*b[$j,$k,$l]+" for j in 1:D for k in 1:E]) push!(ss,s[1:(end-1)]*", ") end end str = join(ss) - Meta.parse("TensorValue{$D}($str)") + Meta.parse("TensorValue{$D1,$D2}($str)") end const ⋅² = double_contraction diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index f7f8e98db..8300c8f64 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -728,6 +728,13 @@ odot_contraction_array = 1*a[:,1,1] + 2*a[:,1,2] + 3*a[:,1,3] + 2*a[:,2,1] + @test odot_contraction == odot_contraction_array # double Contractions w/ products + +v = VectorValue(1:2...) +t1 = TensorValue(1:4...) +t2 = TensorValue(1:9...) +@test_throws ErrorException double_contraction(t1,v) +@test_throws DimensionMismatch double_contraction(t1,t2) + Sym4TensorIndexing = [1111, 1121, 1131, 1122, 1132, 1133, 2111, 2121, 2131, 2122, 2132, 2133, 3111, 3121, 3131, 3122, 3132, 3133, 2211, 2221, 2231, 2222, 2232, 2233, 2311, 2321, 2331, 2322, 2332, 2333, 3311, 3321, 3331, 3322, 3332, 3333] @@ -839,15 +846,59 @@ vals[3,:,:] .= [1 0 0 0 2 1 0 1 3]; t1 = ThirdOrderTensorValue(vals ...) 
-@test (t1 ⋅² t1)[1,1] == sum(vals[1,i,j] .* vals[i,j,1] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[2,1] == sum(vals[2,i,j] .* vals[i,j,1] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[3,1] == sum(vals[3,i,j] .* vals[i,j,1] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[1,2] == sum(vals[1,i,j] .* vals[i,j,2] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[2,2] == sum(vals[2,i,j] .* vals[i,j,2] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[3,2] == sum(vals[3,i,j] .* vals[i,j,2] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[1,3] == sum(vals[1,i,j] .* vals[i,j,3] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[2,3] == sum(vals[2,i,j] .* vals[i,j,3] for i in 1:3 for j in 1:3) -@test (t1 ⋅² t1)[3,3] == sum(vals[3,i,j] .* vals[i,j,3] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[1,1] == sum(vals[1,i,j] * vals[i,j,1] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[2,1] == sum(vals[2,i,j] * vals[i,j,1] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[3,1] == sum(vals[3,i,j] * vals[i,j,1] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[1,2] == sum(vals[1,i,j] * vals[i,j,2] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[2,2] == sum(vals[2,i,j] * vals[i,j,2] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[3,2] == sum(vals[3,i,j] * vals[i,j,2] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[1,3] == sum(vals[1,i,j] * vals[i,j,3] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[2,3] == sum(vals[2,i,j] * vals[i,j,3] for i in 1:3 for j in 1:3) +@test (t1 ⋅² t1)[3,3] == sum(vals[3,i,j] * vals[i,j,3] for i in 1:3 for j in 1:3) + +# a_il = b_ijk*c_jkl +t1 = ThirdOrderTensorValue{3,2,2}(1:12...) +t2 = ThirdOrderTensorValue{2,2,1}(1:4...) +t1_double_t2 = t1 ⋅² t2 +@test isa(t1_double_t2, TensorValue{3,1}) +@test (t1 ⋅² t2)[1,1] == sum(t1[1,j,k] * t2[j,k,1] for j in 1:2 for k in 1:2) +@test (t1 ⋅² t2)[2,1] == sum(t1[2,j,k] * t2[j,k,1] for j in 1:2 for k in 1:2) +@test (t1 ⋅² t2)[3,1] == sum(t1[3,j,k] * t2[j,k,1] for j in 1:2 for k in 1:2) + +# a_kl = b_ij*c_ijkl +t1 = SymTensorValue{3}(1:6...) 
+t2 = SymFourthOrderTensorValue(1:36 ...) +v11 = sum(t1[i,j]*t2[i,j,1,1] for i in 1:3 for j in 1:3); +v12 = sum(t1[i,j]*t2[i,j,1,2] for i in 1:3 for j in 1:3); +v13 = sum(t1[i,j]*t2[i,j,1,3] for i in 1:3 for j in 1:3); +v22 = sum(t1[i,j]*t2[i,j,2,2] for i in 1:3 for j in 1:3); +v23 = sum(t1[i,j]*t2[i,j,2,3] for i in 1:3 for j in 1:3); +v33 = sum(t1[i,j]*t2[i,j,3,3] for i in 1:3 for j in 1:3); +t1_double_t2 = t1 ⋅² t2 +@test v11 == (t1_double_t2)[1,1] +@test v12 == (t1_double_t2)[1,2] +@test v13 == (t1_double_t2)[1,3] +@test v22 == (t1_double_t2)[2,2] +@test v23 == (t1_double_t2)[2,3] +@test v33 == (t1_double_t2)[3,3] + +# a_ij = b_ijkl*c_kl +t1 = SymFourthOrderTensorValue(1:36...) +t2 = SymTensorValue{3}(1:6...) +v11 = sum(t1[1,1,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +v12 = sum(t1[1,2,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +v13 = sum(t1[1,3,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +v22 = sum(t1[2,2,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +v23 = sum(t1[2,3,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +v33 = sum(t1[3,3,k,l]*t2[k,l] for k in 1:3 for l in 1:3); +t1_double_t2 = t1 ⋅² t2 +@test v11 == (t1_double_t2)[1,1] +@test v12 == (t1_double_t2)[1,2] +@test v13 == (t1_double_t2)[1,3] +@test v22 == (t1_double_t2)[2,2] +@test v23 == (t1_double_t2)[2,3] +@test v33 == (t1_double_t2)[3,3] + # a_il = b_ij*c_jl v1 = [1 2 3 From 955e32dd1aa235975893027a33b07ceec8020884 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 11:50:33 +1100 Subject: [PATCH 31/85] fix MultiValue determinent similar to a49beeef --- src/TensorValues/Operations.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 3e0404979..44c7ecc5f 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -612,7 +612,7 @@ function det(a::MultiValue{Tuple{3,3}}) (a_11*a_23*a_32 + a_12*a_21*a_33 + a_13*a_22*a_31) end -inv(a::MultiValue{Tuple{D1,D2}}) where {D1,D2} = 
TensorValue(inv(get_array(a))) +inv(a::MultiValue{Tuple{D,D}}) where D = TensorValue(inv(get_array(a))) # those still have better perf than the D=2,3 specialization below inv(a::AbstractSymTensorValue{D}) where D = SymTensorValue(inv(get_array(a))) inv(a::SymTracelessTensorValue{2}) = SymTracelessTensorValue(inv(get_array(a))) From 29c53af8957ddff009657679d425c82c717c2d99 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 24 Oct 2024 12:17:11 +1100 Subject: [PATCH 32/85] clarify Mutable and mutable api Add Mutable and mutable abstract method to clarify they belong to MultiValue's API and add test for them. --- src/TensorValues/MultiValueTypes.jl | 11 ++++ .../SymFourthOrderTensorValueTypes.jl | 4 ++ test/TensorValuesTests/TypesTests.jl | 55 +++++++++++++++++++ 3 files changed, 70 insertions(+) diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl index ce4fdd9b9..33379f43b 100644 --- a/src/TensorValues/MultiValueTypes.jl +++ b/src/TensorValues/MultiValueTypes.jl @@ -27,6 +27,17 @@ end change_eltype(::Type{<:Number},::Type{T}) where {T} = T change_eltype(::Number,::Type{T2}) where {T2} = change_eltype(Number,T2) +Mutable(::Type{MultiValue}) = @abstractmethod +Mutable(::MultiValue) = Mutable(MultiValue) +mutable(a::MultiValue) = @abstractmethod + +""" + num_components(::Type{<:Number}) + num_components(a::Number) + +Total number of components of a `Number` or `MultiValue`, that is 1 for scalars +and the product of the size dimensions for a `MultiValue`. This is the same as `length`. 
+""" num_components(::Type{<:Number}) = 1 num_components(::Number) = num_components(Number) num_components(T::Type{<:MultiValue}) = @unreachable "$T type is too abstract to count its components, provide a (parametric) concrete type" diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl index f6e425ef6..174652834 100644 --- a/src/TensorValues/SymFourthOrderTensorValueTypes.jl +++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl @@ -97,6 +97,10 @@ end rand(rng::AbstractRNG,::Random.SamplerType{<:SymFourthOrderTensorValue{D,T,L}}) where {D,T,L} = SymFourthOrderTensorValue{D,T}(Tuple(rand(rng, SVector{L,T}))) +Mutable(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} = @notimplemented +Mutable(::SymFourthOrderTensorValue{D,T}) where {D,T} = Mutable(SymFourthOrderTensorValue{D,T}) +mutable(a::SymFourthOrderTensorValue{D}) where D = @notimplemented + change_eltype(::Type{SymFourthOrderTensorValue{D,T1}},::Type{T2}) where {D,T1,T2} = SymFourthOrderTensorValue{D,T2} change_eltype(::Type{SymFourthOrderTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymFourthOrderTensorValue{D,T2,L} change_eltype(::SymFourthOrderTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymFourthOrderTensorValue{D,T1,L},T2) diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index c9e605cc9..fefce93e1 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -457,12 +457,67 @@ m = mutable(v) @test m == get_array(v) @test isa(m,MMatrix) +v = SymTensorValue{2}(1,2,3) +m = mutable(v) +@test m == get_array(v) +@test isa(m,MMatrix) + +v = SymTracelessTensorValue{2}(1,2) +m = mutable(v) +@test m == get_array(v) +@test isa(m,MMatrix) + +v = ThirdOrderTensorValue{2,1,3}(1:6...) +m = mutable(v) +@test m == get_array(v) +@test isa(m,MArray) + +v = SymFourthOrderTensorValue{2}(1:9...) 
+@test_throws ErrorException mutable(v) #notimplemented + M = Mutable(VectorValue{3,Int}) @test M == MVector{3,Int} m = zero(M) v = VectorValue(m) @test isa(v,VectorValue{3,Int}) +M2 = Mutable(v) +@test M == M2 + +M = Mutable(TensorValue{3,3,Int}) +@test M == MMatrix{3,3,Int} +m = zero(M) +v = TensorValue(m) +@test isa(v,TensorValue{3,3,Int}) +M2 = Mutable(v) +@test M == M2 + +M = Mutable(SymTensorValue{3,Int}) +@test M == MMatrix{3,3,Int} +m = zero(M) +v = SymTensorValue(m) +@test isa(v,SymTensorValue{3,Int}) +M2 = Mutable(v) +@test M == M2 + +M = Mutable(SymTracelessTensorValue{3,Int}) +@test M == MMatrix{3,3,Int} +m = zero(M) +v = SymTracelessTensorValue(m) +@test isa(v,SymTracelessTensorValue{3,Int}) +M2 = Mutable(v) +@test M == M2 + +M = Mutable(ThirdOrderTensorValue{3,1,2,Int}) +@test M == MArray{Tuple{3,1,2},Int} +m = zero(M) +v = ThirdOrderTensorValue(m) +@test isa(v,ThirdOrderTensorValue{3,1,2,Int}) +M2 = Mutable(v) +@test M == M2 + +@test_throws ErrorException Mutable(SymFourthOrderTensorValue{2,Int}) # @notimplemented +@test_throws ErrorException Mutable(MultiValue) # @abstractmethod @test num_components(Int) == 1 @test num_components(Float64) == 1 From 6922cf438beed115fc4ecf17f147170aa80cc7ba Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 24 Oct 2024 12:19:09 +1100 Subject: [PATCH 33/85] Documenting Gridap.TensorValues --- src/TensorValues/MultiValueTypes.jl | 63 +++++++++++-- src/TensorValues/Operations.jl | 88 ++++++++++++++++++- .../SymFourthOrderTensorValueTypes.jl | 13 ++- src/TensorValues/SymTensorValueTypes.jl | 12 ++- .../SymTracelessTensorValueTypes.jl | 12 ++- src/TensorValues/TensorValueTypes.jl | 11 ++- src/TensorValues/TensorValues.jl | 49 ++++++++++- .../ThirdOrderTensorValueTypes.jl | 6 +- src/TensorValues/VectorValueTypes.jl | 4 +- 9 files changed, 236 insertions(+), 22 deletions(-) diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl index 33379f43b..d95047aa7 100644 --- 
a/src/TensorValues/MultiValueTypes.jl +++ b/src/TensorValues/MultiValueTypes.jl @@ -3,7 +3,15 @@ ############################################################### """ -Type representing a multi-dimensional value + MultiValue{S,T,N,L} <: Number + +Abstract type representing a multi-dimensional number value. The parameters are analog to that of StaticArrays.jl: +- `S` is a Tuple type holding the size of the tensor, e.g. Tuple{3} for a 3d vector or Tuple{2,4} for a 2 rows and 4 columns tensor, +- `T` is the type of the scalar components, should be subtype of `Number`, +- `N` is the order of the tensor, the length of `S`, +- `L` is the number of components stored internally. + +`MultiValue`s are immutable. See [`TensorValues`](@ref) for more details on usage. """ abstract type MultiValue{S,T,N,L} <: Number end @@ -24,11 +32,37 @@ end # Other constructors and conversions implemented for more generic types ############################################################### +""" + change_eltype(m::Number,::Type{T2}) + change_eltype(M::Type{<:Number},::Type{T2}) + +For multivalues, returns `M` or `typeof(m)` but with the component type (`MultiValue`'s parametric type `T`) changed to `T2`. + +For scalars (or any non MultiValue number), `change_eltype` returns T2. +""" change_eltype(::Type{<:Number},::Type{T}) where {T} = T change_eltype(::Number,::Type{T2}) where {T2} = change_eltype(Number,T2) + +""" + Mutable(T::Type{<:MultiValue}) -> ::Type{<:MArray} + Mutable(a::MultiValue) + +Return the concrete `MArray` type (defined by `StaticArrays.jl`) corresponding +to the `MultiValue` type T or array size and type of `a`. + +See also [`mutable`](@ref). +""" Mutable(::Type{MultiValue}) = @abstractmethod Mutable(::MultiValue) = Mutable(MultiValue) + +""" + mutable(a::MultiValue) + +Converts `a` into an array of type `MArray` defined by `StaticArrays.jl`. + +See also [`Mutable`](@ref). 
+""" mutable(a::MultiValue) = @abstractmethod """ @@ -43,8 +77,14 @@ num_components(::Number) = num_components(Number) num_components(T::Type{<:MultiValue}) = @unreachable "$T type is too abstract to count its components, provide a (parametric) concrete type" """ -Number of independant components, that is `num_components(::Type{T})` minus the -number of components determined from others by symmetries or constraints. + num_indep_components(::Type{<:Number}) + num_indep_components(a::Number) + +Number of independant components of a `Number`, that is `num_components` +minus the number of components determined from others by symmetries or constraints. + +For example, a `TensorValue{3,3}` has 9 independant components, a `SymTensorValue{3}` +has 6 and a `SymTracelessTensorValue{3}` has 5. But they all have 9 (non independant) components. """ num_indep_components(::Type{T}) where T<:Number = num_components(T) num_indep_components(::T) where T<:Number = num_indep_components(T) @@ -56,15 +96,21 @@ end # This should probably not be exported, as (accessing) the data field of # MultiValue is not a public api +""" +Transforms Cartesian indices to linear indices that index `MultiValue`'s private internal storage, this should'nt be used. +""" function data_index(::Type{<:MultiValue},i...) @abstractmethod end +# The order of export of components is that of their position in the .data +# field, but the actual method "choosing" the export order is +# Gridap.Visualization._prepare_data(::Multivalue). """ indep_comp_getindex(a::Number,i) -Get the ith independent component of `a`. It only differs from `getindex(a,i)` -when the components of `a` are linked, see [`num_indep_components`](@ref), and +Get the `i`th independent component of `a`. It only differs from `getindex(a,i)` +when the components of `a` are interdependant, see [`num_indep_components`](@ref). `i` should be in `1:num_indep_components(a)`. 
""" function indep_comp_getindex(a::Number,i) @@ -86,9 +132,12 @@ end """ indep_components_names(::MultiValue) -Returns an array of strings containing the component labels in the order they are stored internally, consistently with _prepare_data(::Multivalue) +Return an array of strings containing the component labels in the order they +are exported in VTK file. -If all dimensions of the tensor shape S are smaller than 3, the components should be named with letters "X","Y" and "Z" similarly to the automatic naming of Paraview. Else, if max(S)>3, they are labeled from "1" to "\$dim". +If all dimensions of the tensor shape S are smaller than 3, the components +are named with letters "X","Y" and "Z" similarly to the automatic naming +of Paraview. Else, if max(S)>3, they are labeled by integers starting from "1". """ function indep_components_names(::Type{MultiValue{S,T,N,L}}) where {S,T,N,L} return ["$i" for i in 1:L] diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 44c7ecc5f..f822d7ce9 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -213,6 +213,13 @@ function *(::SymTracelessTensorValue,::SymTracelessTensorValue) @unreachable _ms dot(a::MultiValue{Tuple{D}}, b::MultiValue{Tuple{D}}) where D = inner(a,b) +""" + dot(a::MultiValue{Tuple{...,D}}, b::MultiValue{Tuple{D,...}}) + a ⋅¹ b + a ⋅ b + +Inner product of two tensors `a` and `b`, that is the single contraction of the last index of `a` with the first index of `b`. The corresponding dimensions `D` must match. No symmetry is preserved. +""" dot(a::MultiValue,b::MultiValue) = @notimplemented @generated function dot(a::A,b::B) where {A<:MultiValue{Tuple{D1}},B<:MultiValue{Tuple{D1,D2}}} where {D1,D2} @@ -326,6 +333,12 @@ const ⋅¹ = dot inner(a::Number,b::Number) = a*b +""" + inner(a::MultiValue{S}, b::MultiValue{S}) -> scalar + a ⊙ b + +Inner product of two tensors, that is the full contraction along each indices. 
The size `S` of `a` and `b` must match. +""" function inner(a::MultiValue, b::MultiValue) @notimplemented end @@ -368,6 +381,18 @@ const ⊙ = inner # Double Contractions w/ products ############################################################### +""" + double_contraction(a::MultiValue{Tuple{...,D,E}}, b::MultiValue{Tuple{D,E,...}) + a ⋅² b + +Double contraction of two tensors `a` and `b`, along the two last indices of `a` +and two first of `b`. The corresponding dimensions `D` and `E` must match, the +contraction order is chosen to be consistent with the inner product of second +order tensors. + +The `double_contraction` between second- and/or fourth-order symmetric tensors +preserves the symmetry (returns a symmetric tensor type). +""" function double_contraction(a::MultiValue{S1}, b::MultiValue{S2}) where {S1<:Tuple,S2<:Tuple} L1, L2 = length(S1.types), length(S2.types) if L1<2 || L2<2 @@ -529,13 +554,19 @@ end # Outer product (aka dyadic product) -""" -""" outer(a::Number,b::Number) = a*b outer(a::MultiValue,b::Number) = a*b outer(a::Number,b::MultiValue) = a*b +""" + outer(a,b) + a ⊗ b + +Outer product (or tensor-product) of two `Number`s and/or `MultiValue`s, that is +`(a⊗b)[i₁,...,iₙ,j₁,...,jₙ] = a[i₁,...,iₙ]*b[j₁,...,jₙ]`. This falls back to standard +multiplication if `a` or `b` is a scalar. +""" function outer(a::MultiValue,b::MultiValue) @notimplemented end @@ -588,12 +619,24 @@ function cross(a::MultiValue{Tuple{2}}, b::MultiValue{Tuple{2}}) a[1]b[2]-a[2]b[1] end +""" + cross(a::VectorValue{3}, b::VectorValue{3}) -> VectorValue{3} + cross(a::VectorValue{2}, b::VectorValue{2}) -> Scalar + a × b + +Cross product of 2D and 3D vector. +""" cross(a::MultiValue,b::MultiValue) = error("Cross product only defined for R2 and R3 vectors") ############################################################### # Linear Algebra ############################################################### +""" + det(a::MultiValue{Tuple{D,D},T}) + +Determinent of second order tensors. 
+""" det(a::MultiValue{Tuple{D,D}}) where {D} = det(get_array(a)) det(a::MultiValue{Tuple{1,1}}) = a[1] @@ -612,6 +655,11 @@ function det(a::MultiValue{Tuple{3,3}}) (a_11*a_23*a_32 + a_12*a_21*a_33 + a_13*a_22*a_31) end +""" + inv(a::MultiValue{Tuple{D,D}}) + +Inverse of a second order tensor. +""" inv(a::MultiValue{Tuple{D,D}}) where D = TensorValue(inv(get_array(a))) # those still have better perf than the D=2,3 specialization below inv(a::AbstractSymTensorValue{D}) where D = SymTensorValue(inv(get_array(a))) @@ -652,15 +700,27 @@ end ############################################################### """ + meas(a::MultiValue{Tuple{D}}) + meas(a::MultiValue{Tuple{1,D2}}) + +Euclidean norm of a vector. """ meas(a::MultiValue{Tuple{D}}) where D = sqrt(inner(a,a)) + +""" + meas(J::MultiValue{Tuple{D1,D2}}) + +Returns the absolute `D1`-dimensional volume of the parallelepiped +formed by the rows of `J`, that is `sqrt(det(J⋅Jᵀ))`, or `abs(det(J))` if `D1`=`D2`. +This is used to compute the contribution of the Jacobian matrix `J` of a changes of variables in integrals. +""" meas(a::MultiValue{Tuple{D,D}}) where D = abs(det(a)) #meas( ::TensorValue{0,D,T}) where {T,D} = one(T) #meas( ::MultiValue{Tuple{0,0},T}) where {T} = one(T) function meas(v::MultiValue{Tuple{1,D}}) where D t = VectorValue(v.data) - sqrt(t ⋅ t) + meas(t) end function meas(v::MultiValue{Tuple{2,3}}) @@ -668,7 +728,7 @@ function meas(v::MultiValue{Tuple{2,3}}) n2 = v[1,3]*v[2,1] - v[1,1]*v[2,3] n3 = v[1,1]*v[2,2] - v[1,2]*v[2,1] n = VectorValue(n1,n2,n3) - sqrt(n ⋅ n) + meas(n) end function meas(Jt::MultiValue{Tuple{D1,D2}}) where {D1,D2} @@ -676,6 +736,12 @@ function meas(Jt::MultiValue{Tuple{D1,D2}}) where {D1,D2} sqrt(det(Jt⋅J)) end +""" + norm(u::MultiValue{Tuple{D}}) + norm(u::MultiValue{Tuple{D1,D2}}) + +Euclidean (2-)norm of `u`, namely `sqrt(inner(u,u))`. 
+""" @inline norm(u::MultiValue{Tuple{D}}) where D = sqrt(inner(u,u)) @inline norm(u::MultiValue{Tuple{D1,D2}}) where {D1,D2} = sqrt(inner(u,u)) @inline norm(u::MultiValue{Tuple{0},T}) where T = sqrt(zero(T)) @@ -702,12 +768,22 @@ end # Trace ############################################################### +""" + tr(v::MultiValue{Tuple{D1,D2}}) + +Return the trace of a second order tensor, defined by `0` if `D1`≠`D2`, and `Σᵢ vᵢᵢ` else. +""" @generated function tr(v::MultiValue{Tuple{D,D}}) where D str = join([" v[$i,$i] +" for i in 1:D ]) Meta.parse(str[1:(end-1)]) end tr(::SymTracelessTensorValue{D,T}) where {D,T} = zero(T) +""" + tr(v::MultiValue{Tuple{D1,D1,D2}}) -> ::VectorValue{D2} + +Return a vector of length `D2` of traces computed on the first two indices: `resⱼ = Σᵢ vᵢᵢⱼ`. +""" @generated function tr(v::MultiValue{Tuple{A,A,B}}) where {A,B} lis = LinearIndices((A,A,B)) str = "" @@ -768,6 +844,10 @@ transpose(a::AbstractSymTensorValue) = a ############################################################### """ + symmetric_part(v::MultiValue{Tuple{D,D}})::AbstractSymTensorValue + +Return the symmetric part of second order tensor, that is `½(v + vᵀ)`. +Return `v` if `v isa AbstractSymTensorValue`. """ @generated function symmetric_part(v::MultiValue{Tuple{D,D}}) where D str = "(" diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl index 174652834..43a41567a 100644 --- a/src/TensorValues/SymFourthOrderTensorValueTypes.jl +++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl @@ -3,7 +3,11 @@ ############################################################### """ -Type representing a symmetric fourth-order tensor + SymFourthOrderTensorValue{D,T,L} <: MultiValue{Tuple{D,D,D,D},T,4,L} + +Type representing a symmetric second-order `D`×`D`×`D`×`D` tensor, with symmetries ijkl↔jikl and ijkl↔ijlk. It must hold `L` = (`D`(`D`+1)/2)^2. 
+ +It is constructed by providing the components of index (i,j,k,l) for 1 ≤ i ≤ j ≤ `D` and 1 ≤ k ≤ l ≤ `D`. """ struct SymFourthOrderTensorValue{D,T,L} <: MultiValue{Tuple{D,D,D,D},T,4,L} data::NTuple{L,T} @@ -80,6 +84,13 @@ zero(::Type{<:SymFourthOrderTensorValue{D,T,L}}) where {D,T,L} = SymFourthOrderT zero(::SymFourthOrderTensorValue{D,T,L}) where {D,T,L} = zero(SymFourthOrderTensorValue{D,T,L}) # This is in fact the "symmetrized" 4th order identity +""" + one(::SymFourthOrderTensorValue{D,T}}) + +Returns the tensor `resᵢⱼₖₗ = δᵢₖδⱼₗ(δᵢⱼ + (1-δᵢⱼ)/2)`. + +The scalar type `T2` of the result is `typeof(one(T)/2)`. +""" @generated function one(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} S = typeof(one(T)/2) str = join(["($i==$k && $j==$l) ? ( $i==$j ? one($S) : one($S)/2) : zero($S), " for i in 1:D for j in i:D for k in 1:D for l in k:D]) diff --git a/src/TensorValues/SymTensorValueTypes.jl b/src/TensorValues/SymTensorValueTypes.jl index d1b2c1954..258a9785b 100644 --- a/src/TensorValues/SymTensorValueTypes.jl +++ b/src/TensorValues/SymTensorValueTypes.jl @@ -2,12 +2,20 @@ # SymTensorValue Type ############################################################### """ -Abstract type representing any symmetric second-order tensor + AbstractSymTensorValue{D,T,L} <: MultiValue{Tuple{D,D},T,2,L} + +Abstract type representing any symmetric second-order `D`×`D` tensor, with symmetry ij↔ji. + +See also [`SymTensorValue`](@ref), [`SymTracelessTensorValue`](@ref). """ abstract type AbstractSymTensorValue{D,T,L} <: MultiValue{Tuple{D,D},T,2,L} end """ -Type representing a symmetric second-order tensor (with D(D-1)/2 independant components) + SymTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} + +Type representing a symmetric second-order `D`×`D` tensor. It must hold `L` = `D`(`D`+1)/2. + +It is constructed by providing the components of index (i,j) for 1 ≤ i ≤ j ≤ `D`. 
""" struct SymTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} data::NTuple{L,T} diff --git a/src/TensorValues/SymTracelessTensorValueTypes.jl b/src/TensorValues/SymTracelessTensorValueTypes.jl index 8295b2b57..9e3ddfa03 100644 --- a/src/TensorValues/SymTracelessTensorValueTypes.jl +++ b/src/TensorValues/SymTracelessTensorValueTypes.jl @@ -3,10 +3,13 @@ ############################################################### """ -Type representing a traceless symmetric second-order tensor, -used to model the Q tensor in nematic liquid cristals + SymTracelessTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} + QTensorValue{D,T,L} -The last diagonal value is determined by minus the sum of the other and musn't be provided +Type representing a symetric second-order `D`×`D` tensor with zero trace. It must hold `L` = `D`(`D`+1)/2. +This type is used to model the Q-tensor order parameter in nematic liquid cristals. + +The constructor determines the value of index (`D`,`D`) as minus the sum of the other diagonal values, so it value musn't be provided. The constructor thus expects the `L`-1 components of indices (i,j) for 1 ≤ i ≤ `D`-1 and i ≤ j ≤ `D`. """ struct SymTracelessTensorValue{D,T,L} <: AbstractSymTensorValue{D,T,L} data::NTuple{L,T} @@ -31,6 +34,9 @@ end Meta.parse("($str)") end +""" +Alias for [`SymTracelessTensorValue`](@ref). +""" const QTensorValue = SymTracelessTensorValue ############################################################### diff --git a/src/TensorValues/TensorValueTypes.jl b/src/TensorValues/TensorValueTypes.jl index fb642e7fc..f871c7e51 100644 --- a/src/TensorValues/TensorValueTypes.jl +++ b/src/TensorValues/TensorValueTypes.jl @@ -3,7 +3,11 @@ ############################################################### """ -Type representing a second-order tensor + TensorValue{D1,D2,T,L} <: MultiValue{Tuple{D1,D2},T,2,L} + +Type representing a second-order `D1`×`D2` tensor. It must hold `L` = `D1`*`D2`. 
+ +If only `D1` or no dimension parameter is given to the constructor, `D1`=`D2` is assumed. """ struct TensorValue{D1,D2,T,L} <: MultiValue{Tuple{D1,D2},T,2,L} data::NTuple{L,T} @@ -117,6 +121,11 @@ change_eltype(::TensorValue{D1,D2,T1,L},::Type{T2}) where {D1,D2,T1,T2,L} = chan get_array(arg::TensorValue{D1,D2,T}) where {D1,D2,T} = convert(SMatrix{D1,D2,T},arg) +""" + diagonal_tensor(v::VectorValue{D,T}) -> ::TensorValue{D,D,T} + +Return a diagonal `D`×`D` tensor with diagonal containing the elements of `v`. +""" @generated function diagonal_tensor(v::VectorValue{D,T}) where {D,T} s = ["zero(T), " for i in 1:(D*D)] for i in 1:D diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index 0a096b370..11972a83d 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -1,6 +1,13 @@ """ -This module provides concrete implementations of `Number` that represent -1st, 2nd and general order tensors. +This module provides the abstract interface `MultiValue` representing tensors +that are also `Number`s, along with concrete implementations for the following +tensors: +- 1st order [`VectorValue`](@ref), +- 2nd order [`TensorValue`](@ref), +- 2nd order and symmetric [`SymTensorValue`](@ref), +- 2nd order, symmetric and traceless [`SymTracelessTensorValue`](@ref), +- 3rd order [`ThirdOrderTensorValue`](@ref), +- 4th order and symmetric [`SymFourthOrderTensorValue`](@ref)). ## Why @@ -22,6 +29,44 @@ C = inner.(g,B) # inner product of g against all TensorValues in the array B # C = [2494 2494 2494 2494 2494] ``` +To create a variable of type [`MultiValue`](@ref) from components, these should be given +as separate arguments or all gathered in a `tuple`. 
The order of the arguments +is the order of the linearized Cartesian indices of the corresponding array +(order of the [`LinearIndices`](@ref) indices): +```julia +using StaticArrays +t = TensorValue( (1, 2, 3, 4) ) +ts= convert(SMatrix{2,2,Int}, t) +@show ts +# 2×2 SMatrix{2, 2, Int64, 4} with indices SOneTo(2)×SOneTo(2): +# 1 3 +# 2 4 +t2[1,2] == t[1,2] == 3 # true +``` +For symetric tensor types, only the independent components should be given, see +[`SymTensorValue`](@ref), [`SymTracelessTensorValue`](@ref) and [`SymFourthOrderTensorValue`](@ref). + +A `MultiValue` can be created from an `AbstractArray` of the same size. If the +`MultiValue` type has internal constraints (e.g. symmetries), ONLY the required +components are picked from the array WITHOUT CHECKING if the given array +did respect the constraints: +```julia +SymTensorValue( [1 2; 3 4] ) # -> SymTensorValue{2, Int64, 3}(1, 2, 4) +SymTensorValue( SMatrix{2}(1,2,3,4) ) # -> SymTensorValue{2, Int64, 3}(1, 3, 4) +``` + +`MultiValue`s can be converted to static and mutable arrays types from +`StaticArrays.jl` using `convert` and [`mutable`](@ref), respectively. + +The concrete `MultiValue` types implement methods for the following +`Base` functions: `length`, `size`, `rand`, `zero`, `real`, `imag` and +`conj`. + +`one` is also implemented in particular cases, it is defined for second +and fourth order tensors. For second order, it returns the identity tensor `δij`, +for fourth order, see [`one`](@ref). `SymTracelessTensorValue` does not implement +`one`. 
+ The exported names are: $(EXPORTS) diff --git a/src/TensorValues/ThirdOrderTensorValueTypes.jl b/src/TensorValues/ThirdOrderTensorValueTypes.jl index 40bc50476..62c1b29ff 100644 --- a/src/TensorValues/ThirdOrderTensorValueTypes.jl +++ b/src/TensorValues/ThirdOrderTensorValueTypes.jl @@ -1,6 +1,10 @@ """ -Type representing a third-order tensor + ThirdOrderTensorValue{D1,D2,D3,T,L} <: MultiValue{Tuple{D1,D2,D3},T,3,L} + +Type representing a third-order `D1`×`D2`×`D3` tensor. It must hold `L` = `D1`\\*`D2`\\*`D3`. + +If only `D1` or no dimension parameter is given to the constructor, `D1`=`D2`=`D3` is assumed. """ struct ThirdOrderTensorValue{D1,D2,D3,T,L} <: MultiValue{Tuple{D1,D2,D3},T,3,L} data::NTuple{L,T} diff --git a/src/TensorValues/VectorValueTypes.jl b/src/TensorValues/VectorValueTypes.jl index 8c6edab83..989dadb37 100644 --- a/src/TensorValues/VectorValueTypes.jl +++ b/src/TensorValues/VectorValueTypes.jl @@ -3,7 +3,9 @@ ############################################################### """ -Type representing a first-order tensor + VectorValue{D,T} <: MultiValue{Tuple{D},T,1,D} + +Type representing a first-order tensor, that is a vector, of length `D`. 
""" struct VectorValue{D,T} <: MultiValue{Tuple{D},T,1,D} data::NTuple{D,T} From 0b6e4f270994c18c32f5b1e8eae38a7f1c37ff6c Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 17:31:20 +1100 Subject: [PATCH 34/85] nicer kwarg name --- src/Visualization/Vtk.jl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Visualization/Vtk.jl b/src/Visualization/Vtk.jl index 4ad09f3ae..d3d4a7c78 100644 --- a/src/Visualization/Vtk.jl +++ b/src/Visualization/Vtk.jl @@ -94,12 +94,12 @@ function create_vtk_file( if num_cells(trian)>0 for (k,v) in celldata - comp_names = _data_component_names(v) - vtk_cell_data(vtkfile, _prepare_data(v), k; component_names=comp_names) + component_names = _data_component_names(v) + vtk_cell_data(vtkfile, _prepare_data(v), k; component_names) end for (k,v) in nodaldata - comp_names = _data_component_names(v) - vtk_point_data(vtkfile, _prepare_data(v), k; component_names=comp_names) + component_names = _data_component_names(v) + vtk_point_data(vtkfile, _prepare_data(v), k; component_names) end end @@ -120,13 +120,13 @@ function create_pvtk_file( if num_cells(trian) > 0 for (k, v) in celldata - # comp_names are actually always nothing as there are no field in ptvk atm - comp_names = _data_component_names(v) - vtkfile[k, VTKCellData(), component_names=comp_names] = _prepare_data(v) + # component_names are actually always nothing as there are no field in ptvk atm + component_names = _data_component_names(v) + vtkfile[k, VTKCellData(), component_names] = _prepare_data(v) end for (k, v) in nodaldata - comp_names = _data_component_names(v) - vtkfile[k, VTKPointData(), component_names=comp_names] = _prepare_data(v) + component_names = _data_component_names(v) + vtkfile[k, VTKPointData(), component_names] = _prepare_data(v) end end return vtkfile From 7dd0563a3f3d76a3e443ff727e2ef461f5c6e935 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 17:35:56 +1100 Subject: [PATCH 35/85] Updated 
NEWS.md --- NEWS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/NEWS.md b/NEWS.md index 98d2d211f..789c85575 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added MacroFElements. These are defined as having the basis/dof-basis of a FESpace created on top of a RefinementRule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). - Added Barycentric refinement rule in 2D and 3D. Added Simplexify refinement rule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). +- Added names to vector and tensor components in VTK exports, to avoid Paraview's automatic (sometimes wrong) guesses. See `TensorValues.indep_components_names`. Since PR[#1038](https://github.com/gridap/Gridap.jl/pull/1038). ## [0.18.6] - 2024-08-29 From 1c77ac219ce31dd90162e3b77ee0e2aa2827358a Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 17:57:24 +1100 Subject: [PATCH 36/85] typo --- src/Visualization/Vtk.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Visualization/Vtk.jl b/src/Visualization/Vtk.jl index d3d4a7c78..c1b86c17c 100644 --- a/src/Visualization/Vtk.jl +++ b/src/Visualization/Vtk.jl @@ -122,11 +122,11 @@ function create_pvtk_file( for (k, v) in celldata # component_names are actually always nothing as there are no field in ptvk atm component_names = _data_component_names(v) - vtkfile[k, VTKCellData(), component_names] = _prepare_data(v) + vtkfile[k, VTKCellData(); component_names] = _prepare_data(v) end for (k, v) in nodaldata component_names = _data_component_names(v) - vtkfile[k, VTKPointData(), component_names] = _prepare_data(v) + vtkfile[k, VTKPointData(); component_names] = _prepare_data(v) end end return vtkfile From 308f4eecb4717357bd558f95c77f88e9020213dd Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 21 Oct 2024 18:06:36 +1100 Subject: [PATCH 37/85] typoo --- src/Visualization/Vtk.jl 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Visualization/Vtk.jl b/src/Visualization/Vtk.jl index c1b86c17c..83c4fb6d7 100644 --- a/src/Visualization/Vtk.jl +++ b/src/Visualization/Vtk.jl @@ -122,11 +122,11 @@ function create_pvtk_file( for (k, v) in celldata # component_names are actually always nothing as there are no field in ptvk atm component_names = _data_component_names(v) - vtkfile[k, VTKCellData(); component_names] = _prepare_data(v) + vtkfile[k, VTKCellData(), component_names=component_names] = _prepare_data(v) end for (k, v) in nodaldata component_names = _data_component_names(v) - vtkfile[k, VTKPointData(); component_names] = _prepare_data(v) + vtkfile[k, VTKPointData(), component_names=component_names] = _prepare_data(v) end end return vtkfile From 4df2b4dbf09dc47b66708664f29fd88986ad42c2 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 24 Oct 2024 15:15:00 +1100 Subject: [PATCH 38/85] Updated NEWS.md --- NEWS.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/NEWS.md b/NEWS.md index 789c85575..6a84321ed 100644 --- a/NEWS.md +++ b/NEWS.md @@ -12,6 +12,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added MacroFElements. These are defined as having the basis/dof-basis of a FESpace created on top of a RefinementRule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). - Added Barycentric refinement rule in 2D and 3D. Added Simplexify refinement rule. Since PR[#1024](https://github.com/gridap/Gridap.jl/pull/1024). - Added names to vector and tensor components in VTK exports, to avoid Paraview's automatic (sometimes wrong) guesses. See `TensorValues.indep_components_names`. Since PR[#1038](https://github.com/gridap/Gridap.jl/pull/1038). +- Misc improvements of the `TensorValues` module: See `TensorValues.indep_components_names`. Since PR[#1040](https://github.com/gridap/Gridap.jl/pull/1040). 
+ - Documented all symbols exported by the module + - Improved and added test for some API function of `MultiValue` (general `diag` of 2nd order tensors, fixed `convert` of 3rd order tensors to SArray, avoid unwanted fallback of `num_components` on `MultiValue` types with undefined dimensions, more autodiff tests, better `double_contraction` API (prevent invalid operation giving indexing errors and enable valid operations)). + - Added a clear separation between the physical components access (`getindex`, `num_components`) and the numerical access to the stored independent components (`num_indep_components`, `indep_comp_getindex`) to enable using symmetric tensor types as unknown in FE Spaces. + - Implemented automatic differentiation `gradient` and `laplacian` for second order tensor, and `divergence` for third order tensors. + - Added `AbstractSymTensorValue`, an abstract type for second order symmetric tensors, and `SymTracelessTensorValue` (aliased to `QTensorValue`), a type for traceless symmetric tensors. `SymTensorValue` is now subtype of `AbstractSymTensorValue`. + - A convergence test for Poisson problem of `QTensorValue` unknown field validates the implementation. 
## [0.18.6] - 2024-08-29 From 76e7226a68ff3bee4bab3073f440d314669eb178 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 10:46:21 +1100 Subject: [PATCH 39/85] isless improvements - Throw error when undefined comparison is done (instead of falling back into `isless(::Number, ::MultiValue)` and having indexing error) - Added missing isless test coverage --- src/TensorValues/Operations.jl | 1 + test/TensorValuesTests/OperationsTests.jl | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index f822d7ce9..e974dedad 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -30,6 +30,7 @@ function isless(a::MultiValue{Tuple{L}},b::MultiValue{Tuple{L}}) where L end isless(a::Number,b::MultiValue) = all(isless.(a, b.data)) +isless(a::MultiValue,b::MultiValue) = @unreachable "Comparison is not defined between tensor of order greater than 1" ############################################################### # Addition / subtraction diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 8300c8f64..1b4069d08 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -1,5 +1,6 @@ module OperationsTests +using Test: Error using Test using Gridap.TensorValues using Gridap.Arrays @@ -16,6 +17,12 @@ b = VectorValue(1,3,3) @test (a >= b) == false @test (a > b) == false +@test (a < a) == false +@test (a <= a) == true +@test (a == a) == true +@test (a >= a) == true +@test (a > a) == false + @test VectorValue(1,2,3) == VectorValue(1.0,2.0,3.0) @test VectorValue(1,2,3) == VectorValue(1+0im, 2+0im, 3+0im) @test VectorValue(1,2,3) ≠ VectorValue(1,2) @@ -33,6 +40,11 @@ b = VectorValue(2,1,6) @test [a,a] == [a,a] @test [a,a] ≈ [a,a] +c = TensorValue(1,2,3,4) + +@test_throws ErrorException (a < c) +@test_throws ErrorException (a <= c) + # Addition / subtraction c = +a From 
ca01a55fc099ccb84c1661eb18cbfcdf3de24181 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 11:14:48 +1100 Subject: [PATCH 40/85] fix some MultiValue +,-,*,\ and their test coverage --- src/TensorValues/Operations.jl | 12 +++++--- test/TensorValuesTests/OperationsTests.jl | 37 +++++++++++++++++++++++ 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index e974dedad..42f1fa390 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -179,10 +179,14 @@ function (/)(a::SymTracelessTensorValue,b::Number) end const _err = " with number is undefined for traceless tensors" -function -(::SymTracelessTensorValue,::Number) error("Addition" *_err) end -function +(::SymTracelessTensorValue,::Number) error("Subtraction"*_err) end -function -(::Number,::SymTracelessTensorValue) error("Addition" *_err) end -function +(::Number,::SymTracelessTensorValue) error("Subtraction"*_err) end +function +(::SymTracelessTensorValue,::Number) error("Addition" *_err) end +function -(::SymTracelessTensorValue,::Number) error("Subtraction"*_err) end +function +(::Number,::SymTracelessTensorValue) error("Addition" *_err) end +function -(::Number,::SymTracelessTensorValue) error("Subtraction"*_err) end +function +(::SymTracelessTensorValue,::MultiValue) error("Addition" *_err) end +function -(::SymTracelessTensorValue,::MultiValue) error("Subtraction"*_err) end +function +(::MultiValue,::SymTracelessTensorValue) error("Addition" *_err) end +function -(::MultiValue,::SymTracelessTensorValue) error("Subtraction"*_err) end @inline function _eltype(op,r,a,b) eltype(r) diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 1b4069d08..14a18f942 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -203,6 +203,43 @@ c = b - a r = TensorValue(-4,-4,-3,9) @test c==r +v = 
VectorValue(1,2) +t = TensorValue(1,2,3,4) +s = SymTensorValue(1,2,3) +q = SymTracelessTensorValue(1,2) +r = ThirdOrderTensorValue(1:8...) +f = SymFourthOrderTensorValue(1:9...) + +@test_throws ErrorException v+t +@test_throws ErrorException r+v +@test_throws ErrorException r-f +@test_throws ErrorException f-v +@test_throws ErrorException v-s +@test_throws ErrorException q+v +@test_throws ErrorException v+q +@test_throws ErrorException v-q +@test_throws ErrorException q-v +@test_throws ErrorException q+0 +@test_throws ErrorException 0+q +@test_throws ErrorException 0-q +@test_throws ErrorException q-0 + +# Multiplication / division + +@test_throws ErrorException v*t +@test_throws ErrorException r*v +@test_throws ErrorException r/f +@test_throws ErrorException f/v +@test_throws ErrorException v/s +@test_throws ErrorException q*v +@test_throws ErrorException v*q +@test_throws ErrorException q*s +@test_throws ErrorException s*q +@test_throws ErrorException s*s +@test_throws ErrorException q*q +@test_throws ErrorException v/q +@test_throws ErrorException q/v + # Matrix Division a = VectorValue(1,2,3) From f3b7b1a823b25262b223a1b3069a27fffd5a1cf8 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 11:19:47 +1100 Subject: [PATCH 41/85] test cover of inner error --- src/TensorValues/Operations.jl | 2 +- test/TensorValuesTests/OperationsTests.jl | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 42f1fa390..2f884b3eb 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -345,7 +345,7 @@ inner(a::Number,b::Number) = a*b Inner product of two tensors, that is the full contraction along each indices. The size `S` of `a` and `b` must match. """ function inner(a::MultiValue, b::MultiValue) - @notimplemented + @notimplemented "Sizes of tensors must match." 
end @generated function inner(a::MultiValue{S}, b::MultiValue{S}) where S diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 14a18f942..7534d9525 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -493,6 +493,10 @@ c = qt ⊙ qt2 @test isa(c,Int) @test c == inner(TensorValue(get_array(qt)),TensorValue(get_array(qt2))) +@test_throws ErrorException inner(a,t) +@test_throws ErrorException inner(s,a) +@test_throws ErrorException inner(a,q) + # Reductions a = VectorValue(1,2,3) @@ -723,7 +727,7 @@ I = one(SymFourthOrderTensorValue{2,Int}) @test I[2,2,2,2] == 1 @test I ⊙ ε == ε -#@test I : ε == ε +@test ε ⊙ I == ε a = TensorValue(1,2,3,4) b = I ⊙ a From f62ccea2af471ae827b7a28a854b615bc00d20d8 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 11:39:32 +1100 Subject: [PATCH 42/85] added missing double_contraction test coverage --- test/TensorValuesTests/OperationsTests.jl | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 7534d9525..3514e8ed8 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -493,6 +493,11 @@ c = qt ⊙ qt2 @test isa(c,Int) @test c == inner(TensorValue(get_array(qt)),TensorValue(get_array(qt2))) +t1 = TensorValue{2,3}(1:6...) +t2 = TensorValue{2,3}(10:15...) +@test inner(t1,t1) == 91 +@test double_contraction(t1,t2) == inner(t1,t2) + @test_throws ErrorException inner(a,t) @test_throws ErrorException inner(s,a) @test_throws ErrorException inner(a,q) @@ -785,8 +790,10 @@ odot_contraction_array = 1*a[:,1,1] + 2*a[:,1,2] + 3*a[:,1,3] + 2*a[:,2,1] + v = VectorValue(1:2...) t1 = TensorValue(1:4...) t2 = TensorValue(1:9...) +s4 = SymFourthOrderTensorValue(1:9...) 
@test_throws ErrorException double_contraction(t1,v) @test_throws DimensionMismatch double_contraction(t1,t2) +@test_throws ErrorException double_contraction(t1,s4) # @notimplemented Sym4TensorIndexing = [1111, 1121, 1131, 1122, 1132, 1133, 2111, 2121, 2131, 2122, 2132, 2133, 3111, 3121, 3131, 3122, 3132, 3133, 2211, 2221, 2231, 2222, 2232, 2233, @@ -952,6 +959,18 @@ t1_double_t2 = t1 ⋅² t2 @test v23 == (t1_double_t2)[2,3] @test v33 == (t1_double_t2)[3,3] +# a_k = b_ij*c_ijk +t1 = TensorValue{3,2}(1:6...) +t2 = ThirdOrderTensorValue{3,2,4}(1:24...) +v1 = sum(t1[i,j]*t2[i,j,1] for i in 1:3 for j in 1:2); +v2 = sum(t1[i,j]*t2[i,j,2] for i in 1:3 for j in 1:2); +v3 = sum(t1[i,j]*t2[i,j,3] for i in 1:3 for j in 1:2); +v4 = sum(t1[i,j]*t2[i,j,4] for i in 1:3 for j in 1:2); +t1_double_t2 = t1 ⋅² t2 +@test v1 == (t1_double_t2)[1] +@test v2 == (t1_double_t2)[2] +@test v3 == (t1_double_t2)[3] +@test v4 == (t1_double_t2)[4] # a_il = b_ij*c_jl v1 = [1 2 3 From f708a51c660db71d112d55976f234b5355639cd5 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 12:10:17 +1100 Subject: [PATCH 43/85] prevent wrong `det` call and test coverage of `inv`, `det` and `outer` --- src/TensorValues/Operations.jl | 9 +++++---- test/TensorValuesTests/OperationsTests.jl | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 2f884b3eb..30cc21ded 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -631,7 +631,7 @@ end Cross product of 2D and 3D vector. 
""" -cross(a::MultiValue,b::MultiValue) = error("Cross product only defined for R2 and R3 vectors") +cross(a::MultiValue,b::MultiValue) = error("Cross product only defined for R2 and R3 vectors of same dimension") ############################################################### # Linear Algebra @@ -640,9 +640,10 @@ cross(a::MultiValue,b::MultiValue) = error("Cross product only defined for R2 an """ det(a::MultiValue{Tuple{D,D},T}) -Determinent of second order tensors. +Determinent of square second order tensors. """ det(a::MultiValue{Tuple{D,D}}) where {D} = det(get_array(a)) +det(a::MultiValue)= @unreachable "det undefined for this tensor shape: $(size(a))" det(a::MultiValue{Tuple{1,1}}) = a[1] @@ -666,8 +667,8 @@ end Inverse of a second order tensor. """ inv(a::MultiValue{Tuple{D,D}}) where D = TensorValue(inv(get_array(a))) -# those still have better perf than the D=2,3 specialization below -inv(a::AbstractSymTensorValue{D}) where D = SymTensorValue(inv(get_array(a))) + +# this has better perf than the D=2,3 specialization below inv(a::SymTracelessTensorValue{2}) = SymTracelessTensorValue(inv(get_array(a))) function inv(a::MultiValue{Tuple{1,1}}) diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 3514e8ed8..9cbddae19 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -548,6 +548,8 @@ k = TensorValue(1,2,3,4) c = outer(e,k) @test c == ThirdOrderTensorValue{2,2,2}(10, 20, 20, 40, 30, 60, 40, 80) +@test_throws ErrorException outer(k,c) # @notimplemented + @test tr(c) == VectorValue(50,110) # Cross product @@ -570,6 +572,9 @@ a = VectorValue(4.0,1.0) b = VectorValue(3.0,-2.0) @test cross(a, b) == -11.0 +a = VectorValue(4.0,1.0) +b = VectorValue(3.0,-2.0,1.0) +@test_throws ErrorException cross(a, b) # Linear Algebra t = TensorValue(10,2,30,4,5,6,70,8,9) @@ -598,6 +603,19 @@ t = TensorValue(1,4,-1,1) @test det(t) == det(TensorValue(get_array(t))) @test inv(t) == 
inv(TensorValue(get_array(t))) +t = TensorValue(1:16...) +t += one(t) +@test det(t) == det(TensorValue(get_array(t))) +@test inv(t) == inv(TensorValue(get_array(t))) + +q = SymTracelessTensorValue(1,2) +@test det(q) == det(TensorValue(get_array(q))) +@test inv(q) == SymTracelessTensorValue(inv(get_array(q))) + +t = TensorValue{2,3}(1:6...) +@test_throws ErrorException det(t) +@test_throws ErrorException inv(t) + # Measure a = VectorValue(1,2,3) From c2265d76a1956feeb52f5f49c1ddbc12f3f068b1 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Fri, 25 Oct 2024 12:55:22 +1100 Subject: [PATCH 44/85] add test coverage for adjoint and transpose --- test/TensorValuesTests/OperationsTests.jl | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 9cbddae19..f2d1bfed7 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -702,9 +702,16 @@ b = a' @test b == TensorValue(1,3,2,4) @test a⋅b == TensorValue(10,14,14,20) +a = TensorValue(1+0im,2-im,3,4+2im) +b = a' +@test adjoint(a) == b +@test b == TensorValue(1,3,2+im,4-2im) +@test a⋅b == TensorValue(10,14+5im,14-5im,25) + a = TensorValue(1,2,3,4) b = a' @test transpose(a) == b +@test transpose(a) == adjoint(a) @test b == TensorValue(1,3,2,4) @test a⋅b == TensorValue(10,14,14,20) @@ -720,6 +727,12 @@ sb = sa' @test sb == SymTensorValue(1,2,3,5,6,9) @test sa⋅sb == TensorValue(get_array(sa))⋅TensorValue(get_array(sb)) +sa = SymTracelessTensorValue(1,2,3,5,6) +sb = sa' +@test adjoint(sa) == sb +@test sb == SymTracelessTensorValue(1,2,3,5,6) +@test sa⋅sb == TensorValue(get_array(sa))⋅TensorValue(get_array(sb)) + u = VectorValue(1.0,2.0) v = VectorValue(2.0,3.0) @test dot(u,v) ≈ inner(u,v) From d5d2e1576a8ffddb85693587d90dd853886754db Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 29 Oct 2024 11:53:12 +1100 Subject: [PATCH 45/85] add test coverage for 
SymTracelessTensorValue --- test/TensorValuesTests/TypesTests.jl | 71 ++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index fefce93e1..7306be651 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -57,6 +57,15 @@ t = TensorValue{2,2,Int}(1,2.0,3,4) @test isa(t,TensorValue{2,2,Int}) @test convert(SMatrix{2,2,Int},t) == [1 3;2 4] +a = [11.0 21.0; 12.0 22.0] +@test isa(a,AbstractArray{Float64,2}) +t = convert(TensorValue{2,2,Float64},a) +@test t == TensorValue{2,2,Float64,3}(11.0, 12.0, 21.0, 22.0) +m = convert(MMatrix{2,2,Float64},t) +@test m == MMatrix{2}(11.0, 12.0, 21.0, 22.0) +u = convert(NTuple{4,Float64},t) +@test u == tuple(11.0, 12.0, 21.0, 22.0) + # Constructors (SymTensorValue) s = SymTensorValue( (11,21,22) ) @@ -103,12 +112,45 @@ s = SymTensorValue{2,Int}(11,21.0,22) @test isa(s,SymTensorValue{2,Int}) @test convert(SMatrix{2,2,Int},s) == [11.0 21.0;21.0 22.0] +a = [11.0 21.0; NaN 22.0] +@test isa(a,AbstractArray{Float64,2}) +@test convert(SymTensorValue{2,Float64},a) == SymTensorValue{2,Float64,3}(11.0, 21.0, 22.0) + # Constructors (SymTracelessTensorValue) +q_none = SymTracelessTensorValue{0, Int64, 0}() +q = SymTracelessTensorValue() +@test q == q_none +q = SymTracelessTensorValue{0}() +@test q == q_none +q = SymTracelessTensorValue{0}() +@test q == q_none +q = SymTracelessTensorValue(Tuple{}()) +@test q == q_none +q = SymTracelessTensorValue{0}(Tuple{}()) +@test q == q_none + +q_zero = SymTracelessTensorValue{1,Int}(NTuple{0,Int}()) +q = SymTracelessTensorValue{1}(Tuple{}()) +@test q == q_zero +q = SymTracelessTensorValue{1,Int}(Tuple{}()) +@test q == q_zero + +q = rand(SymTracelessTensorValue{0,Int}) +@test eltype(q) == Int +@test eltype(typeof(q)) == Int q = SymTracelessTensorValue( (11,21) ) @test isa(q,SymTracelessTensorValue{2,Int}) @test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] +q = 
SymTracelessTensorValue{2,Int,3}( (11,21) ) +@test isa(q,SymTracelessTensorValue{2,Int,3}) +@test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] + +q = SymTracelessTensorValue{2,Int,3}(11,21) +@test isa(q,SymTracelessTensorValue{2,Int,3}) +@test convert(SMatrix{2,2,Int},q) == [11 21;21 -11] + q = SymTracelessTensorValue(11,21) @test isa(q,SymTracelessTensorValue{2,Int}) @test convert(SMatrix{2,2,Float64},q) == [11.0 21.0;21.0 -11.0] @@ -157,6 +199,15 @@ q = SymTracelessTensorValue{2,Int}(11,21.0) @test isa(q,SymTracelessTensorValue{2,Int}) @test convert(SMatrix{2,2,Int},q) == [11.0 21.0;21.0 -11.0] +a = [11.0 21.0; NaN NaN] +@test isa(a,AbstractArray{Float64,2}) +t = convert(SymTracelessTensorValue{2,Float64},a) +@test t == SymTracelessTensorValue{2,Float64,3}(11.0, 21.0) +m = convert(MMatrix{2,2,Float64},t) +@test m == MMatrix{2}(11.0, 21.0, 21.0, -11.0) +u = convert(NTuple{3,Float64},t) +@test u == tuple(11.0, 21.0, -11.0) + # Constructors (SymFourthOrderTensorValue) s = SymFourthOrderTensorValue( (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) ) @@ -284,30 +335,50 @@ g = VectorValue((1.0,2,3.0,4)) @test isa(g,VectorValue{4,Float64}) @test convert(SVector{4,Float64},g) == [1,2,3,4] +a = [1.0, 2.0] +@test isa(a,AbstractArray{Float64,1}) +t = convert(VectorValue{2,Float64},a) +@test t == VectorValue{2,Float64}(1.0, 2.0) +m = convert(MVector{2,Float64},t) +@test m == MVector{2}(1.0, 2.0) +u = convert(NTuple{2,Float64},t) +@test u == tuple(1.0, 2.0) # Initializers z = zero(TensorValue{3,3,Int,9}) +z2= zero(z) +@test z == z2 @test isa(z,TensorValue{3,3,Int,9}) @test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) z = zero(SymTensorValue{3,Int}) +z2= zero(z) +@test z == z2 @test isa(z,SymTensorValue{3,Int,6}) @test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) z = zero(SymTracelessTensorValue{3,Int}) +z2= zero(z) +@test z == z2 @test isa(z,SymTracelessTensorValue{3,Int,6}) @test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) z = 
zero(ThirdOrderTensorValue{3,3,3,Int,27}) +z2= zero(z) +@test z == z2 @test isa(z,ThirdOrderTensorValue{3,3,3,Int,27}) @test Tuple(z) == Tuple(zeros(Int,(27))) z = zero(SymFourthOrderTensorValue{2,Int}) +z2= zero(z) +@test z == z2 @test isa(z,SymFourthOrderTensorValue{2,Int,9}) @test Tuple(z) == Tuple(zeros(Int,(9))) z = zero(VectorValue{3,Int}) +z2= zero(z) +@test z == z2 @test isa(z,VectorValue{3,Int}) @test convert(SVector{3,Int},z) == zeros(Int,3) From e35b3bbb7c56ea4e5399eb0f81bfdb1cafabb7e9 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 29 Oct 2024 12:18:02 +1100 Subject: [PATCH 46/85] add test coverage for new autodiff methods --- test/FieldsTests/DiffOperatorsTests.jl | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/FieldsTests/DiffOperatorsTests.jl b/test/FieldsTests/DiffOperatorsTests.jl index 35f262de5..9c4fa58b6 100644 --- a/test/FieldsTests/DiffOperatorsTests.jl +++ b/test/FieldsTests/DiffOperatorsTests.jl @@ -79,6 +79,15 @@ u_ten(x) = TensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, -x[1]^2 - x[2], -4*x[1] -2*x[1],-one(x[2]),-4*one(x[1]), + 2*x[2] ) Δu_ten(x) = TensorValue( 2, -2, -2, 2 ) +u_ten23(x) = TensorValue{2,3}( x[1]^2 + x[2], 4*x[1] - x[2]^2, + -x[1]^2 - x[2],-4*x[1] + x[2]^2, + x[1]^2 + x[2], 4*x[1] - x[2]^2) +∇u_ten23(x) = ThirdOrderTensorValue{2,2,3}( + 2*x[1], one(x[2]), 4*one(x[1]), - 2*x[2], + -2*x[1],-one(x[2]),-4*one(x[1]), + 2*x[2], + 2*x[1], one(x[2]), 4*one(x[1]), - 2*x[2],) +Δu_ten23(x) = TensorValue{2,3}( 2, -2, -2, 2, 2, -2) + u_sten(x) = SymTensorValue( x[1]^2 + x[2], 4*x[1] - x[2]^2, -4*x[1] + x[2]^2 ) ∇u_sten(x) = ThirdOrderTensorValue( 2*x[1], one(x[2]), 4*one(x[1]), - 2*x[2], 4*one(x[1]),-2*x[2], -4*one(x[1]), + 2*x[2] ) @@ -89,6 +98,9 @@ u_qten(x) = SymTracelessTensorValue( x[1]^3 + 2x[2]^3, 5*x[1]^3 - 7x[2]^3) 15x[1]^2,-21x[2]^2, -3x[1]^2, -6x[2]^2) Δu_qten(x) = SymTracelessTensorValue( 6x[1] + 12x[2], 30x[1] - 42x[2] ) +u_ten3(x) = ThirdOrderTensorValue{2,1,2}( x[1]^2 + x[2], 4*x[1] - 
x[2]^2, -x[1]^2 - x[2], -4*x[1] + x[2]^2 ) +Δu_ten3(x) = ThirdOrderTensorValue{2,1,2}( 2, -2, -2, 2 ) + xs = [ Point(1.,1.), Point(2.,0.), Point(0.,3.), Point(-1.,3.)] for x in xs @test ∇(u_scal)(x) == ∇u_scal(x) @@ -103,6 +115,11 @@ for x in xs @test (∇⋅u_ten)(x) == tr(∇u_ten(x)) @test Δ(u_ten)(x) == (∇⋅∇u_ten)(x) + @test ∇(u_ten23)(x) == ∇u_ten23(x) + @test Δ(u_ten23)(x) == Δu_ten23(x) + @test (∇⋅u_ten23)(x) == tr(∇u_ten23(x)) + @test Δ(u_ten23)(x) == (∇⋅∇u_ten23)(x) + @test ∇(u_sten)(x) == ∇u_sten(x) @test Δ(u_sten)(x) == Δu_sten(x) @test (∇⋅u_sten)(x) == tr(∇u_sten(x)) @@ -114,6 +131,8 @@ for x in xs @test (∇⋅u_qten)(x) == tr(∇u_qten(x)) #@test Δ(u_qten)(x) == (∇⋅∇u_qten)(x) @test get_array(Δ(u_qten)(x)) == get_array((∇⋅∇u_qten)(x)) + + @test Δ(u_ten3)(x) == Δu_ten3(x) end u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) From a4149f2917b1be77229a6f4af84193f3fb95ec57 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 29 Oct 2024 14:28:24 +1100 Subject: [PATCH 47/85] remove dev comment --- src/ODEs/TimeDerivatives.jl | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/ODEs/TimeDerivatives.jl b/src/ODEs/TimeDerivatives.jl index 06d7b65f9..78f7608d7 100644 --- a/src/ODEs/TimeDerivatives.jl +++ b/src/ODEs/TimeDerivatives.jl @@ -126,16 +126,6 @@ function _time_derivative(T::Type{<:MultiValue}, f, t, x) T(ForwardDiff.derivative(partial, t)) end -#function _time_derivative(T::Type{<:VectorValue}, f, t, x) -# partial(t) = get_array(f(t)(x)) -# VectorValue(ForwardDiff.derivative(partial, t)) -#end -# -#function _time_derivative(T::Type{<:TensorValue}, f, t, x) -# partial(t) = get_array(f(t)(x)) -# TensorValue(ForwardDiff.derivative(partial, t)) -#end - ########################################## # Specialisation for `TimeSpaceFunction` # ########################################## From 680d17e5c2eaedcf004bdbe3e75be156b7939146 Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Tue, 29 Oct 2024 15:51:56 +1100 Subject: [PATCH 48/85] add test coverage for 
sym tensors MonomialBases --- test/PolynomialsTests/MonomialBasesTests.jl | 71 ++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/test/PolynomialsTests/MonomialBasesTests.jl b/test/PolynomialsTests/MonomialBasesTests.jl index 5dab24542..e087d71b8 100644 --- a/test/PolynomialsTests/MonomialBasesTests.jl +++ b/test/PolynomialsTests/MonomialBasesTests.jl @@ -120,7 +120,7 @@ g = G[ (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0), (3.0, 2.0, 0.0, 0.0), (0.0, 0.0, 3.0, 2.0), (0.0, 6.0, 0.0, 0.0), (0.0, 0.0, 0.0, 6.0), - (9.0, 12.0, 0.0, 0.0), (0.0, 0.0, 9.0, 12.0)] + (9.0, 12.0, 0.0, 0.0),(0.0, 0.0, 9.0, 12.0)] bx = repeat(permutedims(v),np) ∇bx = repeat(permutedims(g),np) @@ -166,6 +166,58 @@ bx = repeat(permutedims(v),np) test_field_array(b,x,bx,grad=∇bx) test_field_array(b,x[1],bx[1,:],grad=∇bx[1,:]) +# SymTensor-valued P space + +order = 1 +V = SymTensorValue{2,Float64} +G = gradient_type(V,xi) +filter = (e,o) -> sum(e) <= o +b = MonomialBasis{2}(V,order,filter) + +v = V[(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), + (2.0, 0.0, 0.0), (0.0, 2.0, 0.0), (0.0, 0.0, 2.0), + (3.0, 0.0, 0.0), (0.0, 3.0, 0.0), (0.0, 0.0, 3.0)] + +g = G[(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0), + (0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)] + +bx = repeat(permutedims(v),np) +∇bx = repeat(permutedims(g),np) +test_field_array(b,x,bx,grad=∇bx) +test_field_array(b,x[1],bx[1,:],grad=∇bx[1,:]) + +# SymTracelessTensor-valued P space + +order = 1 +V = SymTracelessTensorValue{2,Float64} +G = gradient_type(V,xi) +filter = (e,o) -> sum(e) <= o +b = MonomialBasis{2}(V,order,filter) + +v = V[(1.0, 0.0), (0.0, 1.0), + (2.0, 0.0), (0.0, 2.0), + (3.0, 0.0), (0.0, 3.0)] + +g = 
G[(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), + (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1., 0.0), + (0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0), + (0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.), + (0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0)] + +bx = repeat(permutedims(v),np) +∇bx = repeat(permutedims(g),np) +test_field_array(b,x,bx,grad=∇bx) +test_field_array(b,x[1],bx[1,:],grad=∇bx[1,:]) + + order = 1 b = MonomialBasis{1}(Float64,order) @test evaluate(b,Point{1,Float64}[(0,),(1,)]) == [1.0 0.0; 1.0 1.0] @@ -173,4 +225,21 @@ b = MonomialBasis{1}(Float64,order) b = MonomialBasis{0}(VectorValue{2,Float64},order) @test evaluate(b,Point{0,Float64}[(),()]) == VectorValue{2,Float64}[(1.0, 0.0) (0.0, 1.0); (1.0, 0.0) (0.0, 1.0)] +b = MonomialBasis{0}(TensorValue{2,2,Float64},order) +@test evaluate(b,Point{0,Float64}[(),()]) == TensorValue{2,2,Float64}[ + (1.0, 0.0, 0.0, 0.0) (0.0, 1.0, 0.0, 0.0) (0.0, 0.0, 1.0, 0.0) (0.0, 0.0, 0.0, 1.0); + (1.0, 0.0, 0.0, 0.0) (0.0, 1.0, 0.0, 0.0) (0.0, 0.0, 1.0, 0.0) (0.0, 0.0, 0.0, 1.0) +] + +b = MonomialBasis{0}(SymTensorValue{2,Float64},order) +@test evaluate(b,Point{0,Float64}[(),()]) == SymTensorValue{2,Float64}[ + (1.0, 0.0, 0.0) (0.0, 1.0, 0.0) (0.0, 0.0, 1.0); + (1.0, 0.0, 0.0) (0.0, 1.0, 0.0) (0.0, 0.0, 1.0) +] + +b = MonomialBasis{0}(SymTracelessTensorValue{2,Float64},order) +@test evaluate(b,Point{0,Float64}[(),()]) == SymTracelessTensorValue{2,Float64}[ + (1.0, 0.0) (0.0, 1.0); (1.0, 0.0) (0.0, 1.0) +] + end # module From 8e7ddc60e3d5e78b6dc8801c5e9f8a3aa5c54f1d Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Wed, 30 Oct 2024 12:30:29 +1100 Subject: [PATCH 49/85] fix ODE SymTracelessTensorValue test --- test/ODEsTests/TimeDerivativesTests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ODEsTests/TimeDerivativesTests.jl b/test/ODEsTests/TimeDerivativesTests.jl index 28c80f6d6..5ae3971be 100644 --- a/test/ODEsTests/TimeDerivativesTests.jl +++ 
b/test/ODEsTests/TimeDerivativesTests.jl @@ -96,7 +96,7 @@ f1(t) = x -> SymTracelessTensorValue(x[1] * t, x[2] * t^2) ∂tf1(t) = x -> SymTracelessTensorValue(x[1], 2 * x[2] * t) for (f, ∂tf) in ((f1, ∂tf1),) - dtf(t) = x -> SymTensorValue(ForwardDiff.derivative(t -> get_array(f(t)(x)), t)) + dtf(t) = x -> SymTracelessTensorValue(ForwardDiff.derivative(t -> get_array(f(t)(x)), t)) tv = rand(Float64) xv = Point(rand(Float64, 2)...) From dfa71c4b95a3991bdb675f9161afac7e1497912c Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 31 Oct 2024 16:06:40 +1100 Subject: [PATCH 50/85] use MVector instead of julia arrays in low-level evaluations --- src/FESpaces/CLagrangianFESpaces.jl | 4 ++-- src/FESpaces/FESpaces.jl | 1 + src/Polynomials/ModalC0Bases.jl | 4 ++-- src/Polynomials/MonomialBases.jl | 8 ++++---- src/Polynomials/Polynomials.jl | 1 + src/ReferenceFEs/LagrangianDofBases.jl | 4 ++-- src/ReferenceFEs/ReferenceFEs.jl | 1 + 7 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/FESpaces/CLagrangianFESpaces.jl b/src/FESpaces/CLagrangianFESpaces.jl index a631c6916..706c73209 100644 --- a/src/FESpaces/CLagrangianFESpaces.jl +++ b/src/FESpaces/CLagrangianFESpaces.jl @@ -218,7 +218,7 @@ function _generate_node_to_dof_glue_component_major( node_and_comp_to_dof = Vector{T}(undef,nnodes) nfree_dofs = 0 ndiri_dofs = 0 - m = zeros(Int32, ncomps) + m = zero(MVector{ncomps, Int32}) for (node,tag) in enumerate(node_to_tag) if tag == UNSET for comp in 1:ncomps @@ -245,7 +245,7 @@ function _generate_node_to_dof_glue_component_major( end end end - node_and_comp_to_dof[node] = T(m...) 
+ node_and_comp_to_dof[node] = Tuple(m) end glue = NodeToDofGlue( free_dof_to_node, diff --git a/src/FESpaces/FESpaces.jl b/src/FESpaces/FESpaces.jl index 31d446d1e..b42054859 100644 --- a/src/FESpaces/FESpaces.jl +++ b/src/FESpaces/FESpaces.jl @@ -10,6 +10,7 @@ using Test using FillArrays using SparseArrays using LinearAlgebra +using StaticArrays using ForwardDiff using Gridap.Helpers diff --git a/src/Polynomials/ModalC0Bases.jl b/src/Polynomials/ModalC0Bases.jl index 834b742cc..b98b6515f 100644 --- a/src/Polynomials/ModalC0Bases.jl +++ b/src/Polynomials/ModalC0Bases.jl @@ -393,7 +393,7 @@ end @inline function _set_value_mc0!(v::AbstractVector{V},s::T,k,l) where {V,T} ncomp = num_indep_components(V) - m = zeros(T,ncomp) + m = zero(MVector{ncomp,T}) z = zero(T) js = 1:ncomp for j in js @@ -402,7 +402,7 @@ end end @inbounds m[j] = s i = k+l*(j-1) - @inbounds v[i] = V(m...) + @inbounds v[i] = Tuple(m) end k+1 end diff --git a/src/Polynomials/MonomialBases.jl b/src/Polynomials/MonomialBases.jl index 502568ba0..25ccd9a36 100644 --- a/src/Polynomials/MonomialBases.jl +++ b/src/Polynomials/MonomialBases.jl @@ -406,16 +406,16 @@ function _evaluate_nd!( end function _set_value!(v::AbstractVector{V},s::T,k) where {V,T} - ncomp = num_indep_components(V) - m = zeros(T,ncomp) + ncomp::Int = num_indep_components(V) + m = zero(MVector{ncomp,T}) z = zero(T) - js = 1:ncomp + js = SOneTo(ncomp)#1:ncomp for j in js for i in js @inbounds m[i] = z end m[j] = s - v[k] = V(m...) + v[k] = Tuple(m) k += 1 end k diff --git a/src/Polynomials/Polynomials.jl b/src/Polynomials/Polynomials.jl index 826e6f07f..d2c622288 100644 --- a/src/Polynomials/Polynomials.jl +++ b/src/Polynomials/Polynomials.jl @@ -9,6 +9,7 @@ module Polynomials using DocStringExtensions using LinearAlgebra: mul! 
+using StaticArrays using Gridap.Helpers using Gridap.Arrays using Gridap.TensorValues diff --git a/src/ReferenceFEs/LagrangianDofBases.jl b/src/ReferenceFEs/LagrangianDofBases.jl index 28666c197..cd882aa10 100644 --- a/src/ReferenceFEs/LagrangianDofBases.jl +++ b/src/ReferenceFEs/LagrangianDofBases.jl @@ -74,7 +74,7 @@ function _generate_dof_layout_node_major(::Type{T},nnodes::Integer) where T<:Mul dof_to_comp = zeros(Int,ndofs) dof_to_node = zeros(Int,ndofs) node_and_comp_to_dof = Vector{V}(undef,nnodes) - m = zeros(Int,ncomps) + m = zero(MVector{ncomps,Int}) for node in 1:nnodes for comp in 1:ncomps o = nnodes*(comp-1) @@ -83,7 +83,7 @@ function _generate_dof_layout_node_major(::Type{T},nnodes::Integer) where T<:Mul dof_to_node[dof] = node m[comp] = dof end - node_and_comp_to_dof[node] = V(m...) + node_and_comp_to_dof[node] = Tuple(m) end (dof_to_node, dof_to_comp, node_and_comp_to_dof) end diff --git a/src/ReferenceFEs/ReferenceFEs.jl b/src/ReferenceFEs/ReferenceFEs.jl index dc3d56205..ab0dd1e91 100644 --- a/src/ReferenceFEs/ReferenceFEs.jl +++ b/src/ReferenceFEs/ReferenceFEs.jl @@ -8,6 +8,7 @@ module ReferenceFEs using Test using DocStringExtensions using LinearAlgebra +using StaticArrays using Combinatorics using FillArrays using ..Gridap From 0db27ac0a2655787ac40561e2f10c271fb869f6d Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Mon, 4 Nov 2024 10:21:39 +1100 Subject: [PATCH 51/85] revert divergence added constraints --- src/Fields/AutoDiff.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Fields/AutoDiff.jl b/src/Fields/AutoDiff.jl index 1aadf2d09..b3b39b657 100644 --- a/src/Fields/AutoDiff.jl +++ b/src/Fields/AutoDiff.jl @@ -78,7 +78,7 @@ function divergence(f::Function,x::Point) divergence(f,x,return_value(f,x)) end -function divergence(f::Function,x::Point{D},fx::VectorValue{D}) where D +function divergence(f::Function,x::Point,fx::VectorValue) tr(gradient(f,x,fx)) end @@ -92,7 +92,7 @@ function 
divergence(f::Function,x::Point{D},fx::S) where S<:MultiValue{Tuple{D,A TensorValue{A,B,T}( ntuple(k -> sum(i-> a[(k-1)*D+i,i], 1:D),A*B) ) end -function divergence(f::Function,x::Point{2},fx::TensorValue{2,2}) +function divergence(f::Function,x::Point,fx::TensorValue{2,2}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( @@ -101,7 +101,7 @@ function divergence(f::Function,x::Point{2},fx::TensorValue{2,2}) ) end -function divergence(f::Function,x::Point{3},fx::TensorValue{3,3}) +function divergence(f::Function,x::Point,fx::TensorValue{3,3}) g(x) = SVector(f(x).data) a = ForwardDiff.jacobian(g,get_array(x)) VectorValue( From 5a9aa7e488a37814ff71e15af340d6808a2cb50b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 11:42:56 +1100 Subject: [PATCH 52/85] Deactivated unbound-args tests --- test/Aqua.jl | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/Aqua.jl b/test/Aqua.jl index 38fd18af5..361fa29be 100644 --- a/test/Aqua.jl +++ b/test/Aqua.jl @@ -1,3 +1,4 @@ +module AquaTests using Gridap using Aqua @@ -5,4 +6,28 @@ using Aqua Aqua.test_all( Gridap, ambiguities = false, + unbound_args = false ) + +""" +Comment: Ambiguities + +I think all ambiguities are either false positives or never used in practice... I've seen +other packages set `ambiguities = false` as well, so I think it's fine to do so. +""" + +""" +Comment: Unbound Args + +We do have some unbound type warnings. However, these are calls which are never executed in +the code. + +They mostly involve things like `f(a::T...) where T`, which trigger the warning +in the case were the function `f` is called with no arguments. This can be fixed as described +in https://juliatesting.github.io/Aqua.jl/stable/unbound_args/#test_unbound_args, but it is quite +a pain to do so... + +I guess something to think about in the future. 
+""" + +end \ No newline at end of file From 0652dc2423984fe742d8706c52f06aeb5d833042 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 11:44:25 +1100 Subject: [PATCH 53/85] Fixed undefined exports --- src/Geometry/Geometry.jl | 2 -- src/ODEs/ODEs.jl | 2 -- src/ReferenceFEs/ReferenceFEs.jl | 2 -- 3 files changed, 6 deletions(-) diff --git a/src/Geometry/Geometry.jl b/src/Geometry/Geometry.jl index 8723502eb..471b420ff 100644 --- a/src/Geometry/Geometry.jl +++ b/src/Geometry/Geometry.jl @@ -97,8 +97,6 @@ export is_oriented export is_regular export expand_cell_data export compress_cell_data -export compress_contributions -export compress_ids export UnstructuredGridTopology diff --git a/src/ODEs/ODEs.jl b/src/ODEs/ODEs.jl index daec78a48..30f294d5d 100644 --- a/src/ODEs/ODEs.jl +++ b/src/ODEs/ODEs.jl @@ -72,8 +72,6 @@ export StageOperator export NonlinearStageOperator export LinearStageOperator -export massless_residual_weights - include("ODESolvers.jl") export ODESolver diff --git a/src/ReferenceFEs/ReferenceFEs.jl b/src/ReferenceFEs/ReferenceFEs.jl index ab0dd1e91..24cf3d310 100644 --- a/src/ReferenceFEs/ReferenceFEs.jl +++ b/src/ReferenceFEs/ReferenceFEs.jl @@ -104,12 +104,10 @@ export get_face_moments export get_face_nodes_dofs export get_nodes export evaluate! 
-export evaluate_dof export return_cache export return_type export test_dof export test_dof_array -# export evaluate_dof_array export ReferenceFE export ReferenceFEName From 45ff3e39db01766eb695c5b54c66cdc3ea094262 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 11:57:56 +1100 Subject: [PATCH 54/85] Fixed unbound dependencies by adding compats for Julia base libs to 1 --- Project.toml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 49303019f..d185b8358 100644 --- a/Project.toml +++ b/Project.toml @@ -46,13 +46,18 @@ ForwardDiff = "0.10.10" JLD2 = "0.1.11, 0.3, 0.4, 0.5" JSON = "0.21.0" LineSearches = "7.0.1" +LinearAlgebra = "1" NLsolve = "4.3.0" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.12" Preferences = "1.4" QuadGK = "2.3.1, 2.4" +Random = "1" +SparseArrays = "1" SparseMatricesCSR = "0.6.4" StaticArrays = "0.12.1, 1.0" +Statistics = "1" +Test = "1" WriteVTK = "1.12.0" julia = "1.3" @@ -61,4 +66,4 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Aqua","Test"] +test = ["Aqua", "Test"] From e4645af772ea8319a484b4be567e1e40253c3109 Mon Sep 17 00:00:00 2001 From: Jordi Manyer Fuertes Date: Tue, 5 Nov 2024 13:42:39 +1100 Subject: [PATCH 55/85] Update NEWS.md --- NEWS.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS.md b/NEWS.md index 6a84321ed..1130e10a2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -20,6 +20,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added `AbstractSymTensorValue`, an abstract type for second order symmetric tensors, and `SymTracelessTensorValue` (aliased to `QTensorValue`), a type for traceless symmetric tensors. `SymTensorValue` is now subtype of `AbstractSymTensorValue`. - A convergence test for Poisson problem of `QTensorValue` unknown field validates the implementation. + ### Fixed + + - Fixed constructor of RungeKutta with only one solver. 
Since PR[#999](https://github.com/gridap/Gridap.jl/pull/999). + ## [0.18.6] - 2024-08-29 ### Fixed From 60fab730b9425f4735ce783a60d5bb79b35ae258 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 14:27:04 +1100 Subject: [PATCH 56/85] Made benchmarks extendable --- benchmark/README.md | 53 +++++++++++++++++++++++++++++++++ benchmark/benchmarks.jl | 31 +++++-------------- benchmark/bm/bm_assembly.jl | 59 +++++++++++++++++++++++++++++++++++++ benchmark/drivers.jl | 22 -------------- 4 files changed, 120 insertions(+), 45 deletions(-) create mode 100644 benchmark/README.md create mode 100644 benchmark/bm/bm_assembly.jl delete mode 100644 benchmark/drivers.jl diff --git a/benchmark/README.md b/benchmark/README.md new file mode 100644 index 000000000..989a1865f --- /dev/null +++ b/benchmark/README.md @@ -0,0 +1,53 @@ +# Benchmarking Suite + +The following benchmarking suite uses `PkgBenchmark.jl` and `BenchmarkTools.jl` to compare the performance of different branches of Gridap. + +## Running the benchmarks + +### Running as CI job + +The benchmarks are setup as a manual Github Actions workflow in `.github/workflows/benchmark.yml`. To run the workflow, you will need administrator access to the Gridap repository, then follow instructions [here](https://docs.github.com/en/actions/managing-workflow-runs-and-deployments/managing-workflow-runs/manually-running-a-workflow). + +The workflow will has two inputs: `target` and `base`, which are the branches/tags/commits you want to compare. The workflow will run the benchmarks on the `target` branch and compare them with the `base` branch (`master` by default). + +### Running Locally + +To run the benchmarks locally, you can have a look at the [documentation for `PkgBenchmark.jl`](https://juliaci.github.io/PkgBenchmark.jl/stable/run_benchmarks/). + +Alternatively, you can run the CI script locally from a local copy of the repository. 
From the Gridap root directory, run the following commands: + +```bash +# Instantiate Gridap and Gridap/benchmark +julia -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' +julia --project=benchmark/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' +# Run the benchmarks +export BM_TARGET = "your target branch" +export BM_BASE = "your reference branch" +julia --project=benchmark/ --color=yes benchmark/run_benchmarks.jl +``` + +where `BM_TARGET` and `BM_BASE` are the branches/tags/commits you want to compare. + +## Adding a new benchmark + +To add a new benchmark suite `xyx`, create a new file `bm/bm_xyx.jl` that with the following structure: + +```julia +module bm_xyx + +using PkgBenchmark, BenchmarkTools + +const SUITE = BenchmarkGroup() + +[... Add your benchmarks here ...] + +end # module +``` + +Then, add the following line to the `benchmarks.jl` file: + +```julia +@include_bm "bm_xyz" +``` + +This should automatically include the new benchmarks into the global benchmarking suite. 
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index e14106e0a..ad8aa0aa6 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -1,29 +1,14 @@ using BenchmarkTools +using PkgBenchmark using Gridap -include("drivers.jl") +macro include_bm(SUITE,name) + quote + include("bm/$($name).jl") + SUITE["$($name)"] = $(Symbol(name)).SUITE + end +end const SUITE = BenchmarkGroup() -for (D,ncells) in [(2,20),(3,8)] - for order in [1,2,3] - basis_cases = [ - ("lagrangian",lagrangian,Float64,order), - ("vector_lagragian",lagrangian,VectorValue{D,Float64},order), - ("raviart_thomas",raviart_thomas,Float64,order-1), - ] - for (basis_name,basis,T,degree) in basis_cases - biform_cases = [ - ("mass",mass,2*order), - ("laplacian",laplacian,2*(order-1)), - ] - for (biform_name,biform,qdegree) in biform_cases - reffe = ReferenceFE(basis, T, degree) - name = "assembly_$(D)D_$(basis_name)_$(biform_name)_$(order)" - SUITE[name] = @benchmarkable bm_matrix_assembly( - $(D),$(ncells),$(reffe),$(qdegree),$(biform) - ) - end - end - end -end +@include_bm SUITE "bm_assembly" diff --git a/benchmark/bm/bm_assembly.jl b/benchmark/bm/bm_assembly.jl new file mode 100644 index 000000000..80b378163 --- /dev/null +++ b/benchmark/bm/bm_assembly.jl @@ -0,0 +1,59 @@ +module bm_assembly + +using PkgBenchmark, BenchmarkTools +using Gridap +using Gridap.Geometry + +mass(u,v,dΩ) = ∫(u⋅v)dΩ +laplacian(u,v,dΩ) = ∫(∇(u)⊙∇(v))dΩ +graddiv(u,v,dΩ) = ∫((∇⋅u)⋅(∇⋅v))dΩ + +function driver( + Ω :: Triangulation, + reffe :: Tuple, + qdegree :: Integer, + biform :: Function +) + model = get_background_model(Ω) + dΩ = Measure(Ω,qdegree) + V = TestFESpace(model, reffe) + a(u,v) = biform(u,v,dΩ) + A = assemble_matrix(a, V, V) +end + +const SUITE = BenchmarkGroup() + +for (D,n) in [(2,10),(3,6)] + domain = Tuple([repeat([0,1], D)...]) + partition = Tuple(fill(n, D)) + model = UnstructuredDiscreteModel(CartesianDiscreteModel(domain, partition)) + trian_cases = [ + ("bulk",Triangulation(model)), + 
("view",Triangulation(model,collect(1:div(n^D,2)))), # About half of the cells + ] + for (trian_name, trian) in trian_cases + for order in [1,2,3] + basis_cases = [ + ("lagrangian",lagrangian,Float64,order), + ("vector_lagragian",lagrangian,VectorValue{D,Float64},order), + ("raviart_thomas",raviart_thomas,Float64,order-1), + ] + for (basis_name,basis,T,degree) in basis_cases + biform_cases = [ + ("mass",mass,2*order), + ("laplacian",laplacian,2*(order-1)), + ("graddiv",graddiv,2*(order-1)), + ] + for (biform_name,biform,qdegree) in biform_cases + reffe = ReferenceFE(basis, T, degree) + name = "assembly_$(D)D_$(trian_name)_$(basis_name)_$(biform_name)_$(order)" + SUITE[name] = @benchmarkable driver( + $(trian),$(reffe),$(qdegree),$(biform) + ) + end + end + end + end +end + +end # module \ No newline at end of file diff --git a/benchmark/drivers.jl b/benchmark/drivers.jl deleted file mode 100644 index 6ac5bf6a5..000000000 --- a/benchmark/drivers.jl +++ /dev/null @@ -1,22 +0,0 @@ - -using Gridap.Geometry - -mass(u,v,dΩ) = ∫(u⋅v)dΩ -laplacian(u,v,dΩ) = ∫(∇(u)⊙∇(v))dΩ - -function bm_matrix_assembly( - D :: Integer, - n :: Integer, - reffe :: Tuple, - qdegree :: Integer, - biform :: Function -) - domain = Tuple([repeat([0,1], D)...]) - partition = Tuple(fill(n, D)) - model = UnstructuredDiscreteModel(CartesianDiscreteModel(domain, partition)) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - V = TestFESpace(model, reffe) - a(u,v) = biform(u,v,dΩ) - A = assemble_matrix(a, V, V) -end From fa0e471c672e0da64896dbde6320ee71334ed964 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 14:35:30 +1100 Subject: [PATCH 57/85] Minor --- benchmark/run_benchmarks.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index db3cc5fa3..0db9aedab 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -12,15 +12,15 @@ config_kwargs = (; ) if haskey(ENV,"BM_TARGET") # 
Provided by CI workflow - target = BenchmarkConfig(config_kwargs..., id = ENV["BM_TARGET"]) + target = BenchmarkConfig(;config_kwargs..., id = ENV["BM_TARGET"]) else # Default to the current commit - target = BenchmarkConfig(config_kwargs...) + target = BenchmarkConfig(;config_kwargs...) end if haskey(ENV,"BM_BASE") # Provided by CI workflow - base = BenchmarkConfig(config_kwargs..., id = ENV["BM_BASE"]) + base = BenchmarkConfig(;config_kwargs..., id = ENV["BM_BASE"]) else # Default to master - base = BenchmarkConfig(config_kwargs..., id = "master") + base = BenchmarkConfig(;config_kwargs..., id = "master") end results = judge(Gridap, target, base) From 8299da7d78f62b055cf46cdbb580e098da44437d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 14:53:21 +1100 Subject: [PATCH 58/85] Trigger CI for the first time --- .github/workflows/benchmark.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e7c29485a..8581f6420 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -12,6 +12,7 @@ on: required: true default: 'master' type: string + pull_request: jobs: benchmark: From 040e278cda47553508a78f33eec3fef50c27317c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 14:56:47 +1100 Subject: [PATCH 59/85] Remove pr trigger now that the workflow is registered --- .github/workflows/benchmark.yml | 1 - benchmark/README.md | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 8581f6420..e7c29485a 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -12,7 +12,6 @@ on: required: true default: 'master' type: string - pull_request: jobs: benchmark: diff --git a/benchmark/README.md b/benchmark/README.md index 989a1865f..e2b35fd2d 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -10,6 +10,12 @@ The benchmarks are setup 
as a manual Github Actions workflow in `.github/workflo The workflow will has two inputs: `target` and `base`, which are the branches/tags/commits you want to compare. The workflow will run the benchmarks on the `target` branch and compare them with the `base` branch (`master` by default). +You can also run the workflow using Github CLI and the following command: + +```bash +gh workflow run benchmark.yml -f target=your_target_branch -f base=your_base_branch +``` + ### Running Locally To run the benchmarks locally, you can have a look at the [documentation for `PkgBenchmark.jl`](https://juliaci.github.io/PkgBenchmark.jl/stable/run_benchmarks/). From c657b3a9049858c5297ba81418664228bc0c556a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 5 Nov 2024 15:15:29 +1100 Subject: [PATCH 60/85] Updated NEWS.md --- NEWS.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/NEWS.md b/NEWS.md index aae1f43b6..72dd89b72 100644 --- a/NEWS.md +++ b/NEWS.md @@ -19,11 +19,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Implemented automatic differentiation `gradient` and `laplacian` for second order tensor, and `divergence` for third order tensors. - Added `AbstractSymTensorValue`, an abstract type for second order symmetric tensors, and `SymTracelessTensorValue` (aliased to `QTensorValue`), a type for traceless symmetric tensors. `SymTensorValue` is now subtype of `AbstractSymTensorValue`. - A convergence test for Poisson problem of `QTensorValue` unknown field validates the implementation. -- Added support for benchmarking, through PkgBenchmark.jl. Since PR[#1039](https://github.com/gridap/Gridap.jl/pull/1039). +- Added support for benchmarking, through `PkgBenchmark.jl`. Since PR[#1039](https://github.com/gridap/Gridap.jl/pull/1039). +- Added code quality tests, through `Aqua.jl`. Since PR[#1039](https://github.com/gridap/Gridap.jl/pull/1039). 
- ### Fixed +### Fixed - - Fixed constructor of RungeKutta with only one solver. Since PR[#999](https://github.com/gridap/Gridap.jl/pull/999). +- Fixed constructor of RungeKutta with only one solver. Since PR[#999](https://github.com/gridap/Gridap.jl/pull/999). ## [0.18.6] - 2024-08-29 From ffb8cfc2563628f94d0eb714cfd5090b972cb54a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 10:02:40 +1100 Subject: [PATCH 61/85] CI overhaul - Updated actions to newer versions - Expanded tests to windows and macos - Consolidated x64 and x86 tests in a single file - Moved documentation building to a new file - New Downgrade workflow - New Invalidations workflow --- .github/workflows/Documenter.yml | 26 +++++++++ .github/workflows/Downgrade.yml | 36 ++++++++++++ .github/workflows/Invalidations.yml | 37 ++++++++++++ .github/workflows/benchmark.yml | 5 ++ .github/workflows/ci.yml | 91 +++++++++++++---------------- .github/workflows/ci_x86.yml | 39 ------------- 6 files changed, 143 insertions(+), 91 deletions(-) create mode 100644 .github/workflows/Documenter.yml create mode 100644 .github/workflows/Downgrade.yml create mode 100644 .github/workflows/Invalidations.yml delete mode 100644 .github/workflows/ci_x86.yml diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml new file mode 100644 index 000000000..9ba1ec69d --- /dev/null +++ b/.github/workflows/Documenter.yml @@ -0,0 +1,26 @@ +name: Documentation + +on: [push, pull_request] + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: '1.10' + - run: | + julia --project=docs -e ' + using Pkg + Pkg.develop(PackageSpec(path=pwd())) + Pkg.instantiate()' + - run: julia --project=docs docs/make.jl + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + 
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml new file mode 100644 index 000000000..5ad50a817 --- /dev/null +++ b/.github/workflows/Downgrade.yml @@ -0,0 +1,36 @@ +name: Downgrade + +on: [pull_request, workflow_dispatch] + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + downgrade_test: + name: Downgrade ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + version: + - '1.10' + os: + - ubuntu-latest + arch: + - x64 + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 + with: + version: ${{ matrix.version }} + arch: ${{ matrix.arch }} + - uses: julia-actions/cache@v2 + - uses: julia-actions/julia-downgrade-compat@v1 + with: # As per documentation, we exclude packages within the Julia standard library + skip: LinearAlgebra,SparseArrays,Random,Statistics,Test + - uses: julia-actions/julia-buildpkg@v1 + - uses: julia-actions/julia-runtest@v1 + with: + coverage: false diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml new file mode 100644 index 000000000..d36b45b52 --- /dev/null +++ b/.github/workflows/Invalidations.yml @@ -0,0 +1,37 @@ +name: Invalidations + +on: [pull_request] + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + invalidations: + # Only run on PRs to the default branch. 
+ if: github.base_ref == github.event.repository.default_branch + runs-on: ubuntu-latest + steps: + - uses: julia-actions/setup-julia@v2 + with: + version: '1' + - uses: actions/checkout@v4 + - uses: julia-actions/julia-buildpkg@v1 + - uses: julia-actions/julia-invalidations@v1 + id: invs_pr + + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.repository.default_branch }} + - uses: julia-actions/julia-buildpkg@v1 + - uses: julia-actions/julia-invalidations@v1 + id: invs_default + + - name: Report invalidation counts + run: | + echo "Invalidations on default branch: ${{ steps.invs_default.outputs.total }} (${{ steps.invs_default.outputs.deps }} via deps)" >> $GITHUB_STEP_SUMMARY + echo "This branch: ${{ steps.invs_pr.outputs.total }} (${{ steps.invs_pr.outputs.deps }} via deps)" >> $GITHUB_STEP_SUMMARY + - name: Check if the PR does increase number of invalidations + if: steps.invs_pr.outputs.total > steps.invs_default.outputs.total + run: exit 1 diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e7c29485a..4617dfc91 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -26,6 +26,11 @@ jobs: - x64 steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - run: | + git fetch --tags + git branch --create-reflog main origin/main - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c0b104cb..5d6d65676 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,12 @@ name: CI -on: [push, pull_request] + +on: [push, pull_request, workflow_dispatch] + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: test: name: Tests ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} @@ -8,35 +15,47 @@ jobs: fail-fast: false matrix: version: - - '1.8' + - '1.10' os: - 
ubuntu-latest + - windows-latest + - macos-latest arch: - x64 + - x86 + - aarch64 + exclude: # Exclude some combinations + - os: ubuntu-latest + arch: aarch64 + - os: windows-latest + arch: aarch64 + - os: macos-latest + arch: x64 + - os: macos-latest + arch: x86 + include: # Legacy support for old Julia versions + - version: '1.8' + os: ubuntu-latest + arch: x64 + - version: '1.9' + os: ubuntu-latest + arch: x64 steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- + - uses: julia-actions/cache@v2 - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-runtest@v1 - uses: julia-actions/julia-processcoverage@v1 - uses: codecov/codecov-action@v4 with: file: lcov.info + verbose: true token: ${{ secrets.CODECOV_TOKEN }} - + drivers: name: Drivers ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} @@ -44,54 +63,22 @@ jobs: fail-fast: false matrix: version: - - '1.8' + - '1.10' os: - ubuntu-latest arch: - x64 steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- + - uses: julia-actions/cache@v2 - uses: 
julia-actions/julia-buildpkg@v1 - run: | julia --color=yes --project=. --check-bounds=yes --depwarn=error -e ' using Pkg; Pkg.instantiate()' - run: | julia --color=yes --project=. --check-bounds=yes --depwarn=error -e ' - (1,) .== 1; include("test/GridapTests/runtests.jl")' - - docs: - name: Documentation - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: '1.8' - - run: | - julia --project=docs -e ' - using Pkg - Pkg.develop(PackageSpec(path=pwd())) - Pkg.instantiate()' -# - run: | -# julia --project=docs -e ' -# using Documenter: doctest -# using Gridap -# doctest(Gridap)' - - run: julia --project=docs docs/make.jl - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + include("test/GridapTests/runtests.jl")' diff --git a/.github/workflows/ci_x86.yml b/.github/workflows/ci_x86.yml deleted file mode 100644 index 03ca2ab11..000000000 --- a/.github/workflows/ci_x86.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: CI_X86 -on: - push: - branches: - - master - pull_request: - branches: - - master -jobs: - test: - name: Tests ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - version: - - '1.8' - os: - - ubuntu-latest - arch: - - x86 - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: ${{ matrix.version }} - arch: ${{ matrix.arch }} - - uses: actions/cache@v1 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-runtest@v1 From 22ca48537745d0bdf77ce79a55c6dd950df5e21c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 
2024 13:10:05 +1100 Subject: [PATCH 62/85] Downgrade change: Restricted JDL2 compat to 0.2 or higher to align with DataStructures. --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index d185b8358..aa1307a23 100644 --- a/Project.toml +++ b/Project.toml @@ -43,7 +43,7 @@ FastGaussQuadrature = "0.4.2, 1" FileIO = "1.2.2, 1.3, 1.4" FillArrays = "0.8.4, 0.9, 0.10, 0.11, 0.12, 0.13, 1" ForwardDiff = "0.10.10" -JLD2 = "0.1.11, 0.3, 0.4, 0.5" +JLD2 = "0.2, 0.3, 0.4, 0.5" JSON = "0.21.0" LineSearches = "7.0.1" LinearAlgebra = "1" From fd5d59f397acb33613d0110727259486267a3945 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 15:12:51 +1100 Subject: [PATCH 63/85] Removed invalidations --- .github/workflows/Invalidations.yml | 37 ----------------------------- 1 file changed, 37 deletions(-) delete mode 100644 .github/workflows/Invalidations.yml diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml deleted file mode 100644 index d36b45b52..000000000 --- a/.github/workflows/Invalidations.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Invalidations - -on: [pull_request] - -# Cancel redundant CI tests automatically -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - invalidations: - # Only run on PRs to the default branch. 
- if: github.base_ref == github.event.repository.default_branch - runs-on: ubuntu-latest - steps: - - uses: julia-actions/setup-julia@v2 - with: - version: '1' - - uses: actions/checkout@v4 - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-invalidations@v1 - id: invs_pr - - - uses: actions/checkout@v4 - with: - ref: ${{ github.event.repository.default_branch }} - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-invalidations@v1 - id: invs_default - - - name: Report invalidation counts - run: | - echo "Invalidations on default branch: ${{ steps.invs_default.outputs.total }} (${{ steps.invs_default.outputs.deps }} via deps)" >> $GITHUB_STEP_SUMMARY - echo "This branch: ${{ steps.invs_pr.outputs.total }} (${{ steps.invs_pr.outputs.deps }} via deps)" >> $GITHUB_STEP_SUMMARY - - name: Check if the PR does increase number of invalidations - if: steps.invs_pr.outputs.total > steps.invs_default.outputs.total - run: exit 1 From 18c72549d276e728cfcb93fbbdfa191a6fe82945 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 15:23:54 +1100 Subject: [PATCH 64/85] Updated compats for QuadGK --- Project.toml | 2 +- src/CellData/CellData.jl | 7 ++++--- src/CellData/DomainContributions.jl | 1 - 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Project.toml b/Project.toml index aa1307a23..61a3a082b 100644 --- a/Project.toml +++ b/Project.toml @@ -51,7 +51,7 @@ NLsolve = "4.3.0" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.12" Preferences = "1.4" -QuadGK = "2.3.1, 2.4" +QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.20, 2.11" Random = "1" SparseArrays = "1" SparseMatricesCSR = "0.6.4" diff --git a/src/CellData/CellData.jl b/src/CellData/CellData.jl index 19624ec27..d96ecca48 100644 --- a/src/CellData/CellData.jl +++ b/src/CellData/CellData.jl @@ -9,6 +9,10 @@ using Test using DocStringExtensions using FillArrays +using NearestNeighbors +using StaticArrays +using DataStructures + using Gridap.Helpers using Gridap.Algebra 
using Gridap.Arrays @@ -17,9 +21,6 @@ using Gridap.Fields using Gridap.ReferenceFEs using Gridap.Geometry -using NearestNeighbors -using StaticArrays - import Gridap.Arrays: lazy_append import Gridap.Arrays: get_array import Gridap.Arrays: evaluate! diff --git a/src/CellData/DomainContributions.jl b/src/CellData/DomainContributions.jl index 3487f7c12..fbe9358d8 100644 --- a/src/CellData/DomainContributions.jl +++ b/src/CellData/DomainContributions.jl @@ -1,4 +1,3 @@ -using DataStructures """ """ From 90a0ca1d137eca7002d48d4acdde96cd3e3635ab Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 16:01:04 +1100 Subject: [PATCH 65/85] Updated compats for NLSolve --- .github/workflows/Documenter.yml | 15 +++++++-------- Project.toml | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml index 9ba1ec69d..b1c0a4f15 100644 --- a/.github/workflows/Documenter.yml +++ b/.github/workflows/Documenter.yml @@ -15,12 +15,11 @@ jobs: - uses: julia-actions/setup-julia@v2 with: version: '1.10' - - run: | - julia --project=docs -e ' - using Pkg - Pkg.develop(PackageSpec(path=pwd())) - Pkg.instantiate()' - - run: julia --project=docs docs/make.jl + - uses: julia-actions/cache@v2 + - name: Install dependencies + run: julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' + - name: Build and deploy env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # If authenticating with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # If authenticating with SSH deploy key + run: julia --project=docs/ docs/make.jl diff --git a/Project.toml b/Project.toml index 61a3a082b..90a3f942f 100644 --- a/Project.toml +++ b/Project.toml @@ -47,7 +47,7 @@ JLD2 = "0.2, 0.3, 0.4, 0.5" JSON = "0.21.0" LineSearches = "7.0.1" LinearAlgebra = "1" -NLsolve = "4.3.0" +NLsolve = "4.4" 
NearestNeighbors = "0.4.8" PolynomialBases = "0.4.12" Preferences = "1.4" From 8fb2ccf2ae7639101d63518c62e9f843b2c0f574 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 16:06:50 +1100 Subject: [PATCH 66/85] Updated compats for PolynomialBases --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 90a3f942f..6861c8f81 100644 --- a/Project.toml +++ b/Project.toml @@ -49,7 +49,7 @@ LineSearches = "7.0.1" LinearAlgebra = "1" NLsolve = "4.4" NearestNeighbors = "0.4.8" -PolynomialBases = "0.4.12" +PolynomialBases = "0.4.13" Preferences = "1.4" QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.20, 2.11" Random = "1" From 684a3e88871c64c91691dca5b2ab9fbe05a892de Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 17:07:43 +1100 Subject: [PATCH 67/85] Updated compats for BlockArrays and FillArrays --- Project.toml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Project.toml b/Project.toml index 6861c8f81..5cd34ede4 100644 --- a/Project.toml +++ b/Project.toml @@ -35,15 +35,15 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" AbstractTrees = "0.3.3, 0.4" Aqua = "0.8" BSON = "0.2.5, 0.3" -BlockArrays = "0.12.12, 0.13, 0.14, 0.15, 0.16" +BlockArrays = "0.16, 1" Combinatorics = "1.0.0" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" FastGaussQuadrature = "0.4.2, 1" FileIO = "1.2.2, 1.3, 1.4" -FillArrays = "0.8.4, 0.9, 0.10, 0.11, 0.12, 0.13, 1" +FillArrays = "0.13, 1" ForwardDiff = "0.10.10" -JLD2 = "0.2, 0.3, 0.4, 0.5" +JLD2 = "0.4.28, 0.5" JSON = "0.21.0" LineSearches = "7.0.1" LinearAlgebra = "1" @@ -51,15 +51,15 @@ NLsolve = "4.4" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.13" Preferences = "1.4" -QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.20, 2.11" +QuadGK = "2.4 - 2.11" Random = "1" SparseArrays = "1" SparseMatricesCSR = "0.6.4" -StaticArrays = "0.12.1, 1.0" +StaticArrays = "0.12.1, 1" Statistics = "1" Test = "1" -WriteVTK = "1.12.0" 
-julia = "1.3" +WriteVTK = "1.18" +julia = "1.6" [extras] Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" From d5f9c2711711fcc392309c591c98e91788a50b99 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 17:12:19 +1100 Subject: [PATCH 68/85] Minor --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 5cd34ede4..1e7051447 100644 --- a/Project.toml +++ b/Project.toml @@ -51,7 +51,7 @@ NLsolve = "4.4" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.13" Preferences = "1.4" -QuadGK = "2.4 - 2.11" +QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.10, 2.11" Random = "1" SparseArrays = "1" SparseMatricesCSR = "0.6.4" From 7c5a3726eb7281db2a438ca27d1940061e31f2b1 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 17:33:02 +1100 Subject: [PATCH 69/85] Minor --- Project.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Project.toml b/Project.toml index 1e7051447..54aad1d2b 100644 --- a/Project.toml +++ b/Project.toml @@ -35,16 +35,16 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" AbstractTrees = "0.3.3, 0.4" Aqua = "0.8" BSON = "0.2.5, 0.3" -BlockArrays = "0.16, 1" +BlockArrays = "0.16.10, 1" Combinatorics = "1.0.0" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" -FastGaussQuadrature = "0.4.2, 1" +FastGaussQuadrature = "0.4.5, 1" FileIO = "1.2.2, 1.3, 1.4" -FillArrays = "0.13, 1" -ForwardDiff = "0.10.10" +FillArrays = "0.13.8, 1" +ForwardDiff = "0.10.14" JLD2 = "0.4.28, 0.5" -JSON = "0.21.0" +JSON = "0.21" LineSearches = "7.0.1" LinearAlgebra = "1" NLsolve = "4.4" From 07b69006e23463e9dcf2d070f07f072f1d740e11 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 6 Nov 2024 17:46:24 +1100 Subject: [PATCH 70/85] Minor --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 54aad1d2b..d484cdf0c 100644 --- a/Project.toml +++ b/Project.toml @@ -47,7 +47,7 @@ JLD2 = "0.4.28, 0.5" JSON = 
"0.21" LineSearches = "7.0.1" LinearAlgebra = "1" -NLsolve = "4.4" +NLsolve = "4.5.1" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.13" Preferences = "1.4" From 809e996b0830d44b2305cd7734ac4ceb7a7f5fe3 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 00:51:06 +1100 Subject: [PATCH 71/85] Fixed all legacy compats --- .github/workflows/Downgrade.yml | 3 ++- Project.toml | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index 5ad50a817..43460c5b1 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -15,7 +15,7 @@ jobs: fail-fast: false matrix: version: - - '1.10' + - '1.8' # Needs to be lowest supported version os: - ubuntu-latest arch: @@ -26,6 +26,7 @@ jobs: with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} + - run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)' - uses: julia-actions/cache@v2 - uses: julia-actions/julia-downgrade-compat@v1 with: # As per documentation, we exclude packages within the Julia standard library diff --git a/Project.toml b/Project.toml index d484cdf0c..1736c1b66 100644 --- a/Project.toml +++ b/Project.toml @@ -34,16 +34,16 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" [compat] AbstractTrees = "0.3.3, 0.4" Aqua = "0.8" -BSON = "0.2.5, 0.3" +BSON = "0.3.4" BlockArrays = "0.16.10, 1" Combinatorics = "1.0.0" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" FastGaussQuadrature = "0.4.5, 1" FileIO = "1.2.2, 1.3, 1.4" -FillArrays = "0.13.8, 1" +FillArrays = "0.13.10, 1" ForwardDiff = "0.10.14" -JLD2 = "0.4.28, 0.5" +JLD2 = "0.4.36, 0.5" JSON = "0.21" LineSearches = "7.0.1" LinearAlgebra = "1" @@ -55,11 +55,11 @@ QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.10, 2.11" Random = "1" SparseArrays = "1" SparseMatricesCSR = "0.6.4" -StaticArrays = "0.12.1, 1" +StaticArrays = "1.4" Statistics = "1" Test = "1" -WriteVTK = "1.18" -julia = "1.6" +WriteVTK = "1.21.1" 
+julia = "1.8" [extras] Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" From f4a79afca5ce526d8963763a2ea27642c51e6369 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 00:51:25 +1100 Subject: [PATCH 72/85] Unsupported Julia 1.3 --- src/Algebra/AlgebraInterfaces.jl | 21 +-------------------- src/ODEs/TransientFESpaces.jl | 5 +---- 2 files changed, 2 insertions(+), 24 deletions(-) diff --git a/src/Algebra/AlgebraInterfaces.jl b/src/Algebra/AlgebraInterfaces.jl index 7548cd9cb..242f87327 100644 --- a/src/Algebra/AlgebraInterfaces.jl +++ b/src/Algebra/AlgebraInterfaces.jl @@ -223,26 +223,7 @@ end Matrix multiply a*b and add to result to c. Returns c. """ -function muladd!(c,a,b) - _muladd!(c,a,b) - c -end - -@static if VERSION >= v"1.3" - function _muladd!(c,a,b) - mul!(c,a,b,1,1) - end -else - function _muladd!(c,a,b) - @assert length(c) == size(a,1) - @assert length(b) == size(a,2) - @inbounds for j in 1:size(a,2) - for i in 1:size(a,1) - c[i] += a[i,j]*b[j] - end - end - end -end +muladd!(b,A,x) = mul!(b,A,x,one(eltype(b)),one(eltype(b))) """ axpy_entries!(α::Number, A::T, B::T) where {T<: AbstractMatrix} -> T diff --git a/src/ODEs/TransientFESpaces.jl b/src/ODEs/TransientFESpaces.jl index 75a55a54c..2b5d77869 100644 --- a/src/ODEs/TransientFESpaces.jl +++ b/src/ODEs/TransientFESpaces.jl @@ -149,10 +149,7 @@ Arrays.evaluate!(transient_space::FESpace, space::FESpace, t::Real) = space Arrays.evaluate(space::FESpace, t::Real) = space Arrays.evaluate(space::FESpace, t::Nothing) = space -# TODO why is this needed? -@static if VERSION >= v"1.3" - (space::FESpace)(t) = evaluate(space, t) -end +(space::FESpace)(t) = evaluate(space, t) (space::TrialFESpace)(t) = evaluate(space, t) (space::ZeroMeanFESpace)(t) = evaluate(space, t) From 541458e4036b96c919b126aa12b9725932349680 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 01:15:16 +1100 Subject: [PATCH 73/85] Removed all rm directories for temp dirs. 
Note this is not necessary for Julia 1.3 and higher --- test/ArraysTests/TablesTests.jl | 2 -- test/GeometryTests/DiscreteModelsTests.jl | 2 -- test/GeometryTests/FaceLabelingsTests.jl | 2 -- test/GeometryTests/UnstructuredDiscreteModelsTests.jl | 2 -- test/GeometryTests/UnstructuredGridsTests.jl | 2 -- test/GridapTests/issue_778.jl | 2 -- test/IoTests/BsonTests.jl | 2 -- test/IoTests/JLD2Tests.jl | 2 -- test/IoTests/JsonTests.jl | 2 -- test/ReferenceFEsTests/CLagrangianRefFEsTests.jl | 2 -- test/ReferenceFEsTests/NedelecRefFEsTests.jl | 1 - test/VisualizationTests/VtkTests.jl | 3 --- 12 files changed, 24 deletions(-) diff --git a/test/ArraysTests/TablesTests.jl b/test/ArraysTests/TablesTests.jl index a292ccf35..546dae528 100644 --- a/test/ArraysTests/TablesTests.jl +++ b/test/ArraysTests/TablesTests.jl @@ -171,6 +171,4 @@ f = joinpath(d,"a.jld2") to_jld2_file(a,f) @test a == from_jld2_file(typeof(a),f) -rm(d,recursive=true) - end # module diff --git a/test/GeometryTests/DiscreteModelsTests.jl b/test/GeometryTests/DiscreteModelsTests.jl index 66b6ae993..85c43da4a 100644 --- a/test/GeometryTests/DiscreteModelsTests.jl +++ b/test/GeometryTests/DiscreteModelsTests.jl @@ -107,6 +107,4 @@ to_json_file(model2,filename) model3 = DiscreteModelFromFile(filename) test_discrete_model(model3) -rm(d,recursive=true) - end # module diff --git a/test/GeometryTests/FaceLabelingsTests.jl b/test/GeometryTests/FaceLabelingsTests.jl index d2631d8fd..25c9f2360 100644 --- a/test/GeometryTests/FaceLabelingsTests.jl +++ b/test/GeometryTests/FaceLabelingsTests.jl @@ -64,8 +64,6 @@ labels2 = from_jld2_file(typeof(labels),f) @test labels2.tag_to_entities == labels.tag_to_entities @test labels2.tag_to_name == labels.tag_to_name -rm(d,recursive=true) - @test get_tags_from_names(labels,["interior","all"]) == [1,3] face_to_mask = get_face_mask(labels,"interior",1) diff --git a/test/GeometryTests/UnstructuredDiscreteModelsTests.jl b/test/GeometryTests/UnstructuredDiscreteModelsTests.jl index 
413bc79c1..f1a9287a9 100644 --- a/test/GeometryTests/UnstructuredDiscreteModelsTests.jl +++ b/test/GeometryTests/UnstructuredDiscreteModelsTests.jl @@ -53,6 +53,4 @@ model2 == from_jld2_file(typeof(model),f) @test model.grid_topology.polytopes == model2.grid_topology.polytopes @test model.grid_topology.vertex_coordinates == model2.grid_topology.vertex_coordinates -rm(d,recursive=true) - end # module diff --git a/test/GeometryTests/UnstructuredGridsTests.jl b/test/GeometryTests/UnstructuredGridsTests.jl index 8a30b5c3a..e774700bc 100644 --- a/test/GeometryTests/UnstructuredGridsTests.jl +++ b/test/GeometryTests/UnstructuredGridsTests.jl @@ -122,6 +122,4 @@ grid2 = from_jld2_file(typeof(grid),f) @test grid.node_coordinates == grid2.node_coordinates @test grid.reffes == grid2.reffes -rm(d,recursive=true) - end # module diff --git a/test/GridapTests/issue_778.jl b/test/GridapTests/issue_778.jl index 3318e0155..e6de82d48 100644 --- a/test/GridapTests/issue_778.jl +++ b/test/GridapTests/issue_778.jl @@ -16,6 +16,4 @@ model = CartesianDiscreteModel(domain,cells,isperiodic=(true,false)) model = simplexify(model) writevtk(model,joinpath(d,"tmp_model_2")) -rm(d,recursive=true) - end # module diff --git a/test/IoTests/BsonTests.jl b/test/IoTests/BsonTests.jl index 68bf0a870..575c98446 100644 --- a/test/IoTests/BsonTests.jl +++ b/test/IoTests/BsonTests.jl @@ -34,8 +34,6 @@ f = joinpath(d,"foo.bson") to_bson_file(foo,f) @test foo == from_bson_file(Foo,f) -rm(d,recursive=true) - end # module diff --git a/test/IoTests/JLD2Tests.jl b/test/IoTests/JLD2Tests.jl index 6386b4519..474b1930d 100644 --- a/test/IoTests/JLD2Tests.jl +++ b/test/IoTests/JLD2Tests.jl @@ -24,6 +24,4 @@ foo = Dict("a"=>Int32(1),2=>Int(3),4.0=>Float32(5),"six"=>Float64(7),:s=>"Symbol to_jld2_file(foo,f) @test foo == from_jld2_file(typeof(foo),f) -rm(d,recursive=true) - end # module diff --git a/test/IoTests/JsonTests.jl b/test/IoTests/JsonTests.jl index 73d072f6c..52b960424 100644 --- a/test/IoTests/JsonTests.jl 
+++ b/test/IoTests/JsonTests.jl @@ -33,8 +33,6 @@ d = mktempdir() f = joinpath(d,"foo.json") to_json_file(foo,f) @test foo == from_json_file(Foo,f) -rm(d,recursive=true) - @test foo == from_json(Foo,JSON.json(foo)) end # module diff --git a/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl b/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl index 1723b85ab..f2c6204f0 100644 --- a/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl +++ b/test/ReferenceFEsTests/CLagrangianRefFEsTests.jl @@ -220,6 +220,4 @@ f = joinpath(d,"reffe.jld2") to_jld2_file(reffe,f) @test reffe == from_jld2_file(typeof(reffe),f) -rm(d,recursive=true) - end # module diff --git a/test/ReferenceFEsTests/NedelecRefFEsTests.jl b/test/ReferenceFEsTests/NedelecRefFEsTests.jl index 8a312bde6..1e0db175c 100644 --- a/test/ReferenceFEsTests/NedelecRefFEsTests.jl +++ b/test/ReferenceFEsTests/NedelecRefFEsTests.jl @@ -177,6 +177,5 @@ gndat = ["g$i"=>gux[:,i] for i in 1:num_dofs(reffe)] d = mktempdir() f = joinpath(d, "nede_tet_1") writevtk(grid,f,nodaldata=vcat(ndat,gndat)) -rm(d,recursive=true) end # module diff --git a/test/VisualizationTests/VtkTests.jl b/test/VisualizationTests/VtkTests.jl index af9ad91c4..f3e837f20 100644 --- a/test/VisualizationTests/VtkTests.jl +++ b/test/VisualizationTests/VtkTests.jl @@ -202,7 +202,4 @@ vtk_save(pvtk) pvtk = create_pvtk_file(Ω2,f; part=2, nparts=2,celldata=["u"=>rand(num_cells(Ω2))]) vtk_save(pvtk) - -rm(d,recursive=true) - end # module From c082fafa95ab0bbcf6d5c1f0034af98443d04494 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 10:41:49 +1100 Subject: [PATCH 74/85] Fixed issues with BlockArrays 1.0 --- src/Algebra/Algebra.jl | 1 + src/Algebra/AlgebraInterfaces.jl | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Algebra/Algebra.jl b/src/Algebra/Algebra.jl index 72f2782ac..46a520f35 100644 --- a/src/Algebra/Algebra.jl +++ b/src/Algebra/Algebra.jl @@ -18,6 +18,7 @@ using Gridap.Helpers import Base: convert, size, getindex, show, 
count, * import LinearAlgebra: mul! import SparseArrays: nnz, nonzeros, nzrange, findnz, rowvals +import BlockArrays: AbstractBlockedUnitRange export length_to_ptrs! export rewind_ptrs! diff --git a/src/Algebra/AlgebraInterfaces.jl b/src/Algebra/AlgebraInterfaces.jl index 7548cd9cb..44298b2f8 100644 --- a/src/Algebra/AlgebraInterfaces.jl +++ b/src/Algebra/AlgebraInterfaces.jl @@ -40,7 +40,7 @@ function allocate_vector(::Type{V},n::Integer) where V V(undef,n) end -function allocate_vector(::Type{<:BlockVector{T,VV}},indices::BlockedUnitRange) where {T,VV} +function allocate_vector(::Type{<:BlockVector{T,VV}},indices::AbstractBlockedUnitRange) where {T,VV} V = eltype(VV) mortar(map(ids -> allocate_vector(V,ids),blocks(indices))) end From 1aa723ae93d996544157009bf30b9b0939f1bdc5 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 14:54:35 +1100 Subject: [PATCH 75/85] Minor --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 1736c1b66..612fad832 100644 --- a/Project.toml +++ b/Project.toml @@ -35,7 +35,7 @@ WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192" AbstractTrees = "0.3.3, 0.4" Aqua = "0.8" BSON = "0.3.4" -BlockArrays = "0.16.10, 1" +BlockArrays = "1" Combinatorics = "1.0.0" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" From 1b5d4fafed31439ac84643e852cf604ed8e8a61d Mon Sep 17 00:00:00 2001 From: Antoine Marteau Date: Thu, 7 Nov 2024 15:00:13 +1100 Subject: [PATCH 76/85] make test retro compatible with 1.6 --- test/TensorValuesTests/TypesTests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index 7306be651..bb1dfd073 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -181,11 +181,11 @@ q = SymTracelessTensorValue{0,Int}() q = SymTracelessTensorValue{1,Int}( () ) @test isa(q,SymTracelessTensorValue{1,Int}) -@test 
convert(SMatrix{1,1,Int},q) == [0;;] +@test convert(SMatrix{1,1,Int},q) == zeros(Int,1,1) q = SymTracelessTensorValue{1,Int}() @test isa(q,SymTracelessTensorValue{1,Int}) -@test convert(SMatrix{1,1,Int},q) == [0;;] +@test convert(SMatrix{1,1,Int},q) == zeros(Int,1,1) q = SymTracelessTensorValue(11,21.0) @test isa(q,SymTracelessTensorValue{2,Float64}) From e0be5c8e78b2eb05422b7c5057c0f59ab51cf1f9 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 15:01:22 +1100 Subject: [PATCH 77/85] Minor --- Project.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index 612fad832..e9c139cbc 100644 --- a/Project.toml +++ b/Project.toml @@ -36,12 +36,12 @@ AbstractTrees = "0.3.3, 0.4" Aqua = "0.8" BSON = "0.3.4" BlockArrays = "1" -Combinatorics = "1.0.0" +Combinatorics = "1" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" FastGaussQuadrature = "0.4.5, 1" FileIO = "1.2.2, 1.3, 1.4" -FillArrays = "0.13.10, 1" +FillArrays = "1.11" ForwardDiff = "0.10.14" JLD2 = "0.4.36, 0.5" JSON = "0.21" From ba21e382ef7575e5e7194dae35e917c6cbf5ed0d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 15:20:52 +1100 Subject: [PATCH 78/85] Minor --- Project.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index e9c139cbc..03b8d8340 100644 --- a/Project.toml +++ b/Project.toml @@ -40,10 +40,10 @@ Combinatorics = "1" DataStructures = "0.18.13" DocStringExtensions = "0.8.1, 0.9" FastGaussQuadrature = "0.4.5, 1" -FileIO = "1.2.2, 1.3, 1.4" +FileIO = "1.5" FillArrays = "1.11" ForwardDiff = "0.10.14" -JLD2 = "0.4.36, 0.5" +JLD2 = "0.5" JSON = "0.21" LineSearches = "7.0.1" LinearAlgebra = "1" From 0a81b59ab85f4e8b90e6f0f712e144cbf2d11c81 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 7 Nov 2024 15:21:40 +1100 Subject: [PATCH 79/85] Reverted to supporting Julia 1.6 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Project.toml b/Project.toml index 03b8d8340..95ed94f03 100644 --- a/Project.toml +++ b/Project.toml @@ -59,7 +59,7 @@ StaticArrays = "1.4" Statistics = "1" Test = "1" WriteVTK = "1.21.1" -julia = "1.8" +julia = "1.6" [extras] Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" From f0a1c91e9fe33ff08fdb1f0baf82153db18d3922 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 8 Nov 2024 00:54:26 +1100 Subject: [PATCH 80/85] Deactivated x86 windows tests. Somehow they hang forever --- .github/workflows/Downgrade.yml | 2 +- .github/workflows/ci.yml | 28 +++++++++------------------- 2 files changed, 10 insertions(+), 20 deletions(-) diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index 43460c5b1..75eeeba91 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -8,7 +8,7 @@ concurrency: cancel-in-progress: true jobs: - downgrade_test: + downgrade: name: Downgrade ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} strategy: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d6d65676..9d46d8f7f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,37 +8,27 @@ concurrency: cancel-in-progress: true jobs: - test: + test: # Main CI tests name: Tests ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} strategy: fail-fast: false - matrix: + matrix: # Main tests for linux version: + - '1.8' + - '1.9' - '1.10' os: - ubuntu-latest - - windows-latest - - macos-latest arch: - x64 - x86 - - aarch64 - exclude: # Exclude some combinations - - os: ubuntu-latest - arch: aarch64 - - os: windows-latest + include: # Test macos/windows on latest LTS + - version: '1.10' + os: macos-latest arch: aarch64 - - os: macos-latest - arch: x64 - - os: macos-latest - arch: x86 - include: # Legacy support for old Julia versions - - version: '1.8' - os: ubuntu-latest - arch: x64 - 
- version: '1.9' - os: ubuntu-latest + - version: '1.10' + os: windows-latest arch: x64 steps: - uses: actions/checkout@v4 From 9ec14f1a9981a7c3efe4a725a31e21b4b03fb54c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 8 Nov 2024 00:58:34 +1100 Subject: [PATCH 81/85] Minor --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 95ed94f03..5dc64d83d 100644 --- a/Project.toml +++ b/Project.toml @@ -51,7 +51,7 @@ NLsolve = "4.5.1" NearestNeighbors = "0.4.8" PolynomialBases = "0.4.13" Preferences = "1.4" -QuadGK = "2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 2.10, 2.11" +QuadGK = "2.4" Random = "1" SparseArrays = "1" SparseMatricesCSR = "0.6.4" From 899bbdc2f7c1caaa8fd5a461b22e11bdacd64007 Mon Sep 17 00:00:00 2001 From: Jordi Manyer Fuertes Date: Fri, 8 Nov 2024 09:33:55 +1100 Subject: [PATCH 82/85] Bump version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index e7c681a17..994891294 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Gridap" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" authors = ["Santiago Badia ", "Francesc Verdugo ", "Alberto F. Martin "] -version = "0.18.6" +version = "0.18.7" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" From b31f36d9e1d207a71806fd3e7d9fa9fac21a8284 Mon Sep 17 00:00:00 2001 From: Jordi Manyer Fuertes Date: Fri, 8 Nov 2024 09:34:28 +1100 Subject: [PATCH 83/85] Update NEWS.md --- NEWS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 72dd89b72..5217ce785 100644 --- a/NEWS.md +++ b/NEWS.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [Unreleased] +## [0.18.7] - 2024-10-8 ### Added From 2477b6cf952112cfd45bfe6ec9d4579a8979657d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 8 Nov 2024 12:31:27 +1100 Subject: [PATCH 84/85] Reduced memory in tests --- test/AdaptivityTests/MacroFEStokesTests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/AdaptivityTests/MacroFEStokesTests.jl b/test/AdaptivityTests/MacroFEStokesTests.jl index 2dfe2c53b..607ddc521 100644 --- a/test/AdaptivityTests/MacroFEStokesTests.jl +++ b/test/AdaptivityTests/MacroFEStokesTests.jl @@ -14,7 +14,7 @@ function main(Dc,reftype) p_sol(x) = x[1] - 1.0/2.0 domain = (Dc == 2) ? (0,1,0,1) : (0,1,0,1,0,1) - nc = (Dc == 2) ? (2,2) : (1,1,1) + nc = (Dc == 2) ? (1,1) : (1,1,1) model = simplexify(CartesianDiscreteModel(domain,nc)) poly = (Dc == 2) ? TRI : TET @@ -65,7 +65,7 @@ end main(2,:barycentric) #main(2,:powellsabin) -main(3,:barycentric) +#main(3,:barycentric) #main(3,:powellsabin) end # module \ No newline at end of file From 06de5f7988226aec4e4c1e8592f04ece22e0e453 Mon Sep 17 00:00:00 2001 From: Jordi Manyer Fuertes Date: Fri, 8 Nov 2024 15:00:44 +1100 Subject: [PATCH 85/85] Deactivated x86 tests on older Julia versions --- .github/workflows/ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d46d8f7f..d609848c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,14 +22,16 @@ jobs: - ubuntu-latest arch: - x64 - - x86 - include: # Test macos/windows on latest LTS + include: # Test macos/windows on latest LTS + x86 on ubuntu - version: '1.10' os: macos-latest arch: aarch64 - version: '1.10' os: windows-latest arch: x64 + - version: '1.10' + os: ubuntu-latest + arch: x86 steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v2