Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Request For Comment: Arithmetic between Operators and LazyOperators #86

Merged
merged 5 commits into from
Apr 24, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion src/operators_lazyproduct.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ complex factor is stored in the `factor` field which allows for fast
multiplication with numbers.
"""

mutable struct LazyProduct{BL,BR,F,T,KTL,BTR} <: AbstractOperator{BL,BR}
mutable struct LazyProduct{BL,BR,F,T,KTL,BTR} <: LazyOperator{BL,BR}
basis_l::BL
basis_r::BR
factor::F
Expand Down Expand Up @@ -74,6 +74,16 @@ permutesystems(op::LazyProduct, perm::Vector{Int}) = LazyProduct(([permutesystem
identityoperator(::Type{LazyProduct}, ::Type{S}, b1::Basis, b2::Basis) where S<:Number = LazyProduct(identityoperator(S, b1, b2))



function tensor(a::Operator{B1,B2},b::LazyProduct{B3, B4, F, T, KTL, BTR}) where {B1,B2,B3,B4, F, T, KTL, BTR}
    # a ⊗ (op1*op2*...*opn) == (a ⊗ op1) * (I ⊗ op2) * ... * (I ⊗ opn),
    # where I is the identity on `a`'s right basis so consecutive factors compose.
    pad = identityoperator(a.basis_r)
    padded = ntuple(i -> (i == 1 ? a : pad) ⊗ b.operators[i], length(b.operators))
    return LazyProduct(padded, b.factor)
end
function tensor(a::LazyProduct{B1, B2, F, T, KTL, BTR},b::Operator{B3,B4}) where {B1,B2,B3,B4, F, T, KTL, BTR}
    # (op1*...*opn) ⊗ b == (op1 ⊗ I) * ... * (op_{n-1} ⊗ I) * (opn ⊗ b).
    # The padding identity must act on *b*'s left basis so the second tensor
    # slots compose as I * I * ... * b == b. The previous code used
    # `identityoperator(a.basis_l)`, which is wrong whenever a.basis_l != b.basis_l
    # (the existing tests happen to use coinciding bases, so they did not catch it).
    n = length(a.operators)
    ops = ([op ⊗ (i == n ? b : identityoperator(b.basis_l)) for (i, op) in enumerate(a.operators)]...,)
    return LazyProduct(ops, a.factor)
end

function mul!(result::Ket{B1},a::LazyProduct{B1,B2},b::Ket{B2},alpha,beta) where {B1,B2}
if length(a.operators)==1
mul!(result,a.operators[1],b,a.factor*alpha,beta)
Expand Down
32 changes: 27 additions & 5 deletions src/operators_lazysum.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ function _check_bases(basis_l, basis_r, operators)
end
end

"""
Abstract supertype for all lazy operator types ([`LazySum`](@ref), [`LazyProduct`](@ref), and [`LazyTensor`](@ref)).

Grouping the lazy wrappers under one supertype lets arithmetic (`+`, `-`)
between any two lazy operators dispatch to a common `LazySum`-based fallback.
"""
abstract type LazyOperator{BL,BR} <: AbstractOperator{BL,BR} end

"""
LazySum([Tf,] [factors,] operators)
LazySum([Tf,] basis_l, basis_r, [factors,] [operators])
Expand All @@ -28,7 +33,7 @@ of operator-state operations, such as simulating time evolution. A `Vector` can
reduce compile-time overhead when doing arithmetic on `LazySum`s, such as
summing many `LazySum`s together.
"""
mutable struct LazySum{BL,BR,F,T} <: AbstractOperator{BL,BR}
mutable struct LazySum{BL,BR,F,T} <: LazyOperator{BL,BR}
basis_l::BL
basis_r::BR
factors::F
Expand Down Expand Up @@ -57,6 +62,7 @@ end
# Build a LazySum with unit coefficients of element type `Tf`, one per operator.
LazySum(::Type{Tf}, operators::AbstractOperator...) where Tf = LazySum(ones(Tf, length(operators)), (operators...,))
# Infer the coefficient element type by promoting the operators' eltypes.
LazySum(operators::AbstractOperator...) = LazySum(mapreduce(eltype, promote_type, operators), operators...)
LazySum() = throw(ArgumentError("LazySum needs a basis, or at least one operator!"))
# Idempotent conversion: wrapping a LazySum in LazySum is a no-op.
LazySum(x::LazySum) = x

Base.copy(x::LazySum) = @samebases LazySum(x.basis_l, x.basis_r, copy(x.factors), copy.(x.operators))
Base.eltype(x::LazySum) = promote_type(eltype(x.factors), mapreduce(eltype, promote_type, x.operators))
Expand All @@ -81,8 +87,9 @@ function +(a::LazySum{B1,B2}, b::LazySum{B1,B2}) where {B1,B2}
@samebases LazySum(a.basis_l, a.basis_r, factors, ops)
end
+(a::LazySum{B1,B2}, b::LazySum{B3,B4}) where {B1,B2,B3,B4} = throw(IncompatibleBases())
# Addition involving any lazy operator stays lazy: promote both sides to a
# LazySum and combine them. (The two removed old-side diff lines that narrowed
# this to LazySum would shadow the LazyOperator promotion and are diff residue.)
+(a::LazyOperator, b::AbstractOperator) = LazySum(a) + LazySum(b)
+(a::AbstractOperator, b::LazyOperator) = LazySum(a) + LazySum(b)
# Resolves the dispatch ambiguity between the two methods above.
+(a::O1, b::O2) where {O1<:LazyOperator,O2<:LazyOperator} = LazySum(a) + LazySum(b)

-(a::LazySum) = @samebases LazySum(a.basis_l, a.basis_r, -a.factors, a.operators)
function -(a::LazySum{B1,B2}, b::LazySum{B1,B2}) where {B1,B2}
Expand All @@ -92,8 +99,10 @@ function -(a::LazySum{B1,B2}, b::LazySum{B1,B2}) where {B1,B2}
@samebases LazySum(a.basis_l, a.basis_r, factors, ops)
end
-(a::LazySum{B1,B2}, b::LazySum{B3,B4}) where {B1,B2,B3,B4} = throw(IncompatibleBases())
# Subtraction involving any lazy operator stays lazy via promotion to LazySum.
# (The removed old-side diff lines restricted to LazySum are diff residue.)
-(a::LazyOperator, b::AbstractOperator) = LazySum(a) - LazySum(b)
-(a::AbstractOperator, b::LazyOperator) = LazySum(a) - LazySum(b)
# Resolves the dispatch ambiguity between the two methods above.
-(a::O1, b::O2) where {O1<:LazyOperator,O2<:LazyOperator} = LazySum(a) - LazySum(b)


function *(a::LazySum, b::Number)
factors = b*a.factors
Expand All @@ -106,6 +115,19 @@ function /(a::LazySum, b::Number)
@samebases LazySum(a.basis_l, a.basis_r, factors, a.operators)
end

function tensor(a::Operator,b::LazySum)
    # a ⊗ (Σᵢ fᵢ opᵢ) == Σᵢ fᵢ (a ⊗ opᵢ); the factors carry over unchanged.
    bl = a.basis_l ⊗ b.basis_l
    br = a.basis_r ⊗ b.basis_r
    # Splat into a tuple so the resulting operator storage is always a Tuple,
    # regardless of whether `b.operators` is a Tuple or a Vector.
    terms = ((a ⊗ op for op in b.operators)...,)
    return LazySum(bl, br, b.factors, terms)
end
function tensor(a::LazySum,b::Operator)
    # (Σᵢ fᵢ opᵢ) ⊗ b == Σᵢ fᵢ (opᵢ ⊗ b); the factors carry over unchanged.
    bl = a.basis_l ⊗ b.basis_l
    br = a.basis_r ⊗ b.basis_r
    # Splat into a tuple so the resulting operator storage is always a Tuple.
    terms = ((op ⊗ b for op in a.operators)...,)
    return LazySum(bl, br, a.factors, terms)
end

function dagger(op::LazySum)
ops = dagger.(op.operators)
@samebases LazySum(op.basis_r, op.basis_l, conj.(op.factors), ops)
Expand Down
32 changes: 24 additions & 8 deletions src/operators_lazytensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ must be sorted.
Additionally, a factor is stored in the `factor` field which allows for fast
multiplication with numbers.
"""
mutable struct LazyTensor{BL,BR,F,I,T} <: AbstractOperator{BL,BR}
mutable struct LazyTensor{BL,BR,F,I,T} <: LazyOperator{BL,BR}
basis_l::BL
basis_r::BR
factor::F
Expand Down Expand Up @@ -96,21 +96,37 @@ isequal(x::LazyTensor, y::LazyTensor) = samebases(x,y) && isequal(x.indices, y.i
# Arithmetic operations
-(a::LazyTensor) = LazyTensor(a, -a.factor)

function +(a::LazyTensor{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2}
if length(a.indices) == 1 && a.indices == b.indices
# Alias for a LazyTensor wrapping exactly one DataOperator; only such pairs can
# be added eagerly by summing the underlying data.
const single_dataoperator{B1,B2} = LazyTensor{B1,B2,F,I,Tuple{T}} where {B1,B2,F,I,T<:DataOperator}

function +(a::T1,b::T2) where {T1 <: single_dataoperator{B1,B2},T2 <: single_dataoperator{B1,B2}} where {B1,B2}
    # Same single occupied slot: add the factor-scaled data operators directly
    # and stay a LazyTensor. NOTE(review): not type-stable — the result type is
    # decided at runtime; per the PR discussion this path is not performance
    # critical since operators are typically built once.
    if a.indices == b.indices
        op = a.operators[1] * a.factor + b.operators[1] * b.factor
        return LazyTensor(a.basis_l, a.basis_r, a.indices, (op,))
    end
    # Different slots: fall back to a lazy sum. (The old unconditional
    # `throw(ArgumentError(...))` was the removed diff side; review-thread text
    # embedded by the page scrape has been stripped from the body.)
    LazySum(a) + LazySum(b)
end

function -(a::LazyTensor{B1,B2}, b::LazyTensor{B1,B2}) where {B1,B2}
if length(a.indices) == 1 && a.indices == b.indices
function -(a::T1,b::T2) where {T1 <: single_dataoperator{B1,B2},T2 <: single_dataoperator{B1,B2}} where {B1,B2}
    # Same single occupied slot: subtract the factor-scaled data operators
    # directly and stay a LazyTensor (mirrors the `+` method above).
    if a.indices == b.indices
        op = a.operators[1] * a.factor - b.operators[1] * b.factor
        return LazyTensor(a.basis_l, a.basis_r, a.indices, (op,))
    end
    # Different slots: fall back to a lazy sum. The stale `throw(ArgumentError(...))`
    # line preceding this fallback was the removed diff side rendered without a
    # `-` marker; keeping it would make the fallback unreachable.
    LazySum(a) - LazySum(b)
end

function tensor(a::LazyTensor{B1,B2},b::AbstractOperator{B3,B4}) where {B1,B2,B3,B4}
    # Wrap `b` as a one-slot LazyTensor and defer to LazyTensor ⊗ LazyTensor.
    # Only non-composite bases are supported for the wrapped operand.
    if B3 <: CompositeBasis || B4 <: CompositeBasis
        throw(ArgumentError("tensor(a::LazyTensor{B1,B2},b::AbstractOperator{B3,B4}) is not implemented for B3 or B4 being CompositeBasis unless b is identityoperator "))
    end
    return a ⊗ LazyTensor(b.basis_l,b.basis_r,[1],(b,),1)
end
function tensor(a::AbstractOperator{B1,B2},b::LazyTensor{B3,B4}) where {B1,B2,B3,B4}
    # Wrap `a` as a one-slot LazyTensor and defer to LazyTensor ⊗ LazyTensor.
    # Only non-composite bases are supported for the wrapped operand.
    if B1 <: CompositeBasis || B2 <: CompositeBasis
        throw(ArgumentError("tensor(a::AbstractOperator{B1,B2},b::LazyTensor{B3,B4}) is not implemented for B1 or B2 being CompositeBasis unless b is identityoperator "))
    end
    return LazyTensor(a.basis_l,a.basis_r,[1],(a,),1) ⊗ b
end


function *(a::LazyTensor{B1,B2}, b::LazyTensor{B2,B3}) where {B1,B2,B3}
indices = sort(union(a.indices, b.indices))
Expand Down
4 changes: 1 addition & 3 deletions test/test_abstractdata.jl
Original file line number Diff line number Diff line change
Expand Up @@ -413,8 +413,6 @@ xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition
@test_throws ArgumentError op1 + op2
@test_throws ArgumentError op1 - op2
@test D(-op1_, -op1, 1e-12)

# Test multiplication
Expand Down Expand Up @@ -448,7 +446,7 @@ xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition
@test_throws ArgumentError op1 + op2
@test D(2.1*op1 + 0.3*op2, 2.1*op1_+0.3*op2_)
@test D(-op1_, -op1)

# Test multiplication
Expand Down
32 changes: 30 additions & 2 deletions test/test_operators_lazyproduct.jl
Original file line number Diff line number Diff line change
Expand Up @@ -50,21 +50,41 @@ op1b = randoperator(b_r, b_l)
op2a = randoperator(b_l, b_r)
op2b = randoperator(b_r, b_l)
op3a = randoperator(b_l, b_l)
op3b = randoperator(b_r, b_r)

op1 = LazyProduct([op1a, sparse(op1b)])*0.1
op1_ = 0.1*(op1a*op1b)
op2 = LazyProduct([sparse(op2a), op2b], 0.3)
op2_ = 0.3*(op2a*op2b)
op3 = LazyProduct(op3a)
op3_ = op3a
op4 = LazyProduct(op3b)
op4_ = op3b

x1 = Ket(b_l, rand(ComplexF64, length(b_l)))
x2 = Ket(b_l, rand(ComplexF64, length(b_l)))
xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Addition
@test_throws ArgumentError op1 + op2
# Test Addition
@test_throws QuantumOpticsBase.IncompatibleBases op1 + dagger(op4)
@test 1e-14 > D(-op1_, -op1)
@test 1e-14 > D(op1+op2, op1_+op2_)
@test 1e-14 > D(op1+op2_, op1_+op2_)
@test 1e-14 > D(op1_+op2, op1_+op2_)
#Check for disallowed addition:
@test_throws QuantumOpticsBase.IncompatibleBases LazyProduct([op1a, sparse(op1a)])+LazyProduct([sparse(op2b), op2b], 0.3)

# Test Subtraction
@test_throws QuantumOpticsBase.IncompatibleBases op1 - dagger(op4)
@test 1e-14 > D(op1 - op2, op1_ - op2_)
@test 1e-14 > D(op1 - op2_, op1_ - op2_)
@test 1e-14 > D(op1_ - op2, op1_ - op2_)
@test 1e-14 > D(op1 + (-op2), op1_ - op2_)
@test 1e-14 > D(op1 + (-1*op2), op1_ - op2_)
#Check for disallowed subtraction:
@test_throws QuantumOpticsBase.IncompatibleBases LazyProduct([op1a, sparse(op1a)])-LazyProduct([sparse(op2b), op2b], 0.3)


# Test multiplication
@test_throws DimensionMismatch op1a*op1a
Expand All @@ -78,6 +98,14 @@ xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))
# Test division
@test 1e-14 > D(op1/7, op1_/7)

#Test Tensor product
op = op1a ⊗ op1
op_ = op1a ⊗ op1_
@test 1e-11 > D(op,op_)
op = op1 ⊗ op2a
op_ = op1_ ⊗ op2a
@test 1e-11 > D(op,op_)

# Test identityoperator
Idense = identityoperator(DenseOpType, b_l)
id = identityoperator(LazyProduct, b_l)
Expand Down
23 changes: 23 additions & 0 deletions test/test_operators_lazysum.jl
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,29 @@ xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
# Test division
@test 1e-14 > D(op1/7, op1_/7)

#Test Tensor
op1_tensor = op1a ⊗ op1
op2_tensor = op1 ⊗ op1a
op1_tensor_ = op1a ⊗ op1_
op2_tensor_ = op1_ ⊗ op1a
@test 1e-14 > D(op1_tensor,op1_tensor_)
@test 1e-14 > D(op1_tensor_,op1_tensor)

#Test addition of with and of LazyTensor and LazyProduct
subop1 = randoperator(b1a, b1b)
subop2 = randoperator(b2a, b2b)
subop3 = randoperator(b3a, b3b)
I1 = dense(identityoperator(b1a, b1b))
I2 = dense(identityoperator(b2a, b2b))
I3 = dense(identityoperator(b3a, b3b))
op1_LT = LazyTensor(b_l, b_r, [1, 3], (subop1, sparse(subop3)), 0.1)
op1_LT_ = 0.1*subop1 ⊗ I2 ⊗ subop3
op1_LP = LazyProduct([op1a])*0.1
op1_LP_ = op1a*0.1
@test 1e-14 > D(op1_LT+op1_LP,op1_LT_+op1_LP_)
@test 1e-14 > D(op1_LT+op1,op1_LT_+op1_)
@test 1e-14 > D(op1_LP+op1,op1_LP_+op1_)

# Test tuples vs. vectors
@test (op1+op1).operators isa Tuple
@test (op1+op2).operators isa Tuple
Expand Down
41 changes: 34 additions & 7 deletions test/test_operators_lazytensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ mutable struct test_lazytensor{BL<:Basis,BR<:Basis} <: AbstractOperator{BL,BR}
end
Base.eltype(::test_lazytensor) = ComplexF64

@testset "operators-lazytensor" begin

Random.seed!(0)

Expand Down Expand Up @@ -102,21 +101,51 @@ op2 = LazyTensor(b_l, b_r, [2, 3], (sparse(subop2), subop3), 0.7)
op2_ = 0.7*I1 ⊗ subop2 ⊗ subop3
op3 = 0.3*LazyTensor(b_l, b_r, 3, subop3)
op3_ = 0.3*I1 ⊗ I2 ⊗ subop3
op4 = 0.4*LazyTensor(b_l, b_r, 2, subop2)
op4_ = 0.4*I1 ⊗ subop2 ⊗ I3

x1 = Ket(b_r, rand(ComplexF64, length(b_r)))
x2 = Ket(b_r, rand(ComplexF64, length(b_r)))
xbra1 = Bra(b_l, rand(ComplexF64, length(b_l)))
xbra2 = Bra(b_l, rand(ComplexF64, length(b_l)))

# Allowed addition
# Addition one operator at same index
fac = randn()
@test dense(op3 + fac * op3) ≈ dense(op3) * (1 + fac)
@test dense(op3 - fac * op3) ≈ dense(op3) * (1 - fac)
# Addition one operator at different index
@test dense(op3 + fac * op4) ≈ dense(op3_ + fac*op4_)
@test dense(op3 - fac * op4) ≈ dense(op3_ - fac*op4_)

# Forbidden addition
@test_throws ArgumentError op1 + op2
@test_throws ArgumentError op1 - op2
#Test addition
@test_throws QuantumOpticsBase.IncompatibleBases op1 + dagger(op2)
@test 1e-14 > D(-op1_, -op1)
@test 1e-14 > D(op1+op2, op1_+op2_)
@test 1e-14 > D(op1+op2_, op1_+op2_)
@test 1e-14 > D(op1_+op2, op1_+op2_)

#Test subtraction
@test_throws QuantumOpticsBase.IncompatibleBases op1 - dagger(op2)
@test 1e-14 > D(op1 - op2, op1_ - op2_)
@test 1e-14 > D(op1 - op2_, op1_ - op2_)
@test 1e-14 > D(op1_ - op2, op1_ - op2_)
@test 1e-14 > D(op1 + (-op2), op1_ - op2_)
@test 1e-14 > D(op1 + (-1*op2), op1_ - op2_)


#Test tensor
op1_tensor = subop1 ⊗ op1
op1_tensor_ = subop1 ⊗ op1_
op2_tensor = op1 ⊗ subop1
op2_tensor_ = op1_ ⊗ subop1
@test 1e-14 > D(op1_tensor,op1_tensor_)
@test 1e-14 > D(op1_tensor_,op1_tensor)

#Cannot tensor CompositeBasis with LazyTensor:
@test_throws ArgumentError op1 ⊗ op1_
@test_throws ArgumentError op2_ ⊗ op2



# Test multiplication
@test_throws DimensionMismatch op1*op2
Expand Down Expand Up @@ -412,5 +441,3 @@ Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))
@test_throws DimensionMismatch sparse(Lop1)*Lop1
@test_throws DimensionMismatch Lop1*dense(Lop1)
@test_throws DimensionMismatch Lop1*sparse(Lop1)

end # testset