From e25e081f18ec3d55ce4b255173595f830e87fbe9 Mon Sep 17 00:00:00 2001 From: Miles Date: Thu, 9 May 2024 23:00:36 -0400 Subject: [PATCH 01/14] Simplify vertex crossing check and other improvements --- .../opsum_to_ttn/opsum_to_ttn.jl | 54 ++++++++++--------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 09098b68..c5b1a107 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -1,11 +1,13 @@ #using FillArrays: OneElement #using DataGraphs: DataGraph -using Graphs: degree, is_tree +using Graphs: degree, is_tree, steiner_tree using ITensorMPS: ITensorMPS, cutoff, linkdims, ops, truncate!, val using ITensors: flux, has_fermion_string, itensor, removeqns, space using ITensors.LazyApply: Prod, Sum, coefficient using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks -using ITensors.Ops: argument, coefficient, Op, OpSum, name, params, site, terms, which_op +using ITensors.Ops: + argument, coefficient, Op, OpSum, name, params, site, sites, terms, which_op +using IterTools: partition using NamedGraphs.GraphsExtensions: GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path using StaticArrays: MVector @@ -90,16 +92,15 @@ end function make_symbolic_ttn( coefficient_type, opsum::OpSum, - sites::IndsNetwork; + sites_::IndsNetwork; ordered_verts, ordered_edges, root_vertex, term_qn_map, ) - inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() - outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() - - #g = underlying_graph(sites) + graph = underlying_graph(sites_) + inmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() + outmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. @@ -112,17 +113,17 @@ function make_symbolic_ttn( # Temporary symbolic representation of TTN Hamiltonian symbolic_ttn = Dict( - v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(sites, v)}[] for + v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(graph, v)}[] for v in ordered_verts ) # Build compressed finite state machine representation (symbolic_ttn) for v in ordered_verts - v_degree = degree(sites, v) + v_degree = degree(graph, v) # For every vertex, find all edges that contain this vertex # (align_and_reorder_edges makes the output of indicident edges match the # direction and ordering match that of ordered_edges) - edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) + edges = align_and_reorder_edges(incident_edges(graph, v), ordered_edges) # Use the corresponding ordering as index order for tensor elements at this site dim_in = findfirst(e -> dst(e) == v, edges) @@ -133,10 +134,8 @@ function make_symbolic_ttn( # For every site v' except v, determine the incident edge to v # that lies in the edge_path(v',v) # TODO: better way to make which_incident_edge below? 
- subgraphs = split_at_vertex(sites, v) - _boundary_edges = [ - only(boundary_edges(underlying_graph(sites), subgraph)) for subgraph in subgraphs - ] + subgraphs = split_at_vertex(graph, v) + _boundary_edges = [only(boundary_edges(graph, subgraph)) for subgraph in subgraphs] _boundary_edges = align_edges(_boundary_edges, edges) which_incident_edge = Dict( Iterators.flatten([ @@ -145,20 +144,23 @@ function make_symbolic_ttn( ) # Sanity check, leaves only have single incoming or outgoing edge - @assert !isempty(dims_out) || !isnothing(dim_in) - (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(sites, v) + #@assert !isempty(dims_out) || !isnothing(dim_in) + #(isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(graph, v) for term in opsum - # Loop over OpSum and pick out terms that act on current vertex - ops = ITensors.terms(term) - if v in ITensors.site.(ops) - crosses_vertex = true - else - crosses_vertex = - !isone(length(Set([which_incident_edge[site] for site in site.(ops)]))) - end + ops = terms(term) + op_sites = ITensors.sites(term) + + # Only consider terms crossing current vertex + crosses_vertex = any( + ((s1, s2),) -> (v ∈ vertex_path(graph, s1, s2)), partition(op_sites, 2, 1) + ) + # TODO: seems we could make this just + # crosses_vertex = (v ∈ vertices(steiner_tree(graph,op_sites)))) + # but it is not working - steiner_tree seems to have spurious extra vertices? + # If term doesn't cross vertex, skip it - crosses_vertex || continue + (crosses_vertex || (v ∈ op_sites)) || continue # Filter out ops that acts on current vertex onsite_ops = filter(t -> (site(t) == v), ops) @@ -221,7 +223,7 @@ function make_symbolic_ttn( end # Add onsite identity for interactions passing through vertex if isempty(onsite_ops) - if !ITensors.using_auto_fermion() && isfermionic(incoming_ops, sites) + if !ITensors.using_auto_fermion() && isfermionic(incoming_ops, graph) push!(onsite_ops, Op("F", v)) else push!(onsite_ops, Op("Id", v)) From e6630115a314e5a459bc3f7b348624fe6b3b9942 Mon Sep 17 00:00:00 2001 From: Miles Date: Sat, 11 May 2024 18:31:26 -0400 Subject: [PATCH 02/14] Fixes --- src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index c5b1a107..3ed5da26 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -6,7 +6,7 @@ using ITensors: flux, has_fermion_string, itensor, removeqns, space using ITensors.LazyApply: Prod, Sum, coefficient using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks using ITensors.Ops: - argument, coefficient, Op, OpSum, name, params, site, sites, terms, which_op + argument, coefficient, Op, OpSum, name, params, site, terms, which_op using IterTools: partition using NamedGraphs.GraphsExtensions: GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path @@ -92,13 +92,13 @@ end function make_symbolic_ttn( coefficient_type, opsum::OpSum, - sites_::IndsNetwork; + sites::IndsNetwork; ordered_verts, ordered_edges, root_vertex, term_qn_map, ) - graph = underlying_graph(sites_) + graph = underlying_graph(sites) inmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() outmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() @@ -223,7 +223,7 @@ function make_symbolic_ttn( end # Add onsite identity for interactions passing through vertex if isempty(onsite_ops) - if 
!ITensors.using_auto_fermion() && isfermionic(incoming_ops, graph) + if !ITensors.using_auto_fermion() && isfermionic(incoming_ops, sites) push!(onsite_ops, Op("F", v)) else push!(onsite_ops, Op("Id", v)) From 831eaec88e750aed0150973042cb5ccd03d5935b Mon Sep 17 00:00:00 2001 From: Miles Date: Sat, 11 May 2024 23:35:47 -0400 Subject: [PATCH 03/14] Use DataGraphs to store symbolic TTN data --- .../opsum_to_ttn/opsum_to_ttn.jl | 59 ++++++++++++------- .../opsum_to_ttn/qnarrelem.jl | 13 ++-- 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 3ed5da26..dca5281b 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -1,15 +1,14 @@ -#using FillArrays: OneElement -#using DataGraphs: DataGraph +using DataGraphs: DataGraph using Graphs: degree, is_tree, steiner_tree using ITensorMPS: ITensorMPS, cutoff, linkdims, ops, truncate!, val using ITensors: flux, has_fermion_string, itensor, removeqns, space using ITensors.LazyApply: Prod, Sum, coefficient using ITensors.NDTensors: Block, blockdim, maxdim, nblocks, nnzblocks -using ITensors.Ops: - argument, coefficient, Op, OpSum, name, params, site, terms, which_op +using ITensors.Ops: argument, coefficient, Op, OpSum, name, params, site, terms, which_op using IterTools: partition using NamedGraphs.GraphsExtensions: GraphsExtensions, boundary_edges, degrees, is_leaf_vertex, vertex_path +using NDTensors.SparseArrayDOKs: SparseArrayDOK using StaticArrays: MVector # @@ -104,18 +103,27 @@ function make_symbolic_ttn( # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. - inbond_coefs = Dict( - e => Dict{QN,Vector{MatElem{coefficient_type}}}() for e in ordered_edges - ) + + #edge_matrix_type = SparseArrayDOK{coefficient_type} + edge_matrix_type = Vector{MatElem{coefficient_type}} + + edge_data_eltype = Dict{QN,edge_matrix_type} + inbond_coefs = DataGraph(graph; edge_data_eltype) + # Default initialize the DataGraph + for e in edges(inbond_coefs) + inbond_coefs[e] = edge_data_eltype() + end # List of terms for which the coefficient has been added to a site factor site_coef_done = Prod{Op}[] # Temporary symbolic representation of TTN Hamiltonian - symbolic_ttn = Dict( - v => QNArrElem{Scaled{coefficient_type,Prod{Op}},degree(graph, v)}[] for - v in ordered_verts - ) + vertex_data_eltype = Vector{QNArrElem{Scaled{coefficient_type,Prod{Op}}}} + symbolic_ttn = DataGraph(graph; vertex_data_eltype) + # Default initialize the DataGraph + for v in vertices(symbolic_ttn) + symbolic_ttn[v] = vertex_data_eltype() + end # Build compressed finite state machine representation (symbolic_ttn) for v in ordered_verts @@ -134,6 +142,8 @@ function make_symbolic_ttn( # For every site v' except v, determine the incident edge to v # that lies in the edge_path(v',v) # TODO: better way to make which_incident_edge below? 
+ # TODO: instead of split at vertex, should + # be able to traverse subtrees formed by cutting incoming bond subgraphs = split_at_vertex(graph, v) _boundary_edges = [only(boundary_edges(graph, subgraph)) for subgraph in subgraphs] _boundary_edges = align_edges(_boundary_edges, edges) @@ -152,15 +162,14 @@ function make_symbolic_ttn( op_sites = ITensors.sites(term) # Only consider terms crossing current vertex - crosses_vertex = any( - ((s1, s2),) -> (v ∈ vertex_path(graph, s1, s2)), partition(op_sites, 2, 1) - ) + crosses_v((s1, s2),) = (v ∈ vertex_path(graph, s1, s2)) + v_crossed = any(crosses_v, partition(op_sites, 2, 1)) # TODO: seems we could make this just - # crosses_vertex = (v ∈ vertices(steiner_tree(graph,op_sites)))) + # v_crossed = (v ∈ vertices(steiner_tree(graph,op_sites)))) # but it is not working - steiner_tree seems to have spurious extra vertices? # If term doesn't cross vertex, skip it - (crosses_vertex || (v ∈ op_sites)) || continue + (v_crossed || (v ∈ op_sites)) || continue # Filter out ops that acts on current vertex onsite_ops = filter(t -> (site(t) == v), ops) @@ -186,8 +195,8 @@ function make_symbolic_ttn( site_qn = term_qn_map(onsite_ops) # Initialize QNArrayElement indices and quantum numbers - T_inds = MVector{v_degree}(fill(-1, v_degree)) - T_qns = MVector{v_degree}(fill(QN(), v_degree)) + T_inds = fill(-1, v_degree) + T_qns = fill(QN(), v_degree) # initialize ArrayElement indices for inbond_coefs bond_row = -1 bond_col = -1 @@ -200,10 +209,16 @@ function make_symbolic_ttn( bond_row = pos_in_link!(cinmap, incoming_ops) bond_col = pos_in_link!(coutmap, non_incoming_ops) # get incoming channel bond_coef = convert(coefficient_type, coefficient(term)) - q_inbond_coefs = get!( - inbond_coefs[edge_in], incoming_qn, MatElem{coefficient_type}[] - ) - push!(q_inbond_coefs, MatElem(bond_row, bond_col, bond_coef)) + #edge_coefs = get!(inbond_coefs,edge_in,Dict{QN,edge_matrix_type}()) + + # TODO: alternate version using SparseArraysDOK + #q_edge_coefs = get!(edge_coefs, incoming_qn, edge_matrix_type()) + #q_edge_coefs[bond_row,bond_col] = bond_coef + + # Version using MatElem + q_edge_coefs = get!(inbond_coefs[edge_in], incoming_qn, edge_matrix_type[]) + push!(q_edge_coefs, MatElem(bond_row, bond_col, bond_coef)) + T_inds[dim_in] = bond_col T_qns[dim_in] = -incoming_qn end diff --git a/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl b/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl index 4f765f07..0b6101d4 100644 --- a/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl +++ b/src/treetensornetworks/opsum_to_ttn/qnarrelem.jl @@ -1,21 +1,22 @@ -using StaticArrays: MVector ##################################### # QNArrElem (sparse array with QNs) # ##################################### -struct QNArrElem{T,N} - qn_idxs::MVector{N,QN} - idxs::MVector{N,Int} +struct QNArrElem{T} + qn_idxs::Vector{QN} + idxs::Vector{Int} val::T end -function Base.:(==)(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} +function Base.:(==)(a1::QNArrElem{T}, a2::QNArrElem{T})::Bool where {T} return (a1.idxs == a2.idxs && a1.val == a2.val && a1.qn_idxs == a2.qn_idxs) end -function Base.isless(a1::QNArrElem{T,N}, a2::QNArrElem{T,N})::Bool where {T,N} +function Base.isless(a1::QNArrElem{T}, a2::QNArrElem{T})::Bool where {T} ###two separate loops s.t. 
the MPS case reduces to the ITensors Implementation of QNMatElem + N = length(a1.qn_idxs) + @assert N == length(a2.qn_idxs) for n in 1:N if a1.qn_idxs[n] != a2.qn_idxs[n] return a1.qn_idxs[n] < a2.qn_idxs[n] From 8e20c23c21963cca6157f5b713dfd12a761ffeec Mon Sep 17 00:00:00 2001 From: Miles Date: Sat, 11 May 2024 23:35:59 -0400 Subject: [PATCH 04/14] Remove where keyword in abstractprojttn.jl --- src/treetensornetworks/projttns/abstractprojttn.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/treetensornetworks/projttns/abstractprojttn.jl b/src/treetensornetworks/projttns/abstractprojttn.jl index 55852677..cd5fdae3 100644 --- a/src/treetensornetworks/projttns/abstractprojttn.jl +++ b/src/treetensornetworks/projttns/abstractprojttn.jl @@ -31,12 +31,12 @@ on_edge(P::AbstractProjTTN) = isa(pos(P), edgetype(P)) ITensorMPS.nsite(P::AbstractProjTTN) = on_edge(P) ? 0 : length(pos(P)) -function sites(P::AbstractProjTTN{V}) where {V} - on_edge(P) && return V[] +function sites(P::AbstractProjTTN) + on_edge(P) && return vertextype(P)[] return pos(P) end -function NamedGraphs.incident_edges(P::AbstractProjTTN{V}) where {V} +function NamedGraphs.incident_edges(P::AbstractProjTTN) on_edge(P) && return [pos(P), reverse(pos(P))] edges = [ [edgetype(P)(n => v) for n in setdiff(neighbors(underlying_graph(P), v), sites(P))] for @@ -45,7 +45,7 @@ function NamedGraphs.incident_edges(P::AbstractProjTTN{V}) where {V} return collect(Base.Iterators.flatten(edges)) end -function internal_edges(P::AbstractProjTTN{V}) where {V} +function internal_edges(P::AbstractProjTTN) on_edge(P) && return edgetype(P)[] edges = [ [edgetype(P)(v => n) for n in neighbors(underlying_graph(P), v) ∩ sites(P)] for From 6184f496e35670b94239e675a9eb1c75b49e844b Mon Sep 17 00:00:00 2001 From: Miles Date: Sat, 11 May 2024 23:40:43 -0400 Subject: [PATCH 05/14] Remove unneeded dictionary --- src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index dca5281b..4fb5e8fb 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -183,12 +183,6 @@ function make_symbolic_ttn( t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops ) - # For every outgoing edge, filter out ops that go out along that edge - outgoing_ops = Dict( - e => filter(t -> which_incident_edge[site(t)] == e, non_onsite_ops) for - e in edges_out - ) - # Compute QNs incoming_qn = term_qn_map(incoming_ops) non_incoming_qn = term_qn_map(non_incoming_ops) @@ -209,7 +203,6 @@ function make_symbolic_ttn( bond_row = pos_in_link!(cinmap, incoming_ops) bond_col = pos_in_link!(coutmap, non_incoming_ops) # get incoming channel bond_coef = convert(coefficient_type, coefficient(term)) - #edge_coefs = get!(inbond_coefs,edge_in,Dict{QN,edge_matrix_type}()) # TODO: alternate version using SparseArraysDOK #q_edge_coefs = get!(edge_coefs, incoming_qn, edge_matrix_type()) @@ -224,7 +217,7 @@ function make_symbolic_ttn( end for dout in dims_out out_edge = edges[dout] - out_op = outgoing_ops[out_edge] + out_op = filter(t -> which_incident_edge[site(t)] == out_edge, non_onsite_ops) coutmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) # Add outgoing channel T_inds[dout] = pos_in_link!(coutmap, out_op) From b7c5ec80929b9f26cb1fa1a03c6083451fd2c3e7 Mon Sep 17 00:00:00 2001 From: Miles Date: 
Sat, 11 May 2024 23:49:30 -0400 Subject: [PATCH 06/14] Minor changes --- src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 4fb5e8fb..bca8164e 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -197,11 +197,11 @@ function make_symbolic_ttn( if !isempty(incoming_ops) # Get the correct map from edge=>QN to term and channel. # This checks if term exists on edge=>QN (otherwise insert it) and returns its index. - coutmap = get!(outmaps, edge_in => non_incoming_qn, Dict{Vector{Op},Int}()) - cinmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}()) + qn_outmap = get!(outmaps, edge_in => non_incoming_qn, Dict{Vector{Op},Int}()) + qn_inmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}()) - bond_row = pos_in_link!(cinmap, incoming_ops) - bond_col = pos_in_link!(coutmap, non_incoming_ops) # get incoming channel + bond_row = pos_in_link!(qn_inmap, incoming_ops) + bond_col = pos_in_link!(qn_outmap, non_incoming_ops) # get incoming channel bond_coef = convert(coefficient_type, coefficient(term)) # TODO: alternate version using SparseArraysDOK @@ -218,9 +218,9 @@ function make_symbolic_ttn( for dout in dims_out out_edge = edges[dout] out_op = filter(t -> which_incident_edge[site(t)] == out_edge, non_onsite_ops) - coutmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) + qn_outmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) # Add outgoing channel - T_inds[dout] = pos_in_link!(coutmap, out_op) + T_inds[dout] = pos_in_link!(qn_outmap, out_op) T_qns[dout] = term_qn_map(out_op) end # If term starts at this site, add its coefficient as a site factor From 232600905cda73e16f7d2ea4a06dd8a59ec9782d Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 00:07:42 -0400 Subject: [PATCH 07/14] Improve svd_bond_coefs function --- .../opsum_to_ttn/opsum_to_ttn.jl | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index bca8164e..d311dedf 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -250,20 +250,18 @@ end function svd_bond_coefs( coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... ) - Vs = Dict(e => Dict{QN,Matrix{coefficient_type}}() for e in ordered_edges) - for v in ordered_verts - edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) - dim_in = findfirst(e -> dst(e) == v, edges) - if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]]) - for (q, mat) in inbond_coefs[edges[dim_in]] - M = toMatrix(mat) - U, S, V = svd(M) - P = S .^ 2 - truncate!(P; kws...) - tdim = length(P) - nc = size(M, 2) - Vs[edges[dim_in]][q] = Matrix{coefficient_type}(V[1:nc, 1:tdim]) - end + edge_data_eltype = Dict{QN,Matrix{coefficient_type}} + Vs = DataGraph(sites; edge_data_eltype) + for e in edges(sites) + Vs[e] = edge_data_eltype() + for (q, coefs) in inbond_coefs[e] + M = toMatrix(coefs) + U, S, V = svd(M) + P = S .^ 2 + truncate!(P; kws...) 
+ tdim = length(P) + nc = size(M, 2) + Vs[e][q] = Matrix{coefficient_type}(V[1:nc, 1:tdim]) end end return Vs From 957de59a63f2a5b6d5f64880cc6883327121ac4c Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 00:07:58 -0400 Subject: [PATCH 08/14] Use DataGraph in more places --- .../opsum_to_ttn/opsum_to_ttn.jl | 43 ++++++++----------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index d311dedf..8080985c 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -97,19 +97,16 @@ function make_symbolic_ttn( root_vertex, term_qn_map, ) - graph = underlying_graph(sites) - inmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() - outmaps = Dict{Pair{edgetype(graph),QN},Dict{Vector{Op},Int}}() + inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() + outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() # Bond coefficients for incoming edge channels. # These become the "M" coefficient matrices that get SVD'd. - #edge_matrix_type = SparseArrayDOK{coefficient_type} edge_matrix_type = Vector{MatElem{coefficient_type}} - edge_data_eltype = Dict{QN,edge_matrix_type} - inbond_coefs = DataGraph(graph; edge_data_eltype) - # Default initialize the DataGraph + inbond_coefs = DataGraph(sites; edge_data_eltype) + # Default initialize the coefs DataGraph for e in edges(inbond_coefs) inbond_coefs[e] = edge_data_eltype() end @@ -119,19 +116,17 @@ function make_symbolic_ttn( # Temporary symbolic representation of TTN Hamiltonian vertex_data_eltype = Vector{QNArrElem{Scaled{coefficient_type,Prod{Op}}}} - symbolic_ttn = DataGraph(graph; vertex_data_eltype) - # Default initialize the DataGraph - for v in vertices(symbolic_ttn) - symbolic_ttn[v] = vertex_data_eltype() - end + symbolic_ttn = DataGraph(sites; vertex_data_eltype) # Build compressed finite state machine representation (symbolic_ttn) for v in ordered_verts - v_degree = degree(graph, v) + symbolic_ttn[v] = vertex_data_eltype() + v_degree = degree(sites, v) + # For every vertex, find all edges that contain this vertex # (align_and_reorder_edges makes the output of indicident edges match the # direction and ordering match that of ordered_edges) - edges = align_and_reorder_edges(incident_edges(graph, v), ordered_edges) + edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) # Use the corresponding ordering as index order for tensor elements at this site dim_in = findfirst(e -> dst(e) == v, edges) @@ -144,8 +139,8 @@ function make_symbolic_ttn( # TODO: better way to make which_incident_edge below? 
# TODO: instead of split at vertex, should # be able to traverse subtrees formed by cutting incoming bond - subgraphs = split_at_vertex(graph, v) - _boundary_edges = [only(boundary_edges(graph, subgraph)) for subgraph in subgraphs] + subgraphs = split_at_vertex(sites, v) + _boundary_edges = [only(boundary_edges(sites, subgraph)) for subgraph in subgraphs] _boundary_edges = align_edges(_boundary_edges, edges) which_incident_edge = Dict( Iterators.flatten([ @@ -153,19 +148,15 @@ function make_symbolic_ttn( ]), ) - # Sanity check, leaves only have single incoming or outgoing edge - #@assert !isempty(dims_out) || !isnothing(dim_in) - #(isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf_vertex(graph, v) - for term in opsum ops = terms(term) op_sites = ITensors.sites(term) # Only consider terms crossing current vertex - crosses_v((s1, s2),) = (v ∈ vertex_path(graph, s1, s2)) + crosses_v((s1, s2),) = (v ∈ vertex_path(sites, s1, s2)) v_crossed = any(crosses_v, partition(op_sites, 2, 1)) # TODO: seems we could make this just - # v_crossed = (v ∈ vertices(steiner_tree(graph,op_sites)))) + # v_crossed = (v ∈ vertices(steiner_tree(sites,op_sites)))) # but it is not working - steiner_tree seems to have spurious extra vertices? # If term doesn't cross vertex, skip it @@ -186,7 +177,6 @@ function make_symbolic_ttn( # Compute QNs incoming_qn = term_qn_map(incoming_ops) non_incoming_qn = term_qn_map(non_incoming_ops) - site_qn = term_qn_map(onsite_ops) # Initialize QNArrayElement indices and quantum numbers T_inds = fill(-1, v_degree) @@ -273,7 +263,8 @@ function compress_ttn( # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this sites = copy(sites0) - is_internal = Dict{vertextype(sites),Bool}() + + is_internal = DataGraph(sites; vertex_data_eltype=Bool) for v in ordered_verts is_internal[v] = isempty(sites[v]) if isempty(sites[v]) @@ -287,7 +278,7 @@ function compress_ttn( # Compress this symbolic_ttn representation into dense form thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) - link_space = Dict{edgetype(sites),Index}() + link_space = DataGraph(sites; edge_data_eltype=Index) for e in ordered_edges operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] link_space[e] = Index( @@ -508,7 +499,7 @@ end # needed an extra `only` compared to ITensors version since IndsNetwork has Vector{<:Index} # as vertex data -function isfermionic(t::Vector{Op}, sites::IndsNetwork{V,<:Index}) where {V} +function isfermionic(t::Vector{Op}, sites::IndsNetwork) p = +1 for op in t if has_fermion_string(name(op), only(sites[site(op)])) From 984292094f3b4a662050473432b98c8c32278f71 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 00:18:16 -0400 Subject: [PATCH 09/14] More code cleeaning --- .../opsum_to_ttn/opsum_to_ttn.jl | 32 ++++++++----------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 8080985c..c1480387 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -105,10 +105,10 @@ function make_symbolic_ttn( #edge_matrix_type = SparseArrayDOK{coefficient_type} edge_matrix_type = Vector{MatElem{coefficient_type}} edge_data_eltype = Dict{QN,edge_matrix_type} - inbond_coefs = DataGraph(sites; edge_data_eltype) + bond_coefs = DataGraph(sites; edge_data_eltype) # Default initialize the coefs DataGraph - for e in 
edges(inbond_coefs) - inbond_coefs[e] = edge_data_eltype() + for e in edges(bond_coefs) + bond_coefs[e] = edge_data_eltype() end # List of terms for which the coefficient has been added to a site factor @@ -181,7 +181,7 @@ function make_symbolic_ttn( # Initialize QNArrayElement indices and quantum numbers T_inds = fill(-1, v_degree) T_qns = fill(QN(), v_degree) - # initialize ArrayElement indices for inbond_coefs + # Initialize ArrayElement indices for bond_coefs bond_row = -1 bond_col = -1 if !isempty(incoming_ops) @@ -199,7 +199,7 @@ function make_symbolic_ttn( #q_edge_coefs[bond_row,bond_col] = bond_coef # Version using MatElem - q_edge_coefs = get!(inbond_coefs[edge_in], incoming_qn, edge_matrix_type[]) + q_edge_coefs = get!(bond_coefs[edge_in], incoming_qn, edge_matrix_type[]) push!(q_edge_coefs, MatElem(bond_row, bond_col, bond_coef)) T_inds[dim_in] = bond_col @@ -234,24 +234,20 @@ function make_symbolic_ttn( _sort_unique!(symbolic_ttn[v]) end - return symbolic_ttn, inbond_coefs + return symbolic_ttn, bond_coefs end -function svd_bond_coefs( - coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... -) +function svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) edge_data_eltype = Dict{QN,Matrix{coefficient_type}} Vs = DataGraph(sites; edge_data_eltype) for e in edges(sites) Vs[e] = edge_data_eltype() - for (q, coefs) in inbond_coefs[e] + for (q, coefs) in bond_coefs[e] M = toMatrix(coefs) U, S, V = svd(M) P = S .^ 2 truncate!(P; kws...) - tdim = length(P) - nc = size(M, 2) - Vs[e][q] = Matrix{coefficient_type}(V[1:nc, 1:tdim]) + Vs[e][q] = Matrix(V[:, 1:length(P)]) end end return Vs @@ -265,7 +261,7 @@ function compress_ttn( sites = copy(sites0) is_internal = DataGraph(sites; vertex_data_eltype=Bool) - for v in ordered_verts + for v in vertices(sites) is_internal[v] = isempty(sites[v]) if isempty(sites[v]) # FIXME: This logic only works for trivial flux, breaks for nonzero flux @@ -279,7 +275,7 @@ function compress_ttn( thishasqns = any(v -> hasqns(sites[v]), vertices(sites)) link_space = DataGraph(sites; edge_data_eltype=Index) - for e in ordered_edges + for e in edges(sites) operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] link_space[e] = Index( QN() => 1, operator_blocks..., Hflux => 1; tags=edge_tag(e), dir=linkdir_ref @@ -474,13 +470,11 @@ function ttn_svd( # Store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) - symbolic_ttn, inbond_coefs = make_symbolic_ttn( + symbolic_ttn, bond_coefs = make_symbolic_ttn( coefficient_type, os, sites; ordered_verts, ordered_edges, root_vertex, term_qn_map ) - Vs = svd_bond_coefs( - coefficient_type, sites, inbond_coefs; ordered_verts, ordered_edges, kws... - ) + Vs = svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) 
Hflux = -term_qn_map(terms(first(terms(os)))) From 46d731aadb51c3687eae637e90eac9d366ead521 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 00:22:18 -0400 Subject: [PATCH 10/14] Just iterate over vertices(sites) --- .../opsum_to_ttn/opsum_to_ttn.jl | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index c1480387..06e6a6db 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -92,7 +92,6 @@ function make_symbolic_ttn( coefficient_type, opsum::OpSum, sites::IndsNetwork; - ordered_verts, ordered_edges, root_vertex, term_qn_map, @@ -119,7 +118,7 @@ function make_symbolic_ttn( symbolic_ttn = DataGraph(sites; vertex_data_eltype) # Build compressed finite state machine representation (symbolic_ttn) - for v in ordered_verts + for v in vertices(sites) symbolic_ttn[v] = vertex_data_eltype() v_degree = degree(sites, v) @@ -253,9 +252,7 @@ function svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) return Vs end -function compress_ttn( - coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges -) +function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered_edges) # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this sites = copy(sites0) @@ -291,7 +288,7 @@ function compress_ttn( end qnblockdim(i::Index, q::QN) = blockdim(i, qnblock(i, q)) - for v in ordered_verts + for v in vertices(sites) v_degree = degree(sites, v) # Redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above @@ -465,22 +462,18 @@ function ttn_svd( ) term_qn_map = TermQNMap{vertextype(sites)}(sites) - # Traverse tree outwards from root vertex - ordered_verts = _default_vertex_ordering(sites, root_vertex) # Store edges in fixed ordering relative to root ordered_edges = _default_edge_ordering(sites, root_vertex) symbolic_ttn, bond_coefs = make_symbolic_ttn( - coefficient_type, os, sites; ordered_verts, ordered_edges, root_vertex, term_qn_map + coefficient_type, os, sites; ordered_edges, root_vertex, term_qn_map ) Vs = svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) Hflux = -term_qn_map(terms(first(terms(os)))) - T = compress_ttn( - coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_verts, ordered_edges - ) + T = compress_ttn(coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_edges) return T end From 0fafbbfca795c7d9004a8e97e2d4e7ad48fa2c3a Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 00:47:20 -0400 Subject: [PATCH 11/14] Removed ordered_verts array and simplified ttn_svd --- .../opsum_to_ttn/opsum_to_ttn.jl | 100 ++++++++---------- 1 file changed, 45 insertions(+), 55 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 06e6a6db..d7597a43 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -41,18 +41,6 @@ function determine_coefficient_type(terms) return typeof(coefficient(first(terms))) end -""" - ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex, kwargs...) - -Construct a TreeTensorNetwork from a symbolic OpSum representation of a -Hamiltonian, compressing shared interaction channels. 
-""" -function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) - # Function barrier to improve type stability - coefficient_type = determine_coefficient_type(terms(os)) - return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) -end - # TODO: should be equivalent to `sort!(v); unique!(v)` however, # Base.unique! is giving incorrect results for data involving the # Prod type from LazyApply. This could be a combination of Base.unique! @@ -89,13 +77,9 @@ function pos_in_link!(linkmap::Dict, k) end function make_symbolic_ttn( - coefficient_type, - opsum::OpSum, - sites::IndsNetwork; - ordered_edges, - root_vertex, - term_qn_map, + coefficient_type, opsum::OpSum, sites::IndsNetwork; root_vertex, term_qn_map ) + ordered_edges = _default_edge_ordering(sites, root_vertex) inmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() outmaps = Dict{Pair{edgetype(sites),QN},Dict{Vector{Op},Int}}() @@ -125,13 +109,13 @@ function make_symbolic_ttn( # For every vertex, find all edges that contain this vertex # (align_and_reorder_edges makes the output of indicident edges match the # direction and ordering match that of ordered_edges) - edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) + v_edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) # Use the corresponding ordering as index order for tensor elements at this site - dim_in = findfirst(e -> dst(e) == v, edges) - edge_in = (isnothing(dim_in) ? nothing : edges[dim_in]) - dims_out = findall(e -> src(e) == v, edges) - edges_out = edges[dims_out] + dim_in = findfirst(e -> dst(e) == v, v_edges) + edge_in = (isnothing(dim_in) ? nothing : v_edges[dim_in]) + dims_out = findall(e -> src(e) == v, v_edges) + edges_out = v_edges[dims_out] # For every site v' except v, determine the incident edge to v # that lies in the edge_path(v',v) @@ -140,7 +124,7 @@ function make_symbolic_ttn( # be able to traverse subtrees formed by cutting incoming bond subgraphs = split_at_vertex(sites, v) _boundary_edges = [only(boundary_edges(sites, subgraph)) for subgraph in subgraphs] - _boundary_edges = align_edges(_boundary_edges, edges) + _boundary_edges = align_edges(_boundary_edges, v_edges) which_incident_edge = Dict( Iterators.flatten([ subgraphs[i] .=> ((_boundary_edges[i]),) for i in eachindex(subgraphs) @@ -205,7 +189,7 @@ function make_symbolic_ttn( T_qns[dim_in] = -incoming_qn end for dout in dims_out - out_edge = edges[dout] + out_edge = v_edges[dout] out_op = filter(t -> which_incident_edge[site(t)] == out_edge, non_onsite_ops) qn_outmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) # Add outgoing channel @@ -252,9 +236,12 @@ function svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) return Vs end -function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered_edges) +function compress_ttn( + coefficient_type, sites0, symbolic_ttn, Vs; root_vertex, total_qn=QN() +) # Insert dummy indices on internal vertices, these will not show up in the final tensor # TODO: come up with a better solution for this + ordered_edges = _default_edge_ordering(sites0, root_vertex) sites = copy(sites0) is_internal = DataGraph(sites; vertex_data_eltype=Bool) @@ -263,7 +250,7 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered if isempty(sites[v]) # FIXME: This logic only works for trivial flux, breaks for nonzero flux # TODO: add assert or fix and add test! 
- sites[v] = [Index(Hflux => 1)] + sites[v] = [Index(total_qn => 1)] end end @@ -275,7 +262,7 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered for e in edges(sites) operator_blocks = [q => size(Vq, 2) for (q, Vq) in Vs[e]] link_space[e] = Index( - QN() => 1, operator_blocks..., Hflux => 1; tags=edge_tag(e), dir=linkdir_ref + QN() => 1, operator_blocks..., total_qn => 1; tags=edge_tag(e), dir=linkdir_ref ) end @@ -292,12 +279,12 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered v_degree = degree(sites, v) # Redo the whole thing like before # TODO: use neighborhood instead of going through all edges, see above - edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) - dim_in = findfirst(e -> dst(e) == v, edges) - dims_out = findall(e -> src(e) == v, edges) + v_edges = align_and_reorder_edges(incident_edges(sites, v), ordered_edges) + dim_in = findfirst(e -> dst(e) == v, v_edges) + dims_out = findall(e -> src(e) == v, v_edges) # slice isometries at this vertex - Vv = [Vs[e] for e in edges] - linkinds = [link_space[e] for e in edges] + Vv = [Vs[e] for e in v_edges] + linkinds = [link_space[e] for e in v_edges] # construct blocks blocks = Dict{Tuple{Block{v_degree},Vector{Op}},Array{coefficient_type,v_degree}}() @@ -358,14 +345,14 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered end for ((b, q_op), m) in blocks - Op = computeSiteProd(sites, Prod(q_op)) + Op = compute_site_prod(sites, Prod(q_op)) if hasqns(Op) # FIXME: this may not be safe, we may want to check for the equivalent (zero tensor?) case in the dense case as well iszero(nnzblocks(Op)) && continue end sq = flux(Op) if !isnothing(sq) - rq = (b[1] == 1 ? Hflux : first(space(linkinds[1])[b[1]])) # get row (dim_in) QN + rq = (b[1] == 1 ? total_qn : first(space(linkinds[1])[b[1]])) # get row (dim_in) QN cq = rq - sq # get column (out_dims) QN if ITensors.using_auto_fermion() # we need to account for the direct product below ordering the physical indices as the last indices @@ -388,7 +375,7 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered else #TODO: Remove this assert since it seems to be costly #if hasqns(iT) - # @assert flux(iT * Op) == Hflux + # @assert flux(iT * Op) == total_qn #end H[v] += (iT * Op) end @@ -424,7 +411,7 @@ function compress_ttn(coefficient_type, sites0, Hflux, symbolic_ttn, Vs; ordered if is_internal[v] H[v] += T else - H[v] += T * ITensorNetworks.computeSiteProd(sites, Prod([(Op("Id", v))])) + H[v] += T * compute_site_prod(sites, Prod([(Op("Id", v))])) end end return H @@ -457,23 +444,25 @@ function (t::TermQNMap)(term) return q end -function ttn_svd( - coefficient_type::Type{<:Number}, os::OpSum, sites::IndsNetwork, root_vertex; kws... -) - term_qn_map = TermQNMap{vertextype(sites)}(sites) +""" + ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex, kwargs...) - # Store edges in fixed ordering relative to root - ordered_edges = _default_edge_ordering(sites, root_vertex) +Construct a TreeTensorNetwork from a symbolic OpSum representation of a +Hamiltonian, compressing shared interaction channels. +""" +function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kws...) 
+ coefficient_type = determine_coefficient_type(terms(os)) + + term_qn_map = TermQNMap{vertextype(sites)}(sites) symbolic_ttn, bond_coefs = make_symbolic_ttn( - coefficient_type, os, sites; ordered_edges, root_vertex, term_qn_map + coefficient_type, os, sites; root_vertex, term_qn_map ) Vs = svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) - Hflux = -term_qn_map(terms(first(terms(os)))) - - T = compress_ttn(coefficient_type, sites, Hflux, symbolic_ttn, Vs; ordered_edges) + total_qn = -term_qn_map(terms(first(terms(os)))) + T = compress_ttn(coefficient_type, sites, symbolic_ttn, Vs; root_vertex, total_qn) return T end @@ -484,7 +473,7 @@ end # TODO: fix fermion support, definitely broken -# needed an extra `only` compared to ITensors version since IndsNetwork has Vector{<:Index} +# Needed an extra `only` compared to ITensors version since IndsNetwork has Vector{<:Index} # as vertex data function isfermionic(t::Vector{Op}, sites::IndsNetwork) p = +1 @@ -497,13 +486,14 @@ function isfermionic(t::Vector{Op}, sites::IndsNetwork) end # only(site(ops[1])) in ITensors breaks for Tuple site labels, had to drop the only -function computeSiteProd(sites::IndsNetwork{V,<:Index}, ops::Prod{Op})::ITensor where {V} - v = site(ops[1]) - T = op(sites[v], which_op(ops[1]); params(ops[1])...) - for j in 2:length(ops) - (site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd") - opj = op(sites[v], which_op(ops[j]); params(ops[j])...) - T = product(T, opj) +function compute_site_prod(sites::IndsNetwork, ops::Prod{Op}) + v = site(first(ops)) + s = sites[v] + T = op(s, which_op(ops[1]); params(ops[1])...) + for o in ops[2:end] + (site(o) != v) && error("Mismatch of vertex labels in compute_site_prod") + t = op(s, which_op(o); params(o)...) + T = product(T, t) end return T end From bd32fe2f8dd354b38afa36aa6de173809719098e Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 01:01:44 -0400 Subject: [PATCH 12/14] Simplify MatElem type --- .../opsum_to_ttn/matelem.jl | 39 ++++++------------- .../opsum_to_ttn/opsum_to_ttn.jl | 2 +- 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/matelem.jl b/src/treetensornetworks/opsum_to_ttn/matelem.jl index d1b98dc0..4b8fc397 100644 --- a/src/treetensornetworks/opsum_to_ttn/matelem.jl +++ b/src/treetensornetworks/opsum_to_ttn/matelem.jl @@ -1,41 +1,26 @@ -# -# MatElem -# - struct MatElem{T} row::Int col::Int val::T end -#function Base.show(io::IO,m::MatElem) -# print(io,"($(m.row),$(m.col),$(m.val))") -#end +Base.eltype(::MatElem{T}) where {T} = T -function toMatrix(els::Vector{MatElem{T}})::Matrix{T} where {T} - nr = 0 - nc = 0 - for el in els - nr = max(nr, el.row) - nc = max(nc, el.col) +function col_major_order(el1::MatElem, el2::MatElem) + if el1.col == el2.col + return el1.row < el2.row end - M = zeros(T, nr, nc) + return el1.col < el2.col +end + +function to_matrix(els::Vector{<:MatElem}) + els = sort(els; lt=col_major_order) + nr = maximum(el -> el.row, els) + nc = last(els).col + M = zeros(eltype(first(els)), nr, nc) for el in els M[el.row, el.col] = el.val end return M end - -function Base.:(==)(m1::MatElem{T}, m2::MatElem{T})::Bool where {T} - return (m1.row == m2.row && m1.col == m2.col && m1.val == m2.val) -end - -function Base.isless(m1::MatElem{T}, m2::MatElem{T})::Bool where {T} - if m1.row != m2.row - return m1.row < m2.row - elseif m1.col != m2.col - return m1.col < m2.col - end - return m1.val < m2.val -end diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl 
b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index d7597a43..78dc2a86 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -226,7 +226,7 @@ function svd_bond_coefs(coefficient_type, sites, bond_coefs; kws...) for e in edges(sites) Vs[e] = edge_data_eltype() for (q, coefs) in bond_coefs[e] - M = toMatrix(coefs) + M = to_matrix(coefs) U, S, V = svd(M) P = S .^ 2 truncate!(P; kws...) From ba5707a7f6aea69384875bd74de09b3c12808855 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 01:17:32 -0400 Subject: [PATCH 13/14] More code simplifications --- .../opsum_to_ttn/opsum_to_ttn.jl | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 78dc2a86..8eea4a58 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -137,13 +137,11 @@ function make_symbolic_ttn( # Only consider terms crossing current vertex crosses_v((s1, s2),) = (v ∈ vertex_path(sites, s1, s2)) - v_crossed = any(crosses_v, partition(op_sites, 2, 1)) + v_crossed = (v ∈ op_sites) || any(crosses_v, partition(op_sites, 2, 1)) # TODO: seems we could make this just # v_crossed = (v ∈ vertices(steiner_tree(sites,op_sites)))) # but it is not working - steiner_tree seems to have spurious extra vertices? - - # If term doesn't cross vertex, skip it - (v_crossed || (v ∈ op_sites)) || continue + v_crossed || continue # Filter out ops that acts on current vertex onsite_ops = filter(t -> (site(t) == v), ops) @@ -151,11 +149,8 @@ function make_symbolic_ttn( # Filter out ops that come in from the direction of the incoming edge incoming_ops = filter(t -> which_incident_edge[site(t)] == edge_in, non_onsite_ops) - # Also store all non-incoming ops in standard order, used for channel merging - non_incoming_ops = filter( - t -> (site(t) == v) || which_incident_edge[site(t)] != edge_in, ops - ) + non_incoming_ops = setdiff(ops, incoming_ops) # Compute QNs incoming_qn = term_qn_map(incoming_ops) @@ -190,11 +185,11 @@ function make_symbolic_ttn( end for dout in dims_out out_edge = v_edges[dout] - out_op = filter(t -> which_incident_edge[site(t)] == out_edge, non_onsite_ops) - qn_outmap = get!(outmaps, out_edge => term_qn_map(out_op), Dict{Vector{Op},Int}()) + out_ops = filter(t -> which_incident_edge[site(t)] == out_edge, non_onsite_ops) + qn_outmap = get!(outmaps, out_edge => term_qn_map(out_ops), Dict{Vector{Op},Int}()) # Add outgoing channel - T_inds[dout] = pos_in_link!(qn_outmap, out_op) - T_qns[dout] = term_qn_map(out_op) + T_inds[dout] = pos_in_link!(qn_outmap, out_ops) + T_qns[dout] = term_qn_map(out_ops) end # If term starts at this site, add its coefficient as a site factor site_coef = one(coefficient_type) From 9677fc00c9d143b79d96f4cb07c5535bebdd8e16 Mon Sep 17 00:00:00 2001 From: Miles Date: Sun, 12 May 2024 19:22:57 +0200 Subject: [PATCH 14/14] Remove a comment --- src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl index 8eea4a58..3c11446a 100644 --- a/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn/opsum_to_ttn.jl @@ -368,10 +368,6 @@ function compress_ttn( if is_internal[v] H[v] += iT else - #TODO: Remove this assert since it seems to be 
costly - #if hasqns(iT) - # @assert flux(iT * Op) == total_qn - #end H[v] += (iT * Op) end end
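A note for readers following the crossing-vertex change in patches 01 and 03: a term is kept at vertex v when v is one of the term's sites, or when v lies on the tree path between some pair of consecutive sites the term acts on (the steiner_tree shortcut is left as a TODO in the diff). The sketch below shows the same test on a plain adjacency-list tree; it is illustrative only and deliberately avoids NamedGraphs, IterTools, and IndsNetwork, so the helper names tree_path and term_crosses_vertex are assumptions, not package functions.

# BFS path between two vertices of a tree given as Dict(vertex => neighbors).
function tree_path(adj::Dict{Int,Vector{Int}}, s::Int, t::Int)
  parent = Dict(s => s)
  queue = [s]
  while !isempty(queue)
    u = popfirst!(queue)
    u == t && break
    for w in adj[u]
      if !haskey(parent, w)
        parent[w] = u
        push!(queue, w)
      end
    end
  end
  path = [t]                       # walk back from t to s to recover the path
  while path[end] != s
    push!(path, parent[path[end]])
  end
  return reverse(path)
end

# A term "crosses" v if v is one of its sites, or if v lies on the path
# between some pair of consecutive sites the term acts on.
function term_crosses_vertex(adj, v, op_sites)
  v in op_sites && return true
  pairs = zip(op_sites[1:(end - 1)], op_sites[2:end])
  return any(((s1, s2),) -> v in tree_path(adj, s1, s2), pairs)
end

# Small tree: 1 - 2 - 3 - 4, with 5 hanging off vertex 2.
adj = Dict(1 => [2], 2 => [1, 3, 5], 3 => [2, 4], 4 => [3], 5 => [2])
@show term_crosses_vertex(adj, 3, [1, 4])   # true: the 1-to-4 path passes through 3
@show term_crosses_vertex(adj, 5, [1, 4])   # false: vertex 5 is off the interaction path

In the patches themselves the same check is written as any(crosses_v, partition(op_sites, 2, 1)), with crosses_v calling vertex_path on the sites graph.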
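Patches 07, 09, and 12 also reorganize how the per-edge coefficient matrices are assembled from sparse MatElem entries and then compressed, keeping only the right singular vectors whose squared singular values survive a cutoff. Below is a hedged standalone sketch of that compression step: it uses LinearAlgebra.svd with a simple relative-cutoff rule in place of ITensorMPS.truncate!, a simplified to_matrix (the patched version sorts entries into column-major order first), and compress_bond_coefs is an illustrative name, not a function in the package.

using LinearAlgebra: svd

# Sparse matrix element, as in matelem.jl after patch 12.
struct MatElem{T}
  row::Int
  col::Int
  val::T
end

# Assemble a dense matrix from a list of sparse entries.
function to_matrix(els::Vector{MatElem{T}}) where {T}
  nr = maximum(el -> el.row, els)
  nc = maximum(el -> el.col, els)
  M = zeros(T, nr, nc)
  for el in els
    M[el.row, el.col] = el.val
  end
  return M
end

# Keep the right singular vectors whose squared singular values carry all but
# (roughly) `cutoff` of the total weight; a simplified stand-in for the
# ITensorMPS.truncate! call used in svd_bond_coefs.
function compress_bond_coefs(els::Vector{<:MatElem}; cutoff=1e-15)
  M = to_matrix(els)
  F = svd(M)
  P = F.S .^ 2
  total = sum(P)
  keep = length(P)
  discarded = 0.0
  while keep > 1 && discarded + P[keep] <= cutoff * total
    discarded += P[keep]
    keep -= 1
  end
  return Matrix(F.V[:, 1:keep])
end

els = [MatElem(1, 1, 1.0), MatElem(2, 2, 1.0), MatElem(2, 1, 1e-12)]
V = compress_bond_coefs(els; cutoff=1e-16)
@show size(V)   # (2, 2) here: both channels survive this tight cutoff

The returned matrix plays the role of Vs[e][q] in svd_bond_coefs: its columns span the interaction channels retained on that edge for the given quantum-number sector.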