From 88af5c7dd8827805322cb9c6a61ea5d936177cc3 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Wed, 8 May 2024 20:57:53 -0400
Subject: [PATCH] Remove examples and NEWS

---
 NEWS.md                    | 71 --------------------------------------
 examples/dmrg.jl           | 24 -------------
 examples/gate_evolution.jl | 46 ------------------------
 3 files changed, 141 deletions(-)
 delete mode 100644 NEWS.md
 delete mode 100644 examples/dmrg.jl
 delete mode 100644 examples/gate_evolution.jl

diff --git a/NEWS.md b/NEWS.md
deleted file mode 100644
index 3c6eddd..0000000
--- a/NEWS.md
+++ /dev/null
@@ -1,71 +0,0 @@
-This file is a (mostly) comprehensive list of changes made in each release of ITensorGPU.jl. For a completely comprehensive but more verbose list, see the [commit history on Github](https://github.com/ITensor/ITensors.jl/commits/main/ITensorGPU).
-
-While we are in v0.x of the package, we will follow the convention that updating from v0.x.y to v0.x.(y+1) (for example v0.1.15 to v0.1.16) should not break your code, unless you are using internal/undocumented features of the code, while updating from `v0.x.y` to `v0.(x+1).y` might break your code, though we will try to add deprecation warnings when possible, such as for simple cases where the name of a function changes.
-
-Note that as of Julia v1.5, in order to see deprecation warnings you will need to start Julia with `julia --depwarn=yes` (previously they were on by default). Please run your code like this before upgrading between minor versions of the code (for example from v0.1.41 to v0.2.0).
-
-After we release v1 of the package, we will start following [semantic versioning](https://semver.org).
-
-ITensorGPU v0.0.7 Release Notes
-===============================
-
-Bugs:
-
-Enhancements:
-
-- Bump version compat for dependencies.
-
-ITensorGPU v0.0.6 Release Notes
-===============================
-
-Bugs:
-
-Enhancements:
-
-ITensorGPU v0.0.5 Release Notes
-===============================
-
-Bugs:
-
-Enhancements:
-
-- Clean up `outer` and add GEMM routing for CUDA (#887)
-
-ITensorGPU v0.0.4 Release Notes
-===============================
-
-Bugs:
-
-Enhancements:
-
-- `cu([[A, B], [C]])` -> `[[cu(A), cu(B)], [cu(C)]]` and same for cpu (#898).
-- Allow cutruncate to work for Float32s (#897).
-
-ITensorGPU v0.0.3 Release Notes
-===============================
-
-Bugs:
-
-- Fix bugs in complex SVD on GPU (with and without truncations) (#871)
-
-Enhancements:
-
-- Remove some unnecessary contract code (#860)
-
-ITensorGPU v0.0.2 Release Notes
-===============================
-
-Bugs:
-
-- Remove unnecessary `CuDense` type equality definition (#823)
-
-Enhancements:
-
-ITensorGPU v0.0.1 Release Notes
-===============================
-
-Bugs:
-
-Enhancements:
-
-- Register ITensorGPU package, code in ITensors.jl repository
diff --git a/examples/dmrg.jl b/examples/dmrg.jl
deleted file mode 100644
index e3448cc..0000000
--- a/examples/dmrg.jl
+++ /dev/null
@@ -1,24 +0,0 @@
-using ITensors
-using ITensorGPU
-
-# Set to identity to run on CPU
-device = cu
-
-N = 50
-sites = siteinds("S=1", N)
-
-opsum = OpSum()
-for j in 1:(N - 1)
-  opsum .+= 0.5, "S+", j, "S-", j + 1
-  opsum .+= 0.5, "S-", j, "S+", j + 1
-  opsum .+= "Sz", j, "Sz", j + 1
-end
-H = device(MPO(opsum, sites))
-
-ψ₀ = device(randomMPS(sites))
-
-dmrg_kwargs = (;
-  nsweeps=6, maxdim=[10, 20, 40, 100], mindim=[1, 10], cutoff=1e-11, noise=1e-10
-)
-energy, ψ = @time dmrg(H, ψ₀; dmrg_kwargs...)
-@show energy
diff --git a/examples/gate_evolution.jl b/examples/gate_evolution.jl
deleted file mode 100644
index d570039..0000000
--- a/examples/gate_evolution.jl
+++ /dev/null
@@ -1,46 +0,0 @@
-using ITensors
-using ITensorGPU
-
-import ITensors: space, state, op
-
-space(::SiteType"Qubit") = 2
-state(::SiteType"Qubit", ::StateName"0") = 1
-state(::SiteType"Qubit", ::StateName"1") = 2
-
-op_matrix(s::String) = op_matrix(OpName(s))
-
-op_matrix(::OpName"Id") = [
-  1 0
-  0 1
-]
-
-op_matrix(::OpName"I") = op_matrix("Id")
-
-op_matrix(::OpName"X") = [
-  0 1
-  1 0
-]
-
-function op_matrix(on::OpName, s::Index...; kwargs...)
-  rs = reverse(s)
-  return itensor(op_matrix(on; kwargs...), prime.(rs)..., dag.(rs)...)
-end
-
-op_matrix(gn::String, s::Index...; kwargs...) = op_matrix(OpName(gn), s...; kwargs...)
-
-function op_matrix(gn::String, s::Vector{<:Index}, ns::Int...; kwargs...)
-  return op_matrix(OpName(gn), s[[ns...]]...; kwargs...)
-end
-
-op(gn::OpName, ::SiteType"Qubit", s::Index...; kwargs...) = op_matrix(gn, s...; kwargs...)
-
-N = 10
-s = siteinds("Qubit", N)
-X = cu.(ops(s, [("X", n) for n in 1:N]))
-
-initstate = fill("0", N)
-
-ψ0 = productCuMPS(s, initstate)
-
-gates = [X[n] for n in 1:2:N]
-ψ = apply(gates, ψ0)