Skip to content

Commit cf050da

Browse files
authored
[ITensorsNamedDimsArraysExt] Convert symmetric tensors (ITensor#1578)
* [ITensorsNamedDimsArraysExt] Convert symmetric tensors * [NDTensors] Bump to v0.3.63 * [ITensors] Bump to v0.7.5
1 parent 4996dca commit cf050da

File tree

12 files changed

+107
-11
lines changed

12 files changed

+107
-11
lines changed

NDTensors/Project.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "NDTensors"
22
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
33
authors = ["Matthew Fishman <[email protected]>"]
4-
version = "0.3.62"
4+
version = "0.3.63"
55

66
[deps]
77
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"

NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl

+15
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,21 @@ function blockedunitrange_getindices(
9494
return flip_blockvector(v)
9595
end
9696

97+
# Fixes ambiguity error.
98+
# TODO: Write this in terms of `blockedunitrange_getindices(dual(a), indices)`.
99+
function blockedunitrange_getindices(
100+
a::GradedUnitRangeDual, indices::AbstractBlockVector{<:Block{1}}
101+
)
102+
blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices))
103+
# We pass `length.(blks)` to `mortar` in order
104+
# to pass block labels to the axes of the output,
105+
# if they exist. This makes it so that
106+
# `only(axes(a[indices])) isa GradedUnitRange`
107+
# if `a isa GradedUnitRange`, for example.
108+
v = mortar(blks, labelled_length.(blks))
109+
return flip_blockvector(v)
110+
end
111+
97112
function flip_blockvector(v::BlockVector)
98113
block_axes = flip.(axes(v))
99114
flipped = mortar(vec.(blocks(v)), block_axes)

Project.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "ITensors"
22
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
33
authors = ["Matthew Fishman <[email protected]>", "Miles Stoudenmire <[email protected]>"]
4-
version = "0.7.4"
4+
version = "0.7.5"
55

66
[deps]
77
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"

src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
using Adapt: adapt
2-
using ITensors: MPO, dmrg, random_mps, siteinds
3-
using ITensors.Ops: OpSum
2+
using ITensorMPS: MPO, OpSum, dmrg, random_mps, siteinds
43
using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray
54

65
function main(; n, conserve_qns=false, nsweeps=3, cutoff=1e-4, arraytype=Array)

src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl

+23-5
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
using ..NDTensors: data, inds
21
using ITensors: ITensor
2+
using ..NDTensors: data, inds
33

44
# TODO: Delete this, it is a hack to decide
55
# if an Index is blocked.
@@ -34,21 +34,39 @@ function to_nameddimsarray(x::DiagTensor)
3434
return named(DiagonalArray(data(x), size(x)), name.(inds(x)))
3535
end
3636

37-
using ..NDTensors: BlockSparseTensor
37+
using ITensors: ITensors, dir, qn
38+
using ..NDTensors: BlockSparseTensor, array, blockdim, datatype, nblocks, nzblocks
3839
using ..NDTensors.BlockSparseArrays: BlockSparseArray
40+
using ..NDTensors.BlockSparseArrays.BlockArrays: BlockArrays, blockedrange
41+
using ..NDTensors.GradedAxes: dual, gradedrange
42+
using ..NDTensors.TypeParameterAccessors: set_ndims
3943
# TODO: Delete once `BlockSparse` is removed.
4044
function to_nameddimsarray(x::BlockSparseTensor)
41-
blockinds = map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds(x))
45+
blockinds = map(inds(x)) do i
46+
r = gradedrange([qn(i, b) => blockdim(i, b) for b in 1:nblocks(i)])
47+
if dir(i) == ITensors.In
48+
return dual(r)
49+
end
50+
return r
51+
end
4252
blocktype = set_ndims(datatype(x), ndims(x))
4353
# TODO: Make a simpler constructor:
4454
# BlockSparseArray(blocktype, blockinds)
45-
arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(blockinds)
55+
arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(undef, blockinds)
4656
for b in nzblocks(x)
47-
arraystorage[BlockArrays.Block(Tuple(b)...)] = x[b]
57+
arraystorage[BlockArrays.Block(Int.(Tuple(b))...)] = array(x[b])
4858
end
4959
return named(arraystorage, name.(inds(x)))
5060
end
5161

62+
using ITensors: QN
63+
using ..NDTensors.GradedAxes: GradedAxes
64+
GradedAxes.fuse_labels(l1::QN, l2::QN) = l1 + l2
65+
66+
using ITensors: QN
67+
using ..NDTensors.SymmetrySectors: SymmetrySectors
68+
SymmetrySectors.dual(l::QN) = -l
69+
5270
## TODO: Add this back, define `CombinerArrays` library in NDTensors!
5371
## using ..NDTensors: CombinerTensor, CombinerArray, storage
5472
## # TODO: Delete when we directly use `CombinerArray` as storage.
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
[deps]
2+
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
3+
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
4+
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
5+
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
@eval module $(gensym())
2+
using BlockArrays: blocklengths
3+
using ITensors: ITensor, Index, QN, dag, inds, plev, random_itensor
4+
using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray
5+
using NDTensors: tensor
6+
using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored
7+
using NDTensors.GradedAxes: isdual
8+
using NDTensors.LabelledNumbers: label
9+
using NDTensors.NamedDimsArrays: NamedDimsArray, unname
10+
using Test: @test, @testset
11+
@testset "to_nameddimsarray" begin
12+
i = Index([QN(0) => 2, QN(1) => 3])
13+
a = random_itensor(i', dag(i))
14+
b = to_nameddimsarray(a)
15+
@test b isa ITensor
16+
@test plev(inds(b)[1]) == 1
17+
@test plev(inds(b)[2]) == 0
18+
@test inds(b)[1] == i'
19+
@test inds(b)[2] == dag(i)
20+
nb = tensor(b)
21+
@test nb isa NamedDimsArray{Float64}
22+
bb = unname(nb)
23+
@test bb isa BlockSparseArray{Float64}
24+
@test !isdual(axes(bb, 1))
25+
@test isdual(axes(bb, 2))
26+
@test blocklengths(axes(bb, 1)) == [2, 3]
27+
@test blocklengths(axes(bb, 2)) == [2, 3]
28+
@test label.(blocklengths(axes(bb, 1))) == [QN(0), QN(1)]
29+
@test label.(blocklengths(axes(bb, 2))) == [QN(0), QN(-1)]
30+
@test block_nstored(bb) == 2
31+
@test b' * b ≈ to_nameddimsarray(a' * a)
32+
end
33+
end
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,17 @@
1+
@eval module $(gensym())
2+
using ITensors: ITensors
3+
using Suppressor: @suppress
14
using Test: @testset
2-
35
@testset "examples" begin
4-
include("../examples/example_readme.jl")
6+
@suppress include(
7+
joinpath(
8+
pkgdir(ITensors),
9+
"src",
10+
"lib",
11+
"ITensorsNamedDimsArraysExt",
12+
"examples",
13+
"example_readme.jl",
14+
),
15+
)
16+
end
517
end

test/Project.toml

+1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
[deps]
2+
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
23
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
34
ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
45
Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
[deps]
2+
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
3+
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
4+
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
@eval module $(gensym())
2+
using ITensors: ITensors
3+
include(
4+
joinpath(
5+
pkgdir(ITensors), "src", "lib", "ITensorsNamedDimsArraysExt", "test", "runtests.jl"
6+
),
7+
)
8+
end

test/runtests.jl

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ ITensors.disable_threaded_blocksparse()
1919
"base",
2020
"threading",
2121
"lib/ContractionSequenceOptimization",
22+
"lib/ITensorsNamedDimsArraysExt",
2223
"ext/ITensorsChainRulesCoreExt",
2324
"ext/ITensorsVectorInterfaceExt",
2425
"ext/NDTensorsMappedArraysExt",

0 commit comments

Comments
 (0)