
Commit a8c095e

Adapt to TensorProducts.jl (#24)
1 parent c88635b

9 files changed: +96 −150 lines

Project.toml

Lines changed: 3 additions & 1 deletion
@@ -1,14 +1,15 @@
 name = "GradedUnitRanges"
 uuid = "e2de450a-8a67-46c7-b59c-01d5a3d041c5"
 authors = ["ITensor developers <support@itensor.org> and contributors"]
-version = "0.1.7"
+version = "0.2.0"

 [deps]
 BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
 Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
 FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
 LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
 SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
+TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"

 [weakdeps]
 SymmetrySectors = "f8a8ad64-adbc-4fce-92f7-ffe2bb36a86e"
@@ -23,4 +24,5 @@ FillArrays = "1.13.0"
 LabelledNumbers = "0.1.0"
 SplitApplyCombine = "1.2.3"
 SymmetrySectors = "0.1.4"
+TensorProducts = "0.1.2"
 julia = "1.10"

docs/Project.toml

Lines changed: 1 addition & 1 deletion
@@ -6,6 +6,6 @@ Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"

 [compat]
 Documenter = "1.8.1"
-GradedUnitRanges = "0.1.6"
+GradedUnitRanges = "0.2"
 LabelledNumbers = "0.1.0"
 Literate = "2.20.1"

src/GradedUnitRanges.jl

Lines changed: 0 additions & 1 deletion
@@ -7,7 +7,6 @@ include("gradedunitrange.jl")
 include("dual.jl")
 include("labelledunitrangedual.jl")
 include("gradedunitrangedual.jl")
-include("onetoone.jl")
 include("fusion.jl")

 end

src/fusion.jl

Lines changed: 15 additions & 47 deletions
@@ -1,58 +1,30 @@
 using BlockArrays: AbstractBlockedUnitRange, blocklengths
 using LabelledNumbers: LabelledInteger, label, labelled
 using SplitApplyCombine: groupcount
-
-# https://github.com/ITensor/ITensors.jl/blob/v0.3.57/NDTensors/src/lib/GradedAxes/src/tensor_product.jl
-# https://en.wikipedia.org/wiki/Tensor_product
-# https://github.com/KeitaNakamura/Tensorial.jl
-function tensor_product(
-  a1::AbstractUnitRange,
-  a2::AbstractUnitRange,
-  a3::AbstractUnitRange,
-  a_rest::Vararg{AbstractUnitRange},
-)
-  return foldl(tensor_product, (a1, a2, a3, a_rest...))
-end
+using TensorProducts: TensorProducts, OneToOne, tensor_product

 flip_dual(r::AbstractUnitRange) = r
 flip_dual(r::GradedUnitRangeDual) = flip(r)
-function tensor_product(a1::AbstractUnitRange, a2::AbstractUnitRange)
-  return tensor_product(flip_dual(a1), flip_dual(a2))
-end
-
-function tensor_product(a1::Base.OneTo, a2::Base.OneTo)
-  return Base.OneTo(length(a1) * length(a2))
-end
-
-function tensor_product(::OneToOne, a2::AbstractUnitRange)
-  return a2
-end
-
-function tensor_product(a1::AbstractUnitRange, ::OneToOne)
-  return a1
-end
-
-function tensor_product(::OneToOne, ::OneToOne)
-  return OneToOne()
-end

 function fuse_labels(x, y)
   return error(
     "`fuse_labels` not implemented for object of type `$(typeof(x))` and `$(typeof(y))`."
   )
 end

-function fuse_blocklengths(x::Integer, y::Integer)
-  # return blocked unit range to keep non-abelian interface
-  return blockedrange([x * y])
-end
-
 function fuse_blocklengths(x::LabelledInteger, y::LabelledInteger)
   # return blocked unit range to keep non-abelian interface
   return blockedrange([labelled(x * y, fuse_labels(label(x), label(y)))])
 end

-function tensor_product(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange)
+unmerged_tensor_product() = OneToOne()
+unmerged_tensor_product(a) = a
+unmerged_tensor_product(a1, a2) = tensor_product(a1, a2)
+function unmerged_tensor_product(a1, a2, as...)
+  return unmerged_tensor_product(unmerged_tensor_product(a1, a2), as...)
+end
+
+function unmerged_tensor_product(a1::AbstractGradedUnitRange, a2::AbstractGradedUnitRange)
   nested = map(Iterators.flatten((Iterators.product(blocks(a1), blocks(a2)),))) do it
     return mapreduce(length, fuse_blocklengths, it)
   end
@@ -96,15 +68,11 @@ end
 blockmergesort(g::GradedUnitRangeDual) = flip(blockmergesort(flip(g)))
 blockmergesort(g::AbstractUnitRange) = g

-# fusion_product produces a sorted, non-dual GradedUnitRange
-function fusion_product(g1, g2)
-  return blockmergesort(tensor_product(g1, g2))
-end
+# tensor_product produces a sorted, non-dual GradedUnitRange
+TensorProducts.tensor_product(g::AbstractGradedUnitRange) = blockmergesort(flip_dual(g))

-fusion_product(g::AbstractUnitRange) = blockmergesort(g)
-fusion_product(g::GradedUnitRangeDual) = fusion_product(flip(g))
-
-# recursive fusion_product. Simpler than reduce + fix type stability issues with reduce
-function fusion_product(g1, g2, g3...)
-  return fusion_product(fusion_product(g1, g2), g3...)
+function TensorProducts.tensor_product(
+  g1::AbstractGradedUnitRange, g2::AbstractGradedUnitRange
+)
+  return blockmergesort(unmerged_tensor_product(g1, g2))
 end
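
For context, a minimal usage sketch of the interface after this change (not part of the commit). It assumes a toy sector type and an overload of GradedUnitRanges.fuse_labels for it, similar to the U1 struct used in test/test_dual.jl:

using BlockArrays: blocklengths
using GradedUnitRanges: GradedUnitRanges, gradedrange
using TensorProducts: tensor_product

# Hypothetical toy abelian sector; `fuse_labels` defines how two labels combine.
struct U1
  n::Int
end
GradedUnitRanges.fuse_labels(x::U1, y::U1) = U1(x.n + y.n)

r = gradedrange([U1(0) => 2, U1(1) => 3])

# `tensor_product` is now the TensorProducts.jl function, extended here for graded
# ranges: blocks are fused pairwise, then merged and sorted by sector.
fused = tensor_product(r, r)
blocklengths(fused)  # block sizes grouped by the fused sectors

Compared to the old fusion_product, callers now use tensor_product directly, while unmerged_tensor_product keeps the pairwise fusion without the final block merge and sort.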

src/onetoone.jl

Lines changed: 0 additions & 8 deletions
This file was deleted.
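
The trivial OneToOne range that lived in this file now comes from TensorProducts.jl (see the updated imports in test/test_dual.jl). A quick sketch of the equivalent usage, assuming the TensorProducts.jl definition matches the deleted one:

using TensorProducts: OneToOne, tensor_product

a0 = OneToOne()         # trivial range 1:1 with a single element
length(a0) == 1         # true
tensor_product(a0, a0)  # OneToOne() acts as the neutral element of tensor_product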

test/Project.toml

Lines changed: 3 additions & 3 deletions
@@ -1,19 +1,19 @@
 [deps]
 Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
 BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
-BlockSparseArrays = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
 GradedUnitRanges = "e2de450a-8a67-46c7-b59c-01d5a3d041c5"
 LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
 SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
 Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
+TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

 [compat]
 Aqua = "0.8.9"
 BlockArrays = "1.4.0"
-BlockSparseArrays = "0.2.28, 0.3"
-GradedUnitRanges = "0.1.6"
+GradedUnitRanges = "0.2"
 LabelledNumbers = "0.1.0"
 SafeTestsets = "0.1"
 Suppressor = "0.2"
+TensorProducts = "0.1.0"
 Test = "1.10"

test/test_basics.jl

Lines changed: 2 additions & 25 deletions
@@ -1,4 +1,3 @@
-@eval module $(gensym())
 using BlockArrays:
   Block,
   BlockRange,
@@ -13,34 +12,13 @@ using BlockArrays:
   combine_blockaxes,
   mortar
 using GradedUnitRanges:
-  GradedOneTo,
-  GradedUnitRange,
-  OneToOne,
-  blocklabels,
-  gradedrange,
-  sector_type,
-  space_isequal
+  GradedOneTo, GradedUnitRange, blocklabels, gradedrange, sector_type, space_isequal
 using LabelledNumbers:
   LabelledUnitRange, islabelled, label, labelled, labelled_isequal, unlabel
 using Test: @test, @test_broken, @testset

-@testset "OneToOne" begin
-  a0 = OneToOne()
-  @test a0 isa OneToOne{Bool}
-  @test eltype(a0) == Bool
-  @test length(a0) == 1
-  @test labelled_isequal(a0, a0)
-  @test a0[1] == true
-  @test a0[[1]] == [true]
-
-  @test labelled_isequal(a0, 1:1)
-  @test labelled_isequal(1:1, a0)
-  @test !labelled_isequal(a0, 1:2)
-  @test !labelled_isequal(1:2, a0)
-end
-
 @testset "GradedUnitRanges basics" begin
-  a0 = OneToOne()
+  a0 = Base.OneTo(1)
   for a in (
     blockedrange([labelled(2, "x"), labelled(3, "y")]),
     gradedrange([labelled(2, "x"), labelled(3, "y")]),
@@ -260,4 +238,3 @@ end
   @test length(a) == 1
   @test label(first(a)) == "x"
 end
-end

test/test_dual.jl

Lines changed: 2 additions & 16 deletions
@@ -1,4 +1,3 @@
-@eval module $(gensym())
 using BlockArrays:
   Block,
   BlockedOneTo,
@@ -13,13 +12,11 @@ using BlockArrays:
   findblock,
   mortar,
   combine_blockaxes
-using BlockSparseArrays: BlockSparseArray
 using GradedUnitRanges:
   AbstractGradedUnitRange,
   GradedUnitRanges,
   GradedUnitRangeDual,
   LabelledUnitRangeDual,
-  OneToOne,
   blocklabels,
   blockmergesortperm,
   blocksortperm,
@@ -36,6 +33,8 @@ using GradedUnitRanges:
 using LabelledNumbers:
   LabelledInteger, LabelledUnitRange, label, label_type, labelled, labelled_isequal, unlabel
 using Test: @test, @test_broken, @testset
+using TensorProducts: OneToOne, tensor_product
+
 struct U1
   n::Int
 end
@@ -306,16 +305,3 @@
     @test !isdual(dual(flip(a)))
   end
 end
-
-@testset "dag" begin
-  elt = ComplexF64
-  r = gradedrange([U1(0) => 2, U1(1) => 3])
-  a = BlockSparseArray{elt}(undef, r, dual(r))
-  a[Block(1, 1)] = randn(elt, 2, 2)
-  a[Block(2, 2)] = randn(elt, 3, 3)
-  @test isdual.(axes(a)) == (false, true)
-  ad = dag(a)
-  @test Array(ad) == conj(Array(a))
-  @test isdual.(axes(ad)) == (true, false)
-end
-end
