
Commit 4996dca

[SparseArrayInterface] [BlockSparseArrays] Sparse and block sparse dot products (ITensor#1577)
* [SparseArrayInterface] [BlockSparseArrays] Sparse and block sparse dot products
* [NDTensors] Bump to v0.3.62
1 parent a92459e commit 4996dca

12 files changed: +175 −22 lines


NDTensors/Project.toml

+1 −1

@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman <[email protected]>"]
-version = "0.3.61"
+version = "0.3.62"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"

NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl

+15 −2

@@ -1,14 +1,27 @@
-using ArrayLayouts: ArrayLayouts, MemoryLayout, MulAdd
+using ArrayLayouts: ArrayLayouts, DualLayout, MemoryLayout, MulAdd
 using BlockArrays: BlockLayout
 using ..SparseArrayInterface: SparseLayout
-using ..TypeParameterAccessors: similartype
+using ..TypeParameterAccessors: parenttype, similartype
 
 function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike})
   outer_layout = typeof(MemoryLayout(blockstype(arraytype)))
   inner_layout = typeof(MemoryLayout(blocktype(arraytype)))
   return BlockLayout{outer_layout,inner_layout}()
 end
 
+# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`.
+function ArrayLayouts.MemoryLayout(
+  arraytype::Type{<:Adjoint{<:Any,<:AbstractBlockSparseVector}}
+)
+  return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
+end
+# TODO: Generalize to `BlockSparseVectorLike`/`AnyBlockSparseVector`.
+function ArrayLayouts.MemoryLayout(
+  arraytype::Type{<:Transpose{<:Any,<:AbstractBlockSparseVector}}
+)
+  return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
+end
+
 function Base.similar(
   mul::MulAdd{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout},<:Any,<:Any,A,B},
   elt::Type,

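What the new `DualLayout` methods buy, sketched with the same constructors the tests below use: once the adjoint or transpose of a block sparse vector advertises a dual layout, ArrayLayouts can route `a1' * a2` through its dot-product path instead of generic matrix code. This is an illustrative sketch, assuming only what appears in `test_basics.jl` below.

using BlockArrays: Block
using LinearAlgebra: dot
using NDTensors.BlockSparseArrays: BlockSparseArray

# Block vectors with block sizes [2, 3, 4]; only some blocks are stored.
a1 = BlockSparseArray{Float64}([2, 3, 4])
a1[Block(1)] = randn(2)
a1[Block(3)] = randn(4)
a2 = BlockSparseArray{Float64}([2, 3, 4])
a2[Block(2)] = randn(3)
a2[Block(3)] = randn(4)

# `a1'` now carries a `DualLayout` wrapping the block sparse layout, so this
# contraction goes through the dot-product machinery and only needs blocks
# stored in both vectors (here `Block(3)`).
a1' * a2 ≈ dot(a1, a2)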
NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl

+8 −1

@@ -158,6 +158,13 @@ function Base.similar(
   return similar(arraytype, eltype(arraytype), axes)
 end
 
+# Fixes ambiguity error.
+function Base.similar(
+  arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}}
+)
+  return similar(arraytype, eltype(arraytype), axes)
+end
+
 # Needed by `BlockArrays` matrix multiplication interface
 # TODO: This fixes an ambiguity error with `OffsetArrays.jl`, but
 # it only appears to be needed in older versions of Julia like v1.6.
@@ -221,7 +228,7 @@ function Base.similar(
 end
 
 # Fixes ambiguity error.
-function Base.similar(a::BlockSparseArrayLike{<:Any,0}, elt::Type, axes::Tuple{})
+function Base.similar(a::BlockSparseArrayLike, elt::Type, axes::Tuple{})
   return blocksparse_similar(a, elt, axes)
 end

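For reference, the kinds of calls the added and relaxed `similar` methods accept; a minimal sketch assuming a `BlockSparseArray` constructed as in the tests, with the blocking of the results left to `blocksparse_similar`.

using NDTensors.BlockSparseArrays: BlockSparseArray

a = BlockSparseArray{Float64}([2, 3], [2, 3])

# Plain `Base.OneTo` axes now dispatch through `similar(arraytype, eltype, axes)`
# without hitting a method ambiguity.
b = similar(typeof(a), (Base.OneTo(4), Base.OneTo(4)))

# The zero-dimensional method is no longer restricted to `BlockSparseArrayLike{<:Any,0}`,
# so an empty axes tuple is accepted for any block sparse array.
c = similar(a, Float32, ())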
@@ -1,23 +1,48 @@
-using ArrayLayouts: ArrayLayouts, MatMulMatAdd
+using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd
 using BlockArrays: BlockLayout
 using ..SparseArrayInterface: SparseLayout
-using LinearAlgebra: mul!
+using LinearAlgebra: dot, mul!
 
 function blocksparse_muladd!(
-  α::Number, a1::AbstractMatrix, a2::AbstractMatrix, β::Number, a_dest::AbstractMatrix
+  α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray
 )
   mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β)
   return a_dest
 end
 
+function blocksparse_matmul!(m::MulAdd)
+  α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
+  blocksparse_muladd!(α, a1, a2, β, a_dest)
+  return a_dest
+end
+
 function ArrayLayouts.materialize!(
   m::MatMulMatAdd{
     <:BlockLayout{<:SparseLayout},
     <:BlockLayout{<:SparseLayout},
     <:BlockLayout{<:SparseLayout},
   },
 )
-  α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
-  blocksparse_muladd!(α, a1, a2, β, a_dest)
-  return a_dest
+  blocksparse_matmul!(m)
+  return m.C
+end
+function ArrayLayouts.materialize!(
+  m::MatMulVecAdd{
+    <:BlockLayout{<:SparseLayout},
+    <:BlockLayout{<:SparseLayout},
+    <:BlockLayout{<:SparseLayout},
+  },
+)
+  blocksparse_matmul!(m)
+  return m.C
+end
+
+function blocksparse_dot(a1::AbstractArray, a2::AbstractArray)
+  # TODO: Add a check that the blocking of `a1` and `a2` are
+  # the same, or the same up to a reshape.
+  return dot(blocks(a1), blocks(a2))
+end
+
+function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}})
+  return blocksparse_dot(d.A, d.B)
 end

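The `blocksparse_dot` definition above leans on an identity that already holds for dense block arrays: the dot product of two identically blocked vectors equals the dot product of their block-of-blocks representations, since generic `dot` on an array of arrays recursively sums `dot` over paired entries. A small dense sanity check of that identity, using `BlockArrays` directly rather than the block sparse types:

using BlockArrays: BlockArray, blocks
using LinearAlgebra: dot

# Two dense block vectors with the same blocking [2, 3].
v = BlockArray(randn(5), [2, 3])
w = BlockArray(randn(5), [2, 3])

# `blocks(v)` is a length-2 vector of blocks; recursive `dot` pairs them up,
# which is what `blocksparse_dot` relies on, with sparsity letting it skip
# block pairs that are not stored on both sides.
dot(v, w) ≈ dot(blocks(v), blocks(w))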
NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl

+2 −2

@@ -184,7 +184,7 @@ reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index)))
 
 # Represents the array of arrays of a `Transpose`
 # wrapping a block sparse array, i.e. `blocks(array)` where `a` is a `Transpose`.
-struct SparseTransposeBlocks{T,BlockType<:AbstractMatrix{T},Array<:Transpose{T}} <:
+struct SparseTransposeBlocks{T,BlockType<:AbstractArray{T},Array<:Transpose{T}} <:
        AbstractSparseMatrix{BlockType}
   array::Array
 end
@@ -219,7 +219,7 @@ end
 
 # Represents the array of arrays of a `Adjoint`
 # wrapping a block sparse array, i.e. `blocks(array)` where `a` is a `Adjoint`.
-struct SparseAdjointBlocks{T,BlockType<:AbstractMatrix{T},Array<:Adjoint{T}} <:
+struct SparseAdjointBlocks{T,BlockType<:AbstractArray{T},Array<:Adjoint{T}} <:
        AbstractSparseMatrix{BlockType}
   array::Array
 end

NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl

+15 −1

@@ -16,7 +16,7 @@ using BlockArrays:
   mortar
 using Compat: @compat
 using GPUArraysCore: @allowscalar
-using LinearAlgebra: Adjoint, mul!, norm
+using LinearAlgebra: Adjoint, dot, mul!, norm
 using NDTensors.BlockSparseArrays:
   @view!,
   BlockSparseArray,
@@ -575,7 +575,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
      a[b] = randn(elt, size(a[b]))
    end
    @test isassigned(a, 1, 1)
+   @test isassigned(a, 1, 1, 1)
+   @test !isassigned(a, 1, 1, 2)
    @test isassigned(a, 5, 7)
+   @test isassigned(a, 5, 7, 1)
+   @test !isassigned(a, 5, 7, 2)
    @test !isassigned(a, 0, 1)
    @test !isassigned(a, 5, 8)
    @test isassigned(a, Block(1), Block(1))
@@ -852,6 +856,16 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
       @allowscalar @test Array(a_dest) ≈ Array(a1′) * Array(a2′)
     end
   end
+  @testset "Dot product" begin
+    a1 = dev(BlockSparseArray{elt}([2, 3, 4]))
+    a1[Block(1)] = dev(randn(elt, size(@view(a1[Block(1)]))))
+    a1[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
+    a2 = dev(BlockSparseArray{elt}([2, 3, 4]))
+    a2[Block(2)] = dev(randn(elt, size(@view(a1[Block(2)]))))
+    a2[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
+    @test a1' * a2 ≈ Array(a1)' * Array(a2)
+    @test dot(a1, a2) ≈ a1' * a2
+  end
   @testset "TensorAlgebra" begin
     a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3]))
     a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)]))))
@@ -1,13 +1,41 @@
-using ArrayLayouts: ArrayLayouts, MatMulMatAdd
+using ArrayLayouts: ArrayLayouts, Dot, DualLayout, MatMulMatAdd, MatMulVecAdd, MulAdd
+using LinearAlgebra: Adjoint, Transpose
+using ..TypeParameterAccessors: parenttype
 
 function ArrayLayouts.MemoryLayout(arraytype::Type{<:SparseArrayLike})
   return SparseLayout()
 end
 
-function ArrayLayouts.materialize!(
-  m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
+# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`.
+function ArrayLayouts.MemoryLayout(arraytype::Type{<:Adjoint{<:Any,<:AbstractSparseVector}})
+  return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
+end
+# TODO: Generalize to `SparseVectorLike`/`AnySparseVector`.
+function ArrayLayouts.MemoryLayout(
+  arraytype::Type{<:Transpose{<:Any,<:AbstractSparseVector}}
 )
+  return DualLayout{typeof(MemoryLayout(parenttype(arraytype)))}()
+end
+
+function sparse_matmul!(m::MulAdd)
   α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C
   sparse_mul!(a_dest, a1, a2, α, β)
   return a_dest
 end
+
+function ArrayLayouts.materialize!(
+  m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
+)
+  sparse_matmul!(m)
+  return m.C
+end
+function ArrayLayouts.materialize!(
+  m::MatMulVecAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
+)
+  sparse_matmul!(m)
+  return m.C
+end
+
+function Base.copy(d::Dot{<:SparseLayout,<:SparseLayout})
+  return sparse_dot(d.A, d.B)
+end

NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/SparseArrayInterfaceLinearAlgebraExt.jl

+35 −4

@@ -1,4 +1,4 @@
-using LinearAlgebra: mul!, norm
+using LinearAlgebra: dot, mul!, norm
 
 sparse_norm(a::AbstractArray, p::Real=2) = norm(sparse_storage(a))
 
@@ -9,6 +9,14 @@ function mul_indices(I1::CartesianIndex{2}, I2::CartesianIndex{2})
   return CartesianIndex(I1[1], I2[2])
 end
 
+# TODO: Is this needed? Maybe when multiplying vectors?
+function mul_indices(I1::CartesianIndex{1}, I2::CartesianIndex{1})
+  if I1 ≠ I2
+    return nothing
+  end
+  return CartesianIndex(I1)
+end
+
 function default_mul!!(
   a_dest::AbstractMatrix,
   a1::AbstractMatrix,
@@ -28,9 +36,9 @@ end
 
 # a1 * a2 * α + a_dest * β
 function sparse_mul!(
-  a_dest::AbstractMatrix,
-  a1::AbstractMatrix,
-  a2::AbstractMatrix,
+  a_dest::AbstractArray,
+  a1::AbstractArray,
+  a2::AbstractArray,
   α::Number=true,
   β::Number=false;
   (mul!!)=(default_mul!!),
@@ -45,3 +53,26 @@ function sparse_mul!(
   end
   return a_dest
 end
+
+function sparse_dot(a1::AbstractArray, a2::AbstractArray)
+  # This requires that `a1` and `a2` have the same shape.
+  # TODO: Generalize (Base supports dot products of
+  # arrays with the same length but different sizes).
+  size(a1) == size(a2) ||
+    throw(DimensionMismatch("Sizes $(size(a1)) and $(size(a2)) don't match."))
+  dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2)))
+  # TODO: First check if the number of stored elements (`nstored`, to be renamed
+  # `stored_length`) is smaller in `a1` or `a2` and use whichever one is smaller
+  # as the outer loop.
+  for I1 in stored_indices(a1)
+    # TODO: Overload and use `Base.isstored(a, I) = I in stored_indices(a)` instead.
+    # TODO: This assumes fast lookup of indices, which may not always be the case.
+    # It could be better to loop over `stored_indices(a2)` and check that
+    # `I1 == I2` instead (say using `mul_indices(I1, I2)`). We could have a trait
+    # `HasFastIsStored(a::AbstractArray)` to choose between the two.
+    if I1 in stored_indices(a2)
+      dot_dest += dot(a1[I1], a2[I1])
+    end
+  end
+  return dot_dest
+end

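A self-contained sketch of the accumulation strategy `sparse_dot` uses, written against a hypothetical `Dict`-of-stored-values stand-in rather than the package's storage types: start from `zero` of the promoted `dot` element type and only accumulate at indices stored on both sides.

using LinearAlgebra: dot

# Hypothetical stand-ins for two sparse length-4 vectors, keyed by stored index.
stored1 = Dict(CartesianIndex(1) => 1.0, CartesianIndex(3) => 2.0)
stored2 = Dict(CartesianIndex(2) => 5.0, CartesianIndex(3) => 7.0)

function dict_dot(s1, s2)
  # Same accumulator choice as `sparse_dot` above.
  acc = zero(Base.promote_op(dot, valtype(s1), valtype(s2)))
  # Mirror of the `I1 in stored_indices(a2)` check: only co-stored indices contribute.
  for (I, v) in s1
    haskey(s2, I) && (acc += dot(v, s2[I]))
  end
  return acc
end

dict_dot(stored1, stored2)  # only index 3 contributes: 2.0 * 7.0 == 14.0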
NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl

+4 −1

@@ -166,7 +166,10 @@ end
 function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N}
   return sparse_isassigned(a, Tuple(I)...)
 end
-function sparse_isassigned(a::AbstractArray{<:Any,N}, I::Vararg{Integer,N}) where {N}
+function sparse_isassigned(a::AbstractArray, I::Integer...)
+  # Check trailing dimensions are one. This is needed in generic
+  # AbstractArray show when `a isa AbstractVector`.
+  all(d -> isone(I[d]), (ndims(a) + 1):length(I)) || return false
   return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a))
 end

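The trailing-dimension check mirrors the Base indexing convention that generic `show` relies on for vectors: indices beyond `ndims(a)` are accepted as long as they equal 1. A quick illustration with a plain `Vector` (behavior assumed from Base, not from this package):

v = rand(3)
isassigned(v, 2)      # true
isassigned(v, 2, 1)   # true: a trailing singleton index is allowed
isassigned(v, 2, 2)   # false: trailing index other than 1
isassigned(v, 4, 1)   # false: out of bounds in the first dimension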
NDTensors/src/lib/SparseArrayInterface/test/SparseArrayInterfaceTestUtils/SparseArrays.jl

+4 −0

@@ -32,6 +32,10 @@ function LinearAlgebra.mul!(
   return a_dest
 end
 
+function LinearAlgebra.dot(a1::SparseArray, a2::SparseArray)
+  return SparseArrayInterface.sparse_dot(a1, a2)
+end
+
 # AbstractArray interface
 Base.size(a::SparseArray) = a.dims
 function Base.similar(a::SparseArray, elt::Type, dims::Tuple{Vararg{Int}})

NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl

+13 −1

@@ -1,5 +1,5 @@
 @eval module $(gensym())
-using LinearAlgebra: mul!, norm
+using LinearAlgebra: dot, mul!, norm
 using NDTensors.SparseArrayInterface: SparseArrayInterface
 include("SparseArrayInterfaceTestUtils/SparseArrayInterfaceTestUtils.jl")
 using .SparseArrayInterfaceTestUtils.AbstractSparseArrays: AbstractSparseArrays
@@ -299,6 +299,18 @@ using Test: @test, @testset
   @test a_dest isa SparseArray{elt}
   @test SparseArrayInterface.nstored(a_dest) == 2
 
+  # Dot product
+  a1 = SparseArray{elt}(4)
+  a1[1] = randn()
+  a1[3] = randn()
+  a2 = SparseArray{elt}(4)
+  a2[2] = randn()
+  a2[3] = randn()
+  a_dest = a1' * a2
+  @test a_dest isa elt
+  @test a_dest ≈ Array(a1)' * Array(a2)
+  @test a_dest ≈ dot(a1, a2)
+
   # In-place matrix multiplication
   a1 = SparseArray{elt}(2, 3)
   a1[1, 2] = 12

NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl

+16 −0

@@ -1,3 +1,4 @@
+using ArrayLayouts: LayoutMatrix
 using LinearAlgebra: LinearAlgebra, qr
 using ..TensorAlgebra:
   TensorAlgebra,
@@ -8,6 +9,8 @@ using ..TensorAlgebra:
   fusedims,
   splitdims
 
+# TODO: Define as `tensor_qr`.
+# TODO: This looks generic but doesn't work for `BlockSparseArrays`.
 function _qr(a::AbstractArray, biperm::BlockedPermutation{2})
   a_matricized = fusedims(a, biperm)
 
@@ -38,6 +41,12 @@ function LinearAlgebra.qr(a::AbstractMatrix, biperm::BlockedPermutation{2})
   return _qr(a, biperm)
 end
 
+# Fix ambiguity error with `ArrayLayouts`.
+function LinearAlgebra.qr(a::LayoutMatrix, biperm::BlockedPermutation{2})
+  return _qr(a, biperm)
+end
+
+# TODO: Define in terms of an inner function `_qr` or `tensor_qr`.
 function LinearAlgebra.qr(
   a::AbstractArray, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
 )
@@ -50,3 +59,10 @@ function LinearAlgebra.qr(
 )
   return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
 end
+
+# Fix ambiguity error with `ArrayLayouts`.
+function LinearAlgebra.qr(
+  a::LayoutMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
+)
+  return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
+end
