From c3eb3e7f67b37f83d5f0c62e771e8a905a179ab2 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 3 Jun 2024 15:30:42 -0400 Subject: [PATCH 1/8] [BlockSparseArrays] Improve the design of block views (#1481) * [NDTensors] Bump to v0.3.19 --- NDTensors/Project.toml | 2 +- .../test/runtests.jl | 4 +- .../BlockArraysExtensions.jl | 39 ++++++++++++++ .../abstractblocksparsearray/arraylayouts.jl | 11 ++++ .../src/abstractblocksparsearray/map.jl | 10 +++- .../src/abstractblocksparsearray/view.jl | 4 -- .../wrappedabstractblocksparsearray.jl | 21 +++++++- .../blocksparsearrayinterface.jl | 38 +++++++++++-- .../lib/BlockSparseArrays/test/test_basics.jl | 54 +++++++++++++------ .../src/lib/GradedAxes/src/gradedunitrange.jl | 5 ++ .../src/lib/GradedAxes/src/unitrangedual.jl | 11 ++++ .../LabelledNumbers/src/labelledinteger.jl | 29 ++++++++++ .../src/lib/LabelledNumbers/test/runtests.jl | 24 +++++++++ 13 files changed, 222 insertions(+), 30 deletions(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 57392bc89e..fc6b36d9f1 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.18" +version = "0.3.19" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl index e66c45aaef..d496ee57c6 100644 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl +++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl @@ -80,8 +80,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @testset "dual axes" begin r = gradedrange([U1(0) => 2, U1(1) => 2]) a = BlockSparseArray{elt}(dual(r), r) - a[Block(1, 1)] = randn(size(a[Block(1, 1)])) - a[Block(2, 2)] = 
randn(size(a[Block(2, 2)])) + a[Block(1, 1)] = randn(elt, size(a[Block(1, 1)])) + a[Block(2, 2)] = randn(elt, size(a[Block(2, 2)])) a_dense = Array(a) @test eachindex(a) == CartesianIndices(size(a)) for I in eachindex(a) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index 04f37f0f18..d0710d6209 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -6,6 +6,7 @@ using BlockArrays: BlockRange, BlockedUnitRange, BlockVector, + BlockSlice, block, blockaxes, blockedrange, @@ -29,6 +30,36 @@ function sub_axis(a::AbstractUnitRange, indices::AbstractUnitRange) return only(axes(blockedunitrange_getindices(a, indices))) end +# TODO: Use `GradedAxes.blockedunitrange_getindices`. +# Outputs a `BlockUnitRange`. +function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:BlockRange{1}}) + return sub_axis(a, indices.block) +end + +# TODO: Use `GradedAxes.blockedunitrange_getindices`. +# Outputs a `BlockUnitRange`. +function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:Block{1}}) + return sub_axis(a, Block(indices)) +end + +# TODO: Use `GradedAxes.blockedunitrange_getindices`. +# Outputs a `BlockUnitRange`. +function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:BlockIndexRange{1}}) + return sub_axis(a, indices.block) +end + +# TODO: Use `GradedAxes.blockedunitrange_getindices`. +# Outputs a `BlockUnitRange`. +function sub_axis(a::AbstractUnitRange, indices::Block) + return only(axes(blockedunitrange_getindices(a, indices))) +end + +# TODO: Use `GradedAxes.blockedunitrange_getindices`. +# Outputs a `BlockUnitRange`. 
+function sub_axis(a::AbstractUnitRange, indices::BlockIndexRange) + return only(axes(blockedunitrange_getindices(a, indices))) +end + # TODO: Use `GradedAxes.blockedunitrange_getindices`. # Outputs a `BlockUnitRange`. function sub_axis(a::AbstractUnitRange, indices::AbstractVector{<:Block}) @@ -131,6 +162,14 @@ function blockrange(axis::AbstractUnitRange, r::BlockSlice) return blockrange(axis, r.block) end +function blockrange(axis::AbstractUnitRange, r::Block{1}) + return r:r +end + +function blockrange(axis::AbstractUnitRange, r::BlockIndexRange) + return Block(r):Block(r) +end + function blockrange(axis::AbstractUnitRange, r) return error("Slicing not implemented for range of type `$(typeof(r))`.") end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl index c9cbf33cdc..dfe4052aee 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/arraylayouts.jl @@ -16,7 +16,18 @@ end # Materialize a SubArray view. function ArrayLayouts.sub_materialize(layout::BlockLayout{<:SparseLayout}, a, axes) + # TODO: Make more generic for GPU. a_dest = BlockSparseArray{eltype(a)}(axes) a_dest .= a return a_dest end + +# Materialize a SubArray view. +function ArrayLayouts.sub_materialize( + layout::BlockLayout{<:SparseLayout}, a, axes::Tuple{Vararg{Base.OneTo}} +) + # TODO: Make more generic for GPU. 
+ a_dest = Array{eltype(a)}(undef, length.(axes)) + a_dest .= a + return a_dest +end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl index 22f9350605..6a7cb96ac1 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl @@ -32,8 +32,14 @@ function SparseArrayInterface.sparse_map!( for I in union_stored_blocked_cartesianindices(a_dest, a_srcs...) BI_dest = blockindexrange(a_dest, I) BI_srcs = map(a_src -> blockindexrange(a_src, I), a_srcs) - block_dest = @view a_dest[_block(BI_dest)] - block_srcs = ntuple(i -> @view(a_srcs[i][_block(BI_srcs[i])]), length(a_srcs)) + # TODO: Investigate why this doesn't work: + # block_dest = @view a_dest[_block(BI_dest)] + block_dest = blocks(a_dest)[Int.(Tuple(_block(BI_dest)))...] + # TODO: Investigate why this doesn't work: + # block_srcs = ntuple(i -> @view(a_srcs[i][_block(BI_srcs[i])]), length(a_srcs)) + block_srcs = ntuple(length(a_srcs)) do i + return blocks(a_srcs[i])[Int.(Tuple(_block(BI_srcs[i])))...] + end subblock_dest = @view block_dest[BI_dest.indices...] subblock_srcs = ntuple(i -> @view(block_srcs[i][BI_srcs[i].indices...]), length(a_srcs)) # TODO: Use `map!!` to handle immutable blocks. diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl index e2e5c8acb9..bbe586d771 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl @@ -1,9 +1,5 @@ using BlockArrays: BlockIndexRange, BlockRange, BlockSlice, block -function blocksparse_view(a::AbstractArray, index::Block) - return blocks(a)[Int.(Tuple(index))...] -end - # TODO: Define `AnyBlockSparseVector`. 
function Base.view(a::BlockSparseArrayLike{<:Any,N}, index::Block{N}) where {N} return blocksparse_view(a, index) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 286847992e..618a1794bb 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -55,7 +55,20 @@ function Base.getindex(a::BlockSparseArrayLike{<:Any,2}, I::Vararg{AbstractUnitR return ArrayLayouts.layout_getindex(a, I...) end -function Base.isassigned(a::BlockSparseArrayLike, index::Vararg{Block}) +function Base.getindex(a::BlockSparseArrayLike{<:Any,N}, block::Block{N}) where {N} + return blocksparse_getindex(a, block) +end +function Base.getindex( + a::BlockSparseArrayLike{<:Any,N}, block::Vararg{Block{1},N} +) where {N} + return blocksparse_getindex(a, block...) +end + +# TODO: Define `issasigned(a, ::Block{N})`. +function Base.isassigned( + a::BlockSparseArrayLike{<:Any,N}, index::Vararg{Block{1},N} +) where {N} + # TODO: Define `blocksparse_isassigned`. return isassigned(blocks(a), Int.(index)...) 
end @@ -64,6 +77,12 @@ function Base.setindex!(a::BlockSparseArrayLike{<:Any,N}, value, I::BlockIndex{N return a end +function Base.setindex!( + a::BlockSparseArrayLike{<:Any,N}, value, I::Vararg{Block{1},N} +) where {N} + a[Block(Int.(I))] = value + return a +end function Base.setindex!(a::BlockSparseArrayLike{<:Any,N}, value, I::Block{N}) where {N} blocksparse_setindex!(a, value, I) return a diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index b93fb5b9b2..4b36a5e887 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -19,6 +19,14 @@ function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where return a[findblockindex.(axes(a), I)...] end +function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Block{N}) where {N} + return blocksparse_getindex(a, Tuple(I)...) +end +function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Block{1},N}) where {N} + # TODO: Avoid copy if the block isn't stored. + return copy(blocks(a)[Int.(I)...]) +end + # TODO: Implement as `copy(@view a[I...])`, which is then implemented # through `ArrayLayouts.sub_materialize`. using ..SparseArrayInterface: set_getindex_zero_function @@ -59,21 +67,41 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N end function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N} - a_b = view(a, block(I)) + i = Int.(Tuple(block(I))) + a_b = blocks(a)[i...] a_b[I.α...] = value - # Set the block, required if it is structurally zero - a[block(I)] = a_b + # Set the block, required if it is structurally zero. + blocks(a)[i...] 
= a_b return a end function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Block{N}) where {N} - # TODO: Create a conversion function, say `CartesianIndex(Int.(Tuple(I)))`. - i = I.n + blocksparse_setindex!(a, value, Tuple(I)...) + return a +end +function blocksparse_setindex!( + a::AbstractArray{<:Any,N}, value, I::Vararg{Block{1},N} +) where {N} + i = Int.(I) @boundscheck blockcheckbounds(a, i...) + # TODO: Use `blocksizes(a)[i...]` when we upgrade to + # BlockArrays.jl v1. + if size(value) ≠ size(view(a, I...)) + return throw( + DimensionMismatch("Trying to set a block with an array of the wrong size.") + ) + end blocks(a)[i...] = value return a end +function blocksparse_view(a::AbstractArray{<:Any,N}, I::Block{N}) where {N} + return blocksparse_view(a, Tuple(I)...) +end +function blocksparse_view(a::AbstractArray{<:Any,N}, I::Vararg{Block{1},N}) where {N} + return SubArray(a, to_indices(a, I)) +end + function blocksparse_viewblock(a::AbstractArray{<:Any,N}, I::Block{N}) where {N} # TODO: Create a conversion function, say `CartesianIndex(Int.(Tuple(I)))`. i = I.n diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 565d349c33..d8d7191fbd 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -4,7 +4,7 @@ using LinearAlgebra: mul! 
using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored, block_reshape using NDTensors.SparseArrayInterface: nstored using NDTensors.TensorAlgebra: contract -using Test: @test, @testset, @test_broken +using Test: @test, @test_broken, @test_throws, @testset include("TestBlockSparseArraysUtils.jl") @testset "BlockSparseArrays (eltype=$elt)" for elt in (Float32, Float64, ComplexF32, ComplexF64) @@ -20,6 +20,7 @@ include("TestBlockSparseArraysUtils.jl") @test block_nstored(a) == 0 @test iszero(a) @test all(I -> iszero(a[I]), eachindex(a)) + @test_throws DimensionMismatch a[Block(1, 1)] = randn(elt, 2, 3) a = BlockSparseArray{elt}([2, 3], [2, 3]) a[3, 3] = 33 @@ -225,36 +226,59 @@ include("TestBlockSparseArraysUtils.jl") @test block_nstored(c) == 2 @test Array(c) == 2 * transpose(Array(a)) - ## Broken, need to fix. - a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) - @test_broken a[Block(1), Block(1):Block(2)] + b = a[Block(1), Block(1):Block(2)] + @test size(b) == (2, 7) + @test blocksize(b) == (1, 2) + @test b[Block(1, 1)] == a[Block(1, 1)] + @test b[Block(1, 2)] == a[Block(1, 2)] - # This is outputting only zero blocks. 
a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) - b = a[Block(2):Block(2), Block(1):Block(2)] - @test_broken block_nstored(b) == 1 - @test_broken b == Array(a)[3:5, 1:end] + b = copy(a) + x = randn(elt, size(@view(a[Block(2, 2)]))) + b[Block(2), Block(2)] = x + @test b[Block(2, 2)] == x a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) b = copy(a) - x = randn(size(@view(a[Block(2, 2)]))) - b[Block(2), Block(2)] = x - @test_broken b[Block(2, 2)] == x + b[Block(1, 1)] .= 1 + # TODO: Use `blocksizes(b)[1, 1]` once we upgrade to + # BlockArrays.jl v1. + @test b[Block(1, 1)] == trues(size(@view(b[Block(1, 1)]))) - # Doesnt' set the block + a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) + x = randn(elt, 1, 2) + @view(a[Block(2, 2)])[1:1, 1:2] = x + @test @view(a[Block(2, 2)])[1:1, 1:2] == x + @test a[Block(2, 2)][1:1, 1:2] == x + + # TODO: This is broken, fix! + @test_broken a[3:3, 4:5] == x + + a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) + x = randn(elt, 1, 2) + @views a[Block(2, 2)][1:1, 1:2] = x + @test @view(a[Block(2, 2)])[1:1, 1:2] == x + @test a[Block(2, 2)][1:1, 1:2] == x + + # TODO: This is broken, fix! + @test_broken a[3:3, 4:5] == x + + ## Broken, need to fix. + + # This is outputting only zero blocks. 
a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) - b = copy(a) - b[Block(1, 1)] .= 1 - @test_broken b[1, 1] == trues(size(@view(b[1, 1]))) + b = a[Block(2):Block(2), Block(1):Block(2)] + @test_broken block_nstored(b) == 1 + @test_broken b == Array(a)[3:5, 1:end] end @testset "LinearAlgebra" begin a1 = BlockSparseArray{elt}([2, 3], [2, 3]) diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl index 8a19b6e04f..6e414cbc8a 100644 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl +++ b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl @@ -221,6 +221,11 @@ function blockedunitrange_getindices( return mortar(map(index -> a[index], indices)) end +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices::Block{1}) + return a[indices] +end + # TODO: Move this to a `BlockArraysExtensions` library. function blockedunitrange_getindices(a::BlockedUnitRange, indices) return error("Not implemented.") diff --git a/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl index 52d8042736..8f1a86f1b8 100644 --- a/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl +++ b/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl @@ -38,6 +38,11 @@ function unitrangedual_getindices_blocks(a, indices) return mortar([dual(b) for b in blocks(a_indices)]) end +# TODO: Move this to a `BlockArraysExtensions` library. 
+function blockedunitrange_getindices(a::UnitRangeDual, indices::Block{1}) + return a[indices] +end + function Base.getindex(a::UnitRangeDual, indices::Vector{<:Block{1}}) return unitrangedual_getindices_blocks(a, indices) end @@ -54,6 +59,12 @@ function BlockArrays.BlockSlice(b::Block, a::LabelledUnitRange) return BlockSlice(b, unlabel(a)) end +using BlockArrays: BlockArrays, BlockSlice +using NDTensors.GradedAxes: UnitRangeDual, dual +function BlockArrays.BlockSlice(b::Block, r::UnitRangeDual) + return BlockSlice(b, dual(r)) +end + using NDTensors.LabelledNumbers: LabelledNumbers, label LabelledNumbers.label(a::UnitRangeDual) = dual(label(nondual(a))) diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl index f5e2d58f3d..323d252b0c 100644 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl +++ b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl @@ -86,3 +86,32 @@ Base.:-(x::LabelledInteger) = labelled_minus(x) # TODO: This is only needed for older Julia versions, like Julia 1.6. # Delete once we drop support for older Julia versions. 
Base.hash(x::LabelledInteger, h::UInt64) = labelled_hash(x, h) + +using Random: AbstractRNG, default_rng +default_eltype() = Float64 +for f in [:rand, :randn] + @eval begin + function Base.$f( + rng::AbstractRNG, + elt::Type{<:Number}, + dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}, + ) + return a = $f(rng, elt, unlabel.(dims)) + end + function Base.$f( + rng::AbstractRNG, + elt::Type{<:Number}, + dim1::LabelledInteger, + dims::Vararg{LabelledInteger}, + ) + return $f(rng, elt, (dim1, dims...)) + end + Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = + $f(default_rng(), elt, dims) + Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = + $f(elt, (dim1, dims...)) + Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = + $f(default_eltype(), dims) + Base.$f(dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f((dim1, dims...)) + end +end diff --git a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl index cf3f87e86d..6fc1ac4231 100644 --- a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl +++ b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl @@ -1,4 +1,5 @@ @eval module $(gensym()) +using LinearAlgebra: norm using NDTensors.LabelledNumbers: LabelledInteger, islabelled, label, labelled, unlabel using Test: @test, @testset @testset "LabelledNumbers" begin @@ -48,6 +49,29 @@ using Test: @test, @testset @test one(typeof(x)) == true @test !islabelled(one(typeof(x))) end + @testset "randn" begin + d = labelled(2, "x") + + a = randn(Float32, d, d) + @test eltype(a) === Float32 + @test size(a) == (2, 2) + @test norm(a) > 0 + + a = rand(Float32, d, d) + @test eltype(a) === Float32 + @test size(a) == (2, 2) + @test norm(a) > 0 + + a = randn(d, d) + @test eltype(a) === Float64 + @test size(a) == (2, 2) + @test norm(a) > 0 + + a = rand(d, d) + @test eltype(a) === Float64 + @test size(a) == (2, 2) + @test norm(a) > 0 + end @testset 
"Labelled array ($a)" for a in (collect(2:5), 2:5) x = labelled(a, "x") @test eltype(x) == LabelledInteger{Int,String} From 8eed9800313f895cc8a38b37f10c94859fd27ac6 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Thu, 6 Jun 2024 13:57:03 -0400 Subject: [PATCH 2/8] [BlockSparseArrays] Fix more slicing operations (#1486) * [BlockSparseArrays] Fix more slicing operations * [NDTensors] Bump to v0.3.20 --- NDTensors/Project.toml | 2 +- .../BlockArraysExtensions.jl | 54 ++++++++++ .../wrappedabstractblocksparsearray.jl | 20 +++- .../blocksparsearrayinterface.jl | 36 ------- .../lib/BlockSparseArrays/test/test_basics.jl | 30 +++++- .../src/lib/GradedAxes/src/GradedAxes.jl | 1 + .../lib/GradedAxes/src/blockedunitrange.jl | 98 +++++++++++++++++++ .../src/lib/GradedAxes/src/gradedunitrange.jl | 89 ----------------- .../LabelledNumbers/src/labelledinteger.jl | 5 + .../src/lib/LabelledNumbers/test/runtests.jl | 16 +++ .../src/lib/TensorAlgebra/test/test_basics.jl | 2 +- 11 files changed, 222 insertions(+), 131 deletions(-) create mode 100644 NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index fc6b36d9f1..1e69997836 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.19" +version = "0.3.20" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index d0710d6209..31fb71e104 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -19,6 +19,15 @@ using Dictionaries: Dictionary, Indices using ..GradedAxes: blockedunitrange_getindices using 
..SparseArrayInterface: stored_indices +struct BlockIndices{B,T<:Integer,I<:AbstractVector{T}} <: AbstractVector{T} + blocks::B + indices::I +end +for f in (:axes, :unsafe_indices, :axes1, :first, :last, :size, :length, :unsafe_length) + @eval Base.$f(S::BlockIndices) = Base.$f(S.indices) +end +Base.getindex(S::BlockIndices, i::Integer) = getindex(S.indices, i) + # Outputs a `BlockUnitRange`. function sub_axis(a::AbstractUnitRange, indices) return error("Not implemented") @@ -48,6 +57,10 @@ function sub_axis(a::AbstractUnitRange, indices::BlockSlice{<:BlockIndexRange{1} return sub_axis(a, indices.block) end +function sub_axis(a::AbstractUnitRange, indices::BlockIndices) + return sub_axis(a, indices.blocks) +end + # TODO: Use `GradedAxes.blockedunitrange_getindices`. # Outputs a `BlockUnitRange`. function sub_axis(a::AbstractUnitRange, indices::Block) @@ -162,6 +175,10 @@ function blockrange(axis::AbstractUnitRange, r::BlockSlice) return blockrange(axis, r.block) end +function blockrange(a::AbstractUnitRange, r::BlockIndices) + return blockrange(a, r.blocks) +end + function blockrange(axis::AbstractUnitRange, r::Block{1}) return r:r end @@ -174,6 +191,43 @@ function blockrange(axis::AbstractUnitRange, r) return error("Slicing not implemented for range of type `$(typeof(r))`.") end +# This takes a range of indices `indices` of array `a` +# and maps it to the range of indices within block `block`. 
+function blockindices(a::AbstractArray, block::Block, indices::Tuple) + return blockindices(axes(a), block, indices) +end + +function blockindices(axes::Tuple, block::Block, indices::Tuple) + return blockindices.(axes, Tuple(block), indices) +end + +function blockindices(axis::AbstractUnitRange, block::Block, indices::AbstractUnitRange) + indices_within_block = intersect(indices, axis[block]) + if iszero(length(indices_within_block)) + # Falls outside of block + return 1:0 + end + return only(blockindexrange(axis, indices_within_block).indices) +end + +# This catches the case of `Vector{<:Block{1}}`. +# `BlockRange` gets wrapped in a `BlockSlice`, which is handled properly +# by the version with `indices::AbstractUnitRange`. +# TODO: This should get fixed in a better way inside of `BlockArrays`. +function blockindices( + axis::AbstractUnitRange, block::Block, indices::AbstractVector{<:Block{1}} +) + if block ∉ indices + # Falls outside of block + return 1:0 + end + return Base.OneTo(length(axis[block])) +end + +function blockindices(a::AbstractUnitRange, b::Block, r::BlockIndices) + return blockindices(a, b, r.blocks) +end + function cartesianindices(a::AbstractArray, b::Block) return cartesianindices(axes(a), b) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 618a1794bb..877f3d3d53 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -1,7 +1,6 @@ -using BlockArrays: BlockedUnitRange, blockedrange -using SplitApplyCombine: groupcount - using Adapt: Adapt, WrappedArray +using BlockArrays: BlockArrays, BlockedUnitRange, blockedrange, unblock +using SplitApplyCombine: groupcount const WrappedAbstractBlockSparseArray{T,N} = 
WrappedArray{ T,N,AbstractBlockSparseArray,AbstractBlockSparseArray{T,N} @@ -19,6 +18,21 @@ function Base.axes(a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray}) return ntuple(i -> sub_axis(axes(parent(a), i), a.indices[i]), ndims(a)) end +# Used when making views. +function Base.to_indices( + a::BlockSparseArrayLike, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} +) + return (unblock(a, inds, I), to_indices(a, BlockArrays._maybetail(inds), Base.tail(I))...) +end +function Base.to_indices(a::BlockSparseArrayLike, I::Tuple{Vector{<:Block{1}},Vararg{Any}}) + return to_indices(a, axes(a), I) +end + +# Used inside `Base.to_indices` when making views. +function BlockArrays.unblock(a, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}}) + return BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1])) +end + # BlockArrays `AbstractBlockArray` interface BlockArrays.blocks(a::BlockSparseArrayLike) = blocksparse_blocks(a) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index 4b36a5e887..e6855d4227 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -227,42 +227,6 @@ function SparseArrayInterface.sparse_storage(a::SparseAdjointBlocks) return error("Not implemented") end -# TODO: Move to `BlockArraysExtensions`. -# This takes a range of indices `indices` of array `a` -# and maps it to the range of indices within block `block`. -function blockindices(a::AbstractArray, block::Block, indices::Tuple) - return blockindices(axes(a), block, indices) -end - -# TODO: Move to `BlockArraysExtensions`. -function blockindices(axes::Tuple, block::Block, indices::Tuple) - return blockindices.(axes, Tuple(block), indices) -end - -# TODO: Move to `BlockArraysExtensions`. 
-function blockindices(axis::AbstractUnitRange, block::Block, indices::AbstractUnitRange) - indices_within_block = intersect(indices, axis[block]) - if iszero(length(indices_within_block)) - # Falls outside of block - return 1:0 - end - return only(blockindexrange(axis, indices_within_block).indices) -end - -# This catches the case of `Vector{<:Block{1}}`. -# `BlockRange` gets wrapped in a `BlockSlice`, which is handled properly -# by the version with `indices::AbstractUnitRange`. -# TODO: This should get fixed in a better way inside of `BlockArrays`. -function blockindices( - axis::AbstractUnitRange, block::Block, indices::AbstractVector{<:Block{1}} -) - if block ∉ indices - # Falls outside of block - return 1:0 - end - return Base.OneTo(length(axis[block])) -end - # Represents the array of arrays of a `SubArray` # wrapping a block spare array, i.e. `blocks(array)` where `a` is a `SubArray`. struct SparseSubArrayBlocks{T,N,Array<:SubArray{T,N}} <: AbstractSparseArray{T,N} diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index d8d7191fbd..8970a20dda 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -1,5 +1,5 @@ @eval module $(gensym()) -using BlockArrays: Block, BlockedUnitRange, blockedrange, blocklength, blocksize +using BlockArrays: Block, BlockRange, BlockedUnitRange, blockedrange, blocklength, blocksize using LinearAlgebra: mul! using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored, block_reshape using NDTensors.SparseArrayInterface: nstored @@ -270,6 +270,34 @@ include("TestBlockSparseArraysUtils.jl") # TODO: This is broken, fix! @test_broken a[3:3, 4:5] == x + a = BlockSparseArray{elt}([2, 3], [2, 3]) + @views for b in [Block(1, 1), Block(2, 2)] + # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available. 
+ a[b] = randn(elt, size(a[b])) + end + b = @view a[[Block(1), Block(2)], [Block(1), Block(2)]] + for I in CartesianIndices(a) + @test b[I] == a[I] + end + for block in BlockRange(a) + @test b[block] == a[block] + end + + a = BlockSparseArray{elt}([2, 3], [2, 3]) + @views for b in [Block(1, 1), Block(2, 2)] + # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available. + a[b] = randn(elt, size(a[b])) + end + b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] + @test b[Block(1, 1)] == a[Block(2, 2)] + @test b[Block(2, 1)] == a[Block(1, 2)] + @test b[Block(1, 2)] == a[Block(2, 1)] + @test b[Block(2, 2)] == a[Block(1, 1)] + @test b[1, 1] == a[3, 3] + @test b[4, 4] == a[1, 1] + b[4, 4] = 44 + @test b[4, 4] == 44 + ## Broken, need to fix. # This is outputting only zero blocks. diff --git a/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl b/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl index 3524abcaba..7756b71575 100644 --- a/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl +++ b/NDTensors/src/lib/GradedAxes/src/GradedAxes.jl @@ -1,4 +1,5 @@ module GradedAxes +include("blockedunitrange.jl") include("gradedunitrange.jl") include("fusion.jl") include("dual.jl") diff --git a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl new file mode 100644 index 0000000000..f9785da1a1 --- /dev/null +++ b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl @@ -0,0 +1,98 @@ +using BlockArrays: + BlockArrays, + Block, + BlockIndexRange, + BlockRange, + BlockedUnitRange, + block, + blockindex, + findblock, + findblockindex + +# Custom `BlockedUnitRange` constructor that takes a unit range +# and a set of block lengths, similar to `BlockArray(::AbstractArray, blocklengths...)`. 
+function blockedunitrange(a::AbstractUnitRange, blocklengths) + blocklengths_shifted = copy(blocklengths) + blocklengths_shifted[1] += (first(a) - 1) + blocklasts = cumsum(blocklengths_shifted) + return BlockArrays._BlockedUnitRange(first(a), blocklasts) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +# TODO: Rename this. `BlockArrays.findblock(a, k)` finds the +# block of the value `k`, while this finds the block of the index `k`. +# This could make use of the `BlockIndices` object, i.e. `block(BlockIndices(a)[index])`. +function blockedunitrange_findblock(a::BlockedUnitRange, index::Integer) + @boundscheck index in 1:length(a) || throw(BoundsError(a, index)) + return @inbounds findblock(a, index + first(a) - 1) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +# TODO: Rename this. `BlockArrays.findblockindex(a, k)` finds the +# block index of the value `k`, while this finds the block index of the index `k`. +# This could make use of the `BlockIndices` object, i.e. `BlockIndices(a)[index]`. +function blockedunitrange_findblockindex(a::BlockedUnitRange, index::Integer) + @boundscheck index in 1:length(a) || throw(BoundsError()) + return @inbounds findblockindex(a, index + first(a) - 1) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +# Like `a[indices]` but preserves block structure. 
+function blockedunitrange_getindices( + a::BlockedUnitRange, indices::AbstractUnitRange{<:Integer} +) + first_blockindex = blockedunitrange_findblockindex(a, first(indices)) + last_blockindex = blockedunitrange_findblockindex(a, last(indices)) + first_block = block(first_blockindex) + last_block = block(last_blockindex) + blocklengths = if first_block == last_block + [length(indices)] + else + map(first_block:last_block) do block + if block == first_block + return length(a[first_block]) - blockindex(first_blockindex) + 1 + end + if block == last_block + return blockindex(last_blockindex) + end + return length(a[block]) + end + end + return blockedunitrange(indices .+ (first(a) - 1), blocklengths) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices::BlockIndexRange) + return a[block(indices)][only(indices.indices)] +end + +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices::Vector{<:Integer}) + return map(index -> a[index], indices) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices( + a::BlockedUnitRange, indices::Vector{<:Union{Block{1},BlockIndexRange{1}}} +) + return mortar(map(index -> a[index], indices)) +end + +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices::Block{1}) + return a[indices] +end + +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices) + return error("Not implemented.") +end + +# The blocks of the corresponding slice. 
+_blocks(a::AbstractUnitRange, indices) = error("Not implemented") +function _blocks(a::AbstractUnitRange, indices::AbstractUnitRange) + return findblock(a, first(indices)):findblock(a, last(indices)) +end +function _blocks(a::AbstractUnitRange, indices::BlockRange) + return indices +end diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl index 6e414cbc8a..851ef284f9 100644 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl +++ b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl @@ -16,33 +16,6 @@ using BlockArrays: mortar using ..LabelledNumbers: LabelledNumbers, LabelledInteger, label, labelled, unlabel -# Custom `BlockedUnitRange` constructor that takes a unit range -# and a set of block lengths, similar to `BlockArray(::AbstractArray, blocklengths...)`. -function blockedunitrange(a::AbstractUnitRange, blocklengths) - blocklengths_shifted = copy(blocklengths) - blocklengths_shifted[1] += (first(a) - 1) - blocklasts = cumsum(blocklengths_shifted) - return BlockArrays._BlockedUnitRange(first(a), blocklasts) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# TODO: Rename this. `BlockArrays.findblock(a, k)` finds the -# block of the value `k`, while this finds the block of the index `k`. -# This could make use of the `BlockIndices` object, i.e. `block(BlockIndices(a)[index])`. -function blockedunitrange_findblock(a::BlockedUnitRange, index::Integer) - @boundscheck index in 1:length(a) || throw(BoundsError(a, index)) - return @inbounds findblock(a, index + first(a) - 1) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -# TODO: Rename this. `BlockArrays.findblockindex(a, k)` finds the -# block index of the value `k`, while this finds the block index of the index `k`. -# This could make use of the `BlockIndices` object, i.e. `BlockIndices(a)[index]`. 
-function blockedunitrange_findblockindex(a::BlockedUnitRange, index::Integer) - @boundscheck index in 1:length(a) || throw(BoundsError()) - return @inbounds findblockindex(a, index + first(a) - 1) -end - const GradedUnitRange{BlockLasts<:Vector{<:LabelledInteger}} = BlockedUnitRange{BlockLasts} # TODO: Use `TypeParameterAccessors`. @@ -178,68 +151,6 @@ function blockedunitrange_getindex(a::GradedUnitRange, index) return labelled(unlabel_blocks(a)[index], get_label(a, index)) end -# TODO: Move this to a `BlockArraysExtensions` library. -# Like `a[indices]` but preserves block structure. -using BlockArrays: block, blockindex -function blockedunitrange_getindices( - a::BlockedUnitRange, indices::AbstractUnitRange{<:Integer} -) - first_blockindex = blockedunitrange_findblockindex(a, first(indices)) - last_blockindex = blockedunitrange_findblockindex(a, last(indices)) - first_block = block(first_blockindex) - last_block = block(last_blockindex) - blocklengths = if first_block == last_block - [length(indices)] - else - map(first_block:last_block) do block - if block == first_block - return length(a[first_block]) - blockindex(first_blockindex) + 1 - end - if block == last_block - return blockindex(last_blockindex) - end - return length(a[block]) - end - end - return blockedunitrange(indices .+ (first(a) - 1), blocklengths) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::BlockedUnitRange, indices::BlockIndexRange) - return a[block(indices)][only(indices.indices)] -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::BlockedUnitRange, indices::Vector{<:Integer}) - return map(index -> a[index], indices) -end - -# TODO: Move this to a `BlockArraysExtensions` library. 
-function blockedunitrange_getindices( - a::BlockedUnitRange, indices::Vector{<:Union{Block{1},BlockIndexRange{1}}} -) - return mortar(map(index -> a[index], indices)) -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::BlockedUnitRange, indices::Block{1}) - return a[indices] -end - -# TODO: Move this to a `BlockArraysExtensions` library. -function blockedunitrange_getindices(a::BlockedUnitRange, indices) - return error("Not implemented.") -end - -# The blocks of the corresponding slice. -_blocks(a::AbstractUnitRange, indices) = error("Not implemented") -function _blocks(a::AbstractUnitRange, indices::AbstractUnitRange) - return findblock(a, first(indices)):findblock(a, last(indices)) -end -function _blocks(a::AbstractUnitRange, indices::BlockRange) - return indices -end - # The block labels of the corresponding slice. function blocklabels(a::AbstractUnitRange, indices) return map(_blocks(a, indices)) do block diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl index 323d252b0c..16b606e71c 100644 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl +++ b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl @@ -38,6 +38,11 @@ Base.:(==)(x::LabelledInteger, y::LabelledInteger) = labelled_isequal(x, y) Base.:(==)(x::LabelledInteger, y::Number) = labelled_isequal(x, y) Base.:(==)(x::Number, y::LabelledInteger) = labelled_isequal(x, y) Base.:<(x::LabelledInteger, y::LabelledInteger) = labelled_isless(x, y) +# This is only needed on older versions of Julia, like Julia 1.6. +# TODO: Delete once we drop support for Julia 1.6. +function Base.:<=(x::LabelledInteger, y::LabelledInteger) + return labelled_isless(x, y) || labelled_isequal(x, y) +end # TODO: Define `labelled_colon`. 
(::Base.Colon)(start::LabelledInteger, stop::LabelledInteger) = unlabel(start):unlabel(stop) Base.zero(lobject::LabelledInteger) = labelled_zero(lobject) diff --git a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl index 6fc1ac4231..4457e4fcb8 100644 --- a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl +++ b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl @@ -12,6 +12,22 @@ using Test: @test, @testset @test unlabel(x) == 2 @test !islabelled(unlabel(x)) + @test labelled(1, "x") < labelled(2, "x") + @test !(labelled(2, "x") < labelled(2, "x")) + @test !(labelled(3, "x") < labelled(2, "x")) + + @test !(labelled(1, "x") > labelled(2, "x")) + @test !(labelled(2, "x") > labelled(2, "x")) + @test labelled(3, "x") > labelled(2, "x") + + @test labelled(1, "x") <= labelled(2, "x") + @test labelled(2, "x") <= labelled(2, "x") + @test !(labelled(3, "x") <= labelled(2, "x")) + + @test !(labelled(1, "x") >= labelled(2, "x")) + @test labelled(2, "x") >= labelled(2, "x") + @test labelled(3, "x") >= labelled(2, "x") + @test x * 2 == 4 @test !islabelled(x * 2) @test 2 * x == 4 diff --git a/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl b/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl index 22feb8c2aa..bafabdf0b9 100644 --- a/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl +++ b/NDTensors/src/lib/TensorAlgebra/test/test_basics.jl @@ -183,7 +183,7 @@ end ## Here we loosened the tolerance because of some floating point roundoff issue. 
## with Float32 numbers @test a_dest ≈ α * a_dest_tensoroperations + β * a_dest_init rtol = - 10 * default_rtol(elt_dest) + 50 * default_rtol(elt_dest) end end end From 1cad3e2e056bdbe1bb1e6d206ec787750eed0d98 Mon Sep 17 00:00:00 2001 From: Karl Pierce Date: Thu, 6 Jun 2024 14:45:23 -0400 Subject: [PATCH 3/8] [NDTensorscuTENSORExt] Temporary fix for block sparse contractions (#1485) --- .../ext/NDTensorscuTENSORExt/contract.jl | 20 +++++++++++++------ .../TestITensorDMRG/TestITensorDMRG.jl | 5 ----- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl b/NDTensors/ext/NDTensorscuTENSORExt/contract.jl index daf5c14025..6a0109bcea 100644 --- a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl +++ b/NDTensors/ext/NDTensorscuTENSORExt/contract.jl @@ -3,18 +3,26 @@ using NDTensors.Expose: Exposed, unexpose using cuTENSOR: cuTENSOR, CuArray, CuTensor function NDTensors.contract!( - R::Exposed{<:CuArray,<:DenseTensor}, + exposedR::Exposed{<:CuArray,<:DenseTensor}, labelsR, - T1::Exposed{<:CuArray,<:DenseTensor}, + exposedT1::Exposed{<:CuArray,<:DenseTensor}, labelsT1, - T2::Exposed{<:CuArray,<:DenseTensor}, + exposedT2::Exposed{<:CuArray,<:DenseTensor}, labelsT2, α::Number=one(Bool), β::Number=zero(Bool), ) - cuR = CuTensor(array(unexpose(R)), collect(labelsR)) - cuT1 = CuTensor(array(unexpose(T1)), collect(labelsT1)) - cuT2 = CuTensor(array(unexpose(T2)), collect(labelsT2)) + R, T1, T2 = unexpose.((exposedR, exposedT1, exposedT2)) + zoffR = iszero(array(R).offset) + arrayR = zoffR ? array(R) : copy(array(R)) + arrayT1 = iszero(array(T1).offset) ? array(T1) : copy(array(T1)) + arrayT2 = iszero(array(T2).offset) ? 
array(T2) : copy(array(T2)) + cuR = CuTensor(arrayR, collect(labelsR)) + cuT1 = CuTensor(arrayT1, collect(labelsT1)) + cuT2 = CuTensor(arrayT2, collect(labelsT2)) cuTENSOR.mul!(cuR, cuT1, cuT2, α, β) + if !zoffR + array(R) .= cuR.data + end return R end diff --git a/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl b/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl index 32789dffed..fd441d7845 100644 --- a/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl +++ b/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl @@ -14,11 +14,6 @@ reference_energies = Dict([ ]) is_broken(dev, elt::Type, conserve_qns::Val) = false -## Currently there is an issue in blocksparse cutensor, seems to be related to using @view, I am still working to fix this issue. -## For some reason ComplexF64 works and throws an error in `test_broken` -function is_broken(dev::typeof(cu), elt::Type, conserve_qns::Val{true}) - return ("cutensor" ∈ ARGS && elt != ComplexF64) -end include("dmrg.jl") From 82cfd7687696e12bb45ea1f8d9f13a1152e3b3d8 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Thu, 6 Jun 2024 14:46:20 -0400 Subject: [PATCH 4/8] [NDTensors] Bump to v0.3.21 --- NDTensors/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 1e69997836..c6e6f49138 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.20" +version = "0.3.21" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" From 7d42d069693d80c4c7f6afdacc0a2e4d5caca8ae Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Fri, 7 Jun 2024 18:02:55 -0400 Subject: [PATCH 5/8] [BlockSparseArrays] Redesign and fix slicing with unit ranges (#1487) * [BlockSparseArrays] Redesign and fix slicing with unit ranges * [NDTensors] Bump to v0.3.22 --- NDTensors/Project.toml | 2 +- .../test/runtests.jl | 
20 ++++- .../src/abstractblocksparsearray/map.jl | 5 +- .../wrappedabstractblocksparsearray.jl | 70 ++++++++++++--- .../blocksparsearrayinterface.jl | 2 +- .../lib/BlockSparseArrays/test/test_basics.jl | 87 +++++++++++++------ .../lib/GradedAxes/src/blockedunitrange.jl | 22 ++++- .../src/lib/GradedAxes/test/test_basics.jl | 46 +++++++++- 8 files changed, 201 insertions(+), 53 deletions(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index c6e6f49138..fe455e9ecd 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.21" +version = "0.3.22" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl index d496ee57c6..bed17b7928 100644 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl +++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl @@ -3,7 +3,8 @@ using Compat: Returns using Test: @test, @testset, @test_broken using BlockArrays: Block, blocksize using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored -using NDTensors.GradedAxes: GradedAxes, GradedUnitRange, UnitRangeDual, dual, gradedrange +using NDTensors.GradedAxes: + GradedAxes, GradedUnitRange, UnitRangeDual, blocklabels, dual, gradedrange using NDTensors.LabelledNumbers: label using NDTensors.SparseArrayInterface: nstored using NDTensors.TensorAlgebra: fusedims, splitdims @@ -68,13 +69,26 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a = BlockSparseArray{elt}(d1, d2, d1, d2) blockdiagonal!(randn!, a) m = fusedims(a, (1, 2), (3, 4)) - @test axes(m, 1) isa GradedUnitRange - @test axes(m, 2) isa GradedUnitRange + # TODO: Once block merging is implemented, 
this should + # be the real test. + for ax in axes(m) + @test ax isa GradedUnitRange + @test_broken blocklabels(ax) == [U1(0), U1(1), U1(2)] + @test blocklabels(ax) == [U1(0), U1(1), U1(1), U1(2)] + end + for I in CartesianIndices(m) + if I ∈ CartesianIndex.([(1, 1), (4, 4)]) + @test !iszero(m[I]) + else + @test iszero(m[I]) + end + end @test a[1, 1, 1, 1] == m[1, 1] @test a[2, 2, 2, 2] == m[4, 4] # TODO: Current `fusedims` doesn't merge # common sectors, need to fix. @test_broken blocksize(m) == (3, 3) + @test blocksize(m) == (4, 4) @test a == splitdims(m, (d1, d2), (d1, d2)) end @testset "dual axes" begin diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl index 6a7cb96ac1..582d4193a3 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/map.jl @@ -14,10 +14,9 @@ using ..SparseArrayInterface: # Returns `Vector{<:CartesianIndices}` function union_stored_blocked_cartesianindices(as::Vararg{AbstractArray}) + combined_axes = combine_axes(axes.(as)...) stored_blocked_cartesianindices_as = map(as) do a - return blocked_cartesianindices( - axes(a), combine_axes(axes.(as)...), block_stored_indices(a) - ) + return blocked_cartesianindices(axes(a), combined_axes, block_stored_indices(a)) end return ∪(stored_blocked_cartesianindices_as...) 
end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 877f3d3d53..2a5350f3ec 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -1,5 +1,5 @@ using Adapt: Adapt, WrappedArray -using BlockArrays: BlockArrays, BlockedUnitRange, blockedrange, unblock +using BlockArrays: BlockArrays, BlockedUnitRange, BlockRange, blockedrange, unblock using SplitApplyCombine: groupcount const WrappedAbstractBlockSparseArray{T,N} = WrappedArray{ @@ -11,28 +11,76 @@ const BlockSparseArrayLike{T,N} = Union{ <:AbstractBlockSparseArray{T,N},<:WrappedAbstractBlockSparseArray{T,N} } -# AbstractArray interface -# TODO: Use `BlockSparseArrayLike`. -# TODO: Need to handle block indexing. -function Base.axes(a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray}) - return ntuple(i -> sub_axis(axes(parent(a), i), a.indices[i]), ndims(a)) +# Used when making views. +# TODO: Move to blocksparsearrayinterface. +function blocksparse_to_indices(a, inds, I) + return (unblock(a, inds, I), to_indices(a, BlockArrays._maybetail(inds), Base.tail(I))...) +end + +# TODO: Move to blocksparsearrayinterface. +function blocksparse_to_indices(a, I) + return to_indices(a, axes(a), I) end # Used when making views. function Base.to_indices( - a::BlockSparseArrayLike, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} + a::BlockSparseArrayLike, inds, I::Tuple{AbstractVector{<:Block{1}},Vararg{Any}} ) - return (unblock(a, inds, I), to_indices(a, BlockArrays._maybetail(inds), Base.tail(I))...) 
+ return blocksparse_to_indices(a, inds, I) end -function Base.to_indices(a::BlockSparseArrayLike, I::Tuple{Vector{<:Block{1}},Vararg{Any}}) - return to_indices(a, axes(a), I) + +function Base.to_indices( + a::BlockSparseArrayLike, inds, I::Tuple{AbstractUnitRange{<:Integer},Vararg{Any}} +) + return blocksparse_to_indices(a, inds, I) +end + +# Fixes ambiguity error with BlockArrays. +function Base.to_indices(a::BlockSparseArrayLike, inds, I::Tuple{BlockRange{1},Vararg{Any}}) + return blocksparse_to_indices(a, inds, I) +end + +function Base.to_indices( + a::BlockSparseArrayLike, I::Tuple{AbstractVector{<:Block{1}},Vararg{Any}} +) + return blocksparse_to_indices(a, I) +end + +# Fixes ambiguity error with BlockArrays. +function Base.to_indices(a::BlockSparseArrayLike, I::Tuple{BlockRange{1},Vararg{Any}}) + return blocksparse_to_indices(a, I) +end + +function Base.to_indices( + a::BlockSparseArrayLike, I::Tuple{AbstractUnitRange{<:Integer},Vararg{Any}} +) + return blocksparse_to_indices(a, I) end # Used inside `Base.to_indices` when making views. -function BlockArrays.unblock(a, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}}) +# TODO: Move to blocksparsearrayinterface. +# TODO: Make a special definition for `BlockedVector{<:Block{1}}` in order +# to merge blocks. +function blocksparse_unblock(a, inds, I::Tuple{AbstractVector{<:Block{1}},Vararg{Any}}) return BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1])) end +# TODO: Move to blocksparsearrayinterface. 
+function blocksparse_unblock(a, inds, I::Tuple{AbstractUnitRange{<:Integer},Vararg{Any}}) + bs = blockrange(inds[1], I[1]) + return BlockSlice(bs, blockedunitrange_getindices(inds[1], I[1])) +end + +function BlockArrays.unblock(a, inds, I::Tuple{AbstractVector{<:Block{1}},Vararg{Any}}) + return blocksparse_unblock(a, inds, I) +end + +function BlockArrays.unblock( + a::BlockSparseArrayLike, inds, I::Tuple{AbstractUnitRange{<:Integer},Vararg{Any}} +) + return blocksparse_unblock(a, inds, I) +end + # BlockArrays `AbstractBlockArray` interface BlockArrays.blocks(a::BlockSparseArrayLike) = blocksparse_blocks(a) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index e6855d4227..f2e9326219 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -267,7 +267,7 @@ function Base.isassigned(a::SparseSubArrayBlocks{<:Any,N}, I::Vararg{Int,N}) whe return true end function SparseArrayInterface.stored_indices(a::SparseSubArrayBlocks) - return stored_indices(view(blocks(parent(a.array)), axes(a)...)) + return stored_indices(view(blocks(parent(a.array)), blockrange(a)...)) end # TODO: Either make this the generic interface or define # `SparseArrayInterface.sparse_storage`, which is used diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 8970a20dda..7c4f998e65 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -1,5 +1,6 @@ @eval module $(gensym()) -using BlockArrays: Block, BlockRange, BlockedUnitRange, blockedrange, blocklength, blocksize +using BlockArrays: + Block, BlockRange, BlockedUnitRange, BlockVector, 
blockedrange, blocklength, blocksize using LinearAlgebra: mul! using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored, block_reshape using NDTensors.SparseArrayInterface: nstored @@ -8,6 +9,27 @@ using Test: @test, @test_broken, @test_throws, @testset include("TestBlockSparseArraysUtils.jl") @testset "BlockSparseArrays (eltype=$elt)" for elt in (Float32, Float64, ComplexF32, ComplexF64) + @testset "Broken" begin + # TODO: These are broken, need to fix. + a = BlockSparseArray{elt}([2, 3], [2, 3]) + for I in (Block.(1:2), [Block(1), Block(2)]) + b = @view a[I, I] + x = randn(elt, 2, 2) + b[Block(1, 1)] = x + # These outputs a block of zeros, + # for some reason the block + # is not getting set. + # I think the issue is that: + # ```julia + # @view(@view(a[I, I]))[Block(1, 1)] + # ``` + # creates a doubly-wrapped SubArray + # instead of flattening down to a + # single SubArray wrapper. + @test_broken a[Block(1, 1)] == x + @test_broken b[Block(1, 1)] == x + end + end @testset "Basics" begin a = BlockSparseArray{elt}([2, 3], [2, 3]) @test a == BlockSparseArray{elt}(blockedrange([2, 3]), blockedrange([2, 3])) @@ -255,32 +277,36 @@ include("TestBlockSparseArraysUtils.jl") a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) x = randn(elt, 1, 2) @view(a[Block(2, 2)])[1:1, 1:2] = x - @test @view(a[Block(2, 2)])[1:1, 1:2] == x @test a[Block(2, 2)][1:1, 1:2] == x - - # TODO: This is broken, fix! - @test_broken a[3:3, 4:5] == x + @test @view(a[Block(2, 2)])[1:1, 1:2] == x + @test a[3:3, 4:5] == x a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) x = randn(elt, 1, 2) @views a[Block(2, 2)][1:1, 1:2] = x - @test @view(a[Block(2, 2)])[1:1, 1:2] == x @test a[Block(2, 2)][1:1, 1:2] == x - - # TODO: This is broken, fix! - @test_broken a[3:3, 4:5] == x + @test @view(a[Block(2, 2)])[1:1, 1:2] == x + @test a[3:3, 4:5] == x a = BlockSparseArray{elt}([2, 3], [2, 3]) @views for b in [Block(1, 1), Block(2, 2)] # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available. 
a[b] = randn(elt, size(a[b])) end - b = @view a[[Block(1), Block(2)], [Block(1), Block(2)]] - for I in CartesianIndices(a) - @test b[I] == a[I] - end - for block in BlockRange(a) - @test b[block] == a[block] + for I in ( + Block.(1:2), + [Block(1), Block(2)], + BlockVector([Block(1), Block(2)], [1, 1]), + # TODO: This should merge blocks. + BlockVector([Block(1), Block(2)], [2]), + ) + b = @view a[I, I] + for I in CartesianIndices(a) + @test b[I] == a[I] + end + for block in BlockRange(a) + @test b[block] == a[block] + end end a = BlockSparseArray{elt}([2, 3], [2, 3]) @@ -288,25 +314,30 @@ include("TestBlockSparseArraysUtils.jl") # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available. a[b] = randn(elt, size(a[b])) end - b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] - @test b[Block(1, 1)] == a[Block(2, 2)] - @test b[Block(2, 1)] == a[Block(1, 2)] - @test b[Block(1, 2)] == a[Block(2, 1)] - @test b[Block(2, 2)] == a[Block(1, 1)] - @test b[1, 1] == a[3, 3] - @test b[4, 4] == a[1, 1] - b[4, 4] = 44 - @test b[4, 4] == 44 - - ## Broken, need to fix. + for I in ( + [Block(2), Block(1)], + BlockVector([Block(2), Block(1)], [1, 1]), + # TODO: This should merge blocks. + BlockVector([Block(2), Block(1)], [2]), + ) + b = @view a[I, I] + @test b[Block(1, 1)] == a[Block(2, 2)] + @test b[Block(2, 1)] == a[Block(1, 2)] + @test b[Block(1, 2)] == a[Block(2, 1)] + @test b[Block(2, 2)] == a[Block(1, 1)] + @test b[1, 1] == a[3, 3] + @test b[4, 4] == a[1, 1] + b[4, 4] = 44 + @test b[4, 4] == 44 + end # This is outputting only zero blocks. 
a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) b = a[Block(2):Block(2), Block(1):Block(2)] - @test_broken block_nstored(b) == 1 - @test_broken b == Array(a)[3:5, 1:end] + @test block_nstored(b) == 1 + @test b == Array(a)[3:5, 1:end] end @testset "LinearAlgebra" begin a1 = BlockSparseArray{elt}([2, 3], [2, 3]) diff --git a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl index f9785da1a1..1a2b4c2a19 100644 --- a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl +++ b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl @@ -36,8 +36,15 @@ function blockedunitrange_findblockindex(a::BlockedUnitRange, index::Integer) return @inbounds findblockindex(a, index + first(a) - 1) end +function blockedunitrange_getindices(a::AbstractUnitRange, indices) + return a[indices] +end + # TODO: Move this to a `BlockArraysExtensions` library. # Like `a[indices]` but preserves block structure. +# TODO: Consider calling this something else, for example +# `blocked_getindex`. See the discussion here: +# https://github.com/JuliaArrays/BlockArrays.jl/issues/347 function blockedunitrange_getindices( a::BlockedUnitRange, indices::AbstractUnitRange{<:Integer} ) @@ -72,10 +79,21 @@ function blockedunitrange_getindices(a::BlockedUnitRange, indices::Vector{<:Inte end # TODO: Move this to a `BlockArraysExtensions` library. +# TODO: Make a special definition for `BlockedVector{<:Block{1}}` in order +# to merge blocks. function blockedunitrange_getindices( - a::BlockedUnitRange, indices::Vector{<:Union{Block{1},BlockIndexRange{1}}} + a::BlockedUnitRange, indices::AbstractVector{<:Union{Block{1},BlockIndexRange{1}}} ) - return mortar(map(index -> a[index], indices)) + # Without converting `indices` to `Vector`, + # mapping `indices` outputs a `BlockVector` + # which is harder to reason about. 
+ blocks = map(index -> a[index], Vector(indices)) + # We pass `length.(blocks)` to `mortar` in order + # to pass block labels to the axes of the output, + # if they exist. This makes it so that + # `only(axes(a[indices])) isa `GradedUnitRange` + # if `a isa `GradedUnitRange`, for example. + return mortar(blocks, length.(blocks)) end # TODO: Move this to a `BlockArraysExtensions` library. diff --git a/NDTensors/src/lib/GradedAxes/test/test_basics.jl b/NDTensors/src/lib/GradedAxes/test/test_basics.jl index 41815db4d1..c230c755f4 100644 --- a/NDTensors/src/lib/GradedAxes/test/test_basics.jl +++ b/NDTensors/src/lib/GradedAxes/test/test_basics.jl @@ -81,9 +81,11 @@ using Test: @test, @test_broken, @testset @test label(a[Block(1)]) == "x" @test a[Block(2)] == 3:4 @test label(a[Block(2)]) == "y" - @test isone(first(only(axes(a)))) - @test length(only(axes(a))) == length(a) - @test blocklengths(only(axes(a))) == blocklengths(a) + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + @test blocklengths(ax) == blocklengths(a) + @test blocklabels(ax) == blocklabels(a) x = gradedrange(["x" => 2, "y" => 3]) a = x[3:4] @@ -92,6 +94,11 @@ using Test: @test, @test_broken, @testset @test blocklength(a) == 1 @test a[Block(1)] == 3:4 @test label(a[Block(1)]) == "y" + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + @test blocklengths(ax) == blocklengths(a) + @test blocklabels(ax) == blocklabels(a) x = gradedrange(["x" => 2, "y" => 3]) a = x[2:4][1:2] @@ -102,6 +109,11 @@ using Test: @test, @test_broken, @testset @test label(a[Block(1)]) == "x" @test a[Block(2)] == 3:3 @test label(a[Block(2)]) == "y" + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + @test blocklengths(ax) == blocklengths(a) + @test blocklabels(ax) == blocklabels(a) x = gradedrange(["x" => 2, "y" => 3]) a = x[Block(2)[2:3]] @@ -109,6 +121,10 @@ using Test: @test, @test_broken, @testset @test length(a) == 2 @test a == 4:5 @test 
label(a) == "y" + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + @test label(ax) == label(a) x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) a = x[Block(2):Block(3)] @@ -119,6 +135,11 @@ using Test: @test, @test_broken, @testset @test blocklabels(a) == ["y", "z"] @test a[Block(1)] == 3:5 @test a[Block(2)] == 6:9 + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + @test blocklengths(ax) == blocklengths(a) + @test blocklabels(ax) == blocklabels(a) x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) a = x[[Block(3), Block(2)]] @@ -126,11 +147,20 @@ using Test: @test, @test_broken, @testset @test length(a) == 7 @test blocklength(a) == 2 # TODO: `BlockArrays` doesn't define `blocklengths` - # for `BlockVector`, should it? + # `blocklengths(::BlockVector)`, unbrake this test + # once it does. @test_broken blocklengths(a) == [4, 3] @test blocklabels(a) == ["z", "y"] @test a[Block(1)] == 6:9 @test a[Block(2)] == 3:5 + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + # TODO: Change to: + # @test blocklengths(ax) == blocklengths(a) + # once `blocklengths(::BlockVector)` is defined. + @test blocklengths(ax) == [4, 3] + @test blocklabels(ax) == blocklabels(a) x = gradedrange(["x" => 2, "y" => 3, "z" => 4]) a = x[[Block(3)[2:3], Block(2)[2:3]]] @@ -143,5 +173,13 @@ using Test: @test, @test_broken, @testset @test blocklabels(a) == ["z", "y"] @test a[Block(1)] == 7:8 @test a[Block(2)] == 4:5 + ax = only(axes(a)) + @test ax == 1:length(a) + @test length(ax) == length(a) + # TODO: Change to: + # @test blocklengths(ax) == blocklengths(a) + # once `blocklengths(::BlockVector)` is defined. 
+ @test blocklengths(ax) == [2, 2] + @test blocklabels(ax) == blocklabels(a) end end From d70b89ed5967988a120a4fe207f3bd95d1e352b9 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Sun, 9 Jun 2024 10:35:41 -0400 Subject: [PATCH 6/8] [BlockSparseArrays] Fix some bugs involving BlockSparseArrays with dual axes (#1488) * [BlockSparseArrays] Fix some bugs involving BlockSparseArrays with dual axes * [NDTensors] Bump to v0.3.23 --- NDTensors/Project.toml | 2 +- .../test/runtests.jl | 55 ++++++++++++++++++- .../BlockArraysExtensions.jl | 2 +- .../src/lib/GradedAxes/src/unitrangedual.jl | 18 +++++- 4 files changed, 71 insertions(+), 6 deletions(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index fe455e9ecd..17a14ceb90 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.22" +version = "0.3.23" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl index bed17b7928..fe177167dd 100644 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl +++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl @@ -1,7 +1,7 @@ @eval module $(gensym()) using Compat: Returns using Test: @test, @testset, @test_broken -using BlockArrays: Block, blocksize +using BlockArrays: Block, blockedrange, blocksize using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored using NDTensors.GradedAxes: GradedAxes, GradedUnitRange, UnitRangeDual, blocklabels, dual, gradedrange @@ -73,6 +73,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) # be the real test. 
for ax in axes(m) @test ax isa GradedUnitRange + # TODO: Current `fusedims` doesn't merge + # common sectors, need to fix. @test_broken blocklabels(ax) == [U1(0), U1(1), U1(2)] @test blocklabels(ax) == [U1(0), U1(1), U1(1), U1(2)] end @@ -94,8 +96,13 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @testset "dual axes" begin r = gradedrange([U1(0) => 2, U1(1) => 2]) a = BlockSparseArray{elt}(dual(r), r) - a[Block(1, 1)] = randn(elt, size(a[Block(1, 1)])) - a[Block(2, 2)] = randn(elt, size(a[Block(2, 2)])) + @views for b in [Block(1, 1), Block(2, 2)] + a[b] = randn(elt, size(a[b])) + end + # TODO: Define and use `isdual` here. + @test axes(a, 1) isa UnitRangeDual + @test axes(a, 2) isa GradedUnitRange + @test !(axes(a, 2) isa UnitRangeDual) a_dense = Array(a) @test eachindex(a) == CartesianIndices(size(a)) for I in eachindex(a) @@ -104,8 +111,50 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test axes(a') == dual.(reverse(axes(a))) # TODO: Define and use `isdual` here. @test axes(a', 1) isa UnitRangeDual + @test axes(a', 2) isa GradedUnitRange @test !(axes(a', 2) isa UnitRangeDual) @test isnothing(show(devnull, MIME("text/plain"), a)) + + # Check preserving dual in tensor algebra. + for b in (a + a, 2 * a, 3 * a - a) + @test Array(b) ≈ 2 * Array(a) + # TODO: Define and use `isdual` here. + @test axes(b, 1) isa UnitRangeDual + @test axes(b, 2) isa GradedUnitRange + @test !(axes(b, 2) isa UnitRangeDual) + end + + @test isnothing(show(devnull, MIME("text/plain"), @view(a[Block(1, 1)]))) + @test @view(a[Block(1, 1)]) == a[Block(1, 1)] + + # Test case when all axes are dual. 
+ for r in (gradedrange([U1(0) => 2, U1(1) => 2]), blockedrange([2, 2])) + a = BlockSparseArray{elt}(dual(r), dual(r)) + @views for i in [Block(1, 1), Block(2, 2)] + a[i] = randn(elt, size(a[i])) + end + b = 2 * a + @test block_nstored(b) == 2 + @test Array(b) == 2 * Array(a) + for ax in axes(b) + @test ax isa UnitRangeDual + end + end + + # Test case when all axes are dual + # from taking the adjoint. + for r in (gradedrange([U1(0) => 2, U1(1) => 2]), blockedrange([2, 2])) + a = BlockSparseArray{elt}(r, r) + @views for i in [Block(1, 1), Block(2, 2)] + a[i] = randn(elt, size(a[i])) + end + b = 2 * a' + @test block_nstored(b) == 2 + @test Array(b) == 2 * Array(a)' + for ax in axes(b) + @test ax isa UnitRangeDual + end + end end @testset "Matrix multiplication" begin r = gradedrange([U1(0) => 2, U1(1) => 3]) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index 31fb71e104..43b8afab24 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -129,7 +129,7 @@ function cartesianindices(axes::Tuple, b::Block) end # Get the range within a block. 
-function blockindexrange(axis::AbstractUnitRange, r::UnitRange) +function blockindexrange(axis::AbstractUnitRange, r::AbstractUnitRange) bi1 = findblockindex(axis, first(r)) bi2 = findblockindex(axis, last(r)) b = block(bi1) diff --git a/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl index 8f1a86f1b8..358542856c 100644 --- a/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl +++ b/NDTensors/src/lib/GradedAxes/src/unitrangedual.jl @@ -68,8 +68,24 @@ end using NDTensors.LabelledNumbers: LabelledNumbers, label LabelledNumbers.label(a::UnitRangeDual) = dual(label(nondual(a))) -using BlockArrays: BlockArrays, blockaxes, blocklasts, findblock +using BlockArrays: BlockArrays, blockaxes, blocklasts, combine_blockaxes, findblock BlockArrays.blockaxes(a::UnitRangeDual) = blockaxes(nondual(a)) BlockArrays.blockfirsts(a::UnitRangeDual) = label_dual.(blockfirsts(nondual(a))) BlockArrays.blocklasts(a::UnitRangeDual) = label_dual.(blocklasts(nondual(a))) BlockArrays.findblock(a::UnitRangeDual, index::Integer) = findblock(nondual(a), index) +function BlockArrays.combine_blockaxes(a1::UnitRangeDual, a2::UnitRangeDual) + return dual(combine_blockaxes(dual(a1), dual(a2))) +end + +# This is needed when constructing `CartesianIndices` from +# a tuple of unit ranges that have this kind of dual unit range. +# TODO: See if we can find some more elegant way of constructing +# `CartesianIndices`, maybe by defining conversion of `LabelledInteger` +# to `Int`, defining a more general `convert` function, etc. +function Base.OrdinalRange{Int,Int}( + r::UnitRangeDual{<:LabelledInteger{Int},<:LabelledUnitRange{Int,UnitRange{Int}}} +) + # TODO: Implement this broadcasting operation and use it here. 
+ # return Int.(r) + return unlabel(nondual(r)) +end From 1bc091a03ca2b9fd5b205c785d01f4626c66d267 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Sun, 9 Jun 2024 22:13:20 -0400 Subject: [PATCH 7/8] [BlockSparseArrays] Sub-slices of multiple blocks (#1489) * [BlockSparseArrays] Sub-slices of multiple blocks * [NDTensors] Bump to v0.3.24 --- NDTensors/Project.toml | 2 +- .../BlockArraysExtensions.jl | 38 +++++++++- .../wrappedabstractblocksparsearray.jl | 27 ++++++- .../lib/BlockSparseArrays/test/test_basics.jl | 72 +++++++++++++++++-- .../src/sparsearrayinterface/indexing.jl | 13 ++-- .../test/test_abstractsparsearray.jl | 16 +++++ 6 files changed, 149 insertions(+), 19 deletions(-) diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 17a14ceb90..407445f8d7 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.23" +version = "0.3.24" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index 43b8afab24..57f2eb2846 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -159,8 +159,8 @@ function blockrange(axis::AbstractUnitRange, r::UnitRange) end function blockrange(axis::AbstractUnitRange, r::Int) - error("Slicing with integer values isn't supported.") - return findblock(axis, r) + ## return findblock(axis, r) + return error("Slicing with integer values isn't supported.") end function blockrange(axis::AbstractUnitRange, r::AbstractVector{<:Block{1}}) @@ -187,6 +187,24 @@ function blockrange(axis::AbstractUnitRange, r::BlockIndexRange) return Block(r):Block(r) end +function 
blockrange(axis::AbstractUnitRange, r::AbstractVector{<:BlockIndexRange{1}}) + return error("Slicing not implemented for range of type `$(typeof(r))`.") +end + +function blockrange( + axis::AbstractUnitRange, + r::BlockVector{BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}}, +) + return map(b -> Block(b), blocks(r)) +end + +# This handles slicing with `:`/`Colon()`. +function blockrange(axis::AbstractUnitRange, r::Base.Slice) + # TODO: Maybe use `BlockRange`, but that doesn't output + # the same thing. + return only(blockaxes(axis)) +end + function blockrange(axis::AbstractUnitRange, r) return error("Slicing not implemented for range of type `$(typeof(r))`.") end @@ -228,6 +246,22 @@ function blockindices(a::AbstractUnitRange, b::Block, r::BlockIndices) return blockindices(a, b, r.blocks) end +function blockindices( + a::AbstractUnitRange, + b::Block, + r::BlockVector{BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}}, +) + # TODO: Change to iterate over `BlockRange(r)` + # once https://github.com/JuliaArrays/BlockArrays.jl/issues/404 + # is fixed. 
+ for bl in blocks(r) + if b == Block(bl) + return only(bl.indices) + end + end + return error("Block not found.") +end + function cartesianindices(a::AbstractArray, b::Block) return cartesianindices(axes(a), b) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 2a5350f3ec..5499c957eb 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -1,5 +1,6 @@ using Adapt: Adapt, WrappedArray -using BlockArrays: BlockArrays, BlockedUnitRange, BlockRange, blockedrange, unblock +using BlockArrays: + BlockArrays, BlockedUnitRange, BlockIndexRange, BlockRange, blockedrange, mortar, unblock using SplitApplyCombine: groupcount const WrappedAbstractBlockSparseArray{T,N} = WrappedArray{ @@ -46,6 +47,15 @@ function Base.to_indices( return blocksparse_to_indices(a, I) end +# Handle case of indexing with `[Block(1)[1:2], Block(2)[1:2]]` +# by converting it to a `BlockVector` with +# `mortar([Block(1)[1:2], Block(2)[1:2]])`. +function Base.to_indices( + a::BlockSparseArrayLike, inds, I::Tuple{AbstractVector{<:BlockIndexRange{1}},Vararg{Any}} +) + return to_indices(a, inds, (mortar(I[1]), Base.tail(I)...)) +end + # Fixes ambiguity error with BlockArrays. function Base.to_indices(a::BlockSparseArrayLike, I::Tuple{BlockRange{1},Vararg{Any}}) return blocksparse_to_indices(a, I) @@ -126,14 +136,25 @@ function Base.getindex( return blocksparse_getindex(a, block...) end -# TODO: Define `issasigned(a, ::Block{N})`. +# TODO: Define `blocksparse_isassigned`. function Base.isassigned( a::BlockSparseArrayLike{<:Any,N}, index::Vararg{Block{1},N} ) where {N} - # TODO: Define `blocksparse_isassigned`. return isassigned(blocks(a), Int.(index)...) 
end +function Base.isassigned(a::BlockSparseArrayLike{<:Any,N}, index::Block{N}) where {N} + return isassigned(a, Tuple(index)...) +end + +# TODO: Define `blocksparse_isassigned`. +function Base.isassigned( + a::BlockSparseArrayLike{<:Any,N}, index::Vararg{BlockIndex{1},N} +) where {N} + b = block.(index) + return isassigned(a, b...) && isassigned(@view(a[b...]), blockindex.(index)...) +end + function Base.setindex!(a::BlockSparseArrayLike{<:Any,N}, value, I::BlockIndex{N}) where {N} blocksparse_setindex!(a, value, I) return a diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 7c4f998e65..00ff6b70df 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -1,6 +1,14 @@ @eval module $(gensym()) using BlockArrays: - Block, BlockRange, BlockedUnitRange, BlockVector, blockedrange, blocklength, blocksize + Block, + BlockRange, + BlockedUnitRange, + BlockVector, + blockedrange, + blocklength, + blocklengths, + blocksize, + mortar using LinearAlgebra: mul! using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored, block_reshape using NDTensors.SparseArrayInterface: nstored @@ -331,13 +339,69 @@ include("TestBlockSparseArraysUtils.jl") @test b[4, 4] == 44 end - # This is outputting only zero blocks. a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) - a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)]))) - a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)]))) + @views for b in [Block(1, 2), Block(2, 1)] + a[b] = randn(elt, size(a[b])) + end b = a[Block(2):Block(2), Block(1):Block(2)] @test block_nstored(b) == 1 @test b == Array(a)[3:5, 1:end] + + a = BlockSparseArray{elt}(undef, ([2, 3, 4], [2, 3, 4])) + # TODO: Define `block_diagindices`. 
+ @views for b in [Block(1, 1), Block(2, 2), Block(3, 3)] + a[b] = randn(elt, size(a[b])) + end + for (I1, I2) in ( + (mortar([Block(2)[2:3], Block(3)[1:3]]), mortar([Block(2)[2:3], Block(3)[2:3]])), + ([Block(2)[2:3], Block(3)[1:3]], [Block(2)[2:3], Block(3)[2:3]]), + ) + for b in (a[I1, I2], @view(a[I1, I2])) + # TODO: Rename `block_stored_length`. + @test block_nstored(b) == 2 + @test b[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] + @test b[Block(2, 2)] == a[Block(3, 3)[1:3, 2:3]] + end + end + + a = BlockSparseArray{elt}(undef, ([3, 3], [3, 3])) + # TODO: Define `block_diagindices`. + @views for b in [Block(1, 1), Block(2, 2)] + a[b] = randn(elt, size(a[b])) + end + I = mortar([Block(1)[1:2], Block(2)[1:2]]) + b = a[:, I] + @test b[Block(1, 1)] == a[Block(1, 1)][:, 1:2] + @test b[Block(2, 1)] == a[Block(2, 1)][:, 1:2] + @test b[Block(1, 2)] == a[Block(1, 2)][:, 1:2] + @test b[Block(2, 2)] == a[Block(2, 2)][:, 1:2] + @test blocklengths.(axes(b)) == ([3, 3], [2, 2]) + # TODO: Rename `block_stored_length`. 
+ @test blocksize(b) == (2, 2) + @test block_nstored(b) == 2 + + a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) + @views for b in [Block(1, 2), Block(2, 1)] + a[b] = randn(elt, size(a[b])) + end + @test isassigned(a, 1, 1) + @test isassigned(a, 5, 7) + @test !isassigned(a, 0, 1) + @test !isassigned(a, 5, 8) + @test isassigned(a, Block(1), Block(1)) + @test isassigned(a, Block(2), Block(2)) + @test !isassigned(a, Block(1), Block(0)) + @test !isassigned(a, Block(3), Block(2)) + @test isassigned(a, Block(1, 1)) + @test isassigned(a, Block(2, 2)) + @test !isassigned(a, Block(1, 0)) + @test !isassigned(a, Block(3, 2)) + @test isassigned(a, Block(1)[1], Block(1)[1]) + @test isassigned(a, Block(2)[3], Block(2)[4]) + @test !isassigned(a, Block(1)[0], Block(1)[1]) + @test !isassigned(a, Block(2)[3], Block(2)[5]) + @test !isassigned(a, Block(1)[1], Block(0)[1]) + @test !isassigned(a, Block(3)[3], Block(2)[4]) end @testset "LinearAlgebra" begin a1 = BlockSparseArray{elt}([2, 3], [2, 3]) diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl index 6f755dd405..6ab1f77a88 100644 --- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl +++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl @@ -160,16 +160,11 @@ function sparse_setindex!(a::AbstractArray, value, I::NotStoredIndex) end # isassigned -function sparse_isassigned(a::AbstractArray, I::Integer...) - return sparse_isassigned(a, CartesianIndex(I)) +function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N} + return sparse_isassigned(a, Tuple(I)...) 
end -sparse_isassigned(a::AbstractArray, I::NotStoredIndex) = true -sparse_isassigned(a::AbstractArray, I::StoredIndex) = sparse_isassigned(a, StorageIndex(I)) -function sparse_isassigned(a::AbstractArray, I::StorageIndex) - return isassigned(sparse_storage(a), index(I)) -end -function sparse_isassigned(a::AbstractArray, I::CartesianIndex) - return sparse_isassigned(a, storage_index(a, I)) +function sparse_isassigned(a::AbstractArray{<:Any,N}, I::Vararg{Integer,N}) where {N} + return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a)) end # A set of indices into the storage of the sparse array. diff --git a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl index 1be4d5d8dc..78bab9aecf 100644 --- a/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl +++ b/NDTensors/src/lib/SparseArrayInterface/test/test_abstractsparsearray.jl @@ -21,6 +21,14 @@ using Test: @test, @testset for I in eachindex(a) @test iszero(a) end + for I in CartesianIndices(a) + @test isassigned(a, Tuple(I)...) + @test isassigned(a, I) + end + @test !isassigned(a, 0, 1) + @test !isassigned(a, CartesianIndex(0, 1)) + @test !isassigned(a, 1, 4) + @test !isassigned(a, CartesianIndex(1, 4)) a = SparseArray{elt}(2, 3) fill!(a, 0) @@ -60,6 +68,14 @@ using Test: @test, @testset @test iszero(a[I]) end end + for I in CartesianIndices(a) + @test isassigned(a, Tuple(I)...) 
+ @test isassigned(a, I) + end + @test !isassigned(a, 0, 1) + @test !isassigned(a, CartesianIndex(0, 1)) + @test !isassigned(a, 1, 4) + @test !isassigned(a, CartesianIndex(1, 4)) a = SparseArray{elt}(2, 3) a[1, 2] = 12 From de78e9fa45af3fae8470d85de2938be77ba09540 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Tue, 11 Jun 2024 10:46:44 -0400 Subject: [PATCH 8/8] [GradedAxes] [BlockSparseArrays] Fix ambiguity error when slicing GradedUnitRange with BlockSlice (#1491) * [GradedAxes] [BlockSparseArrays] Fix ambiguity issue when slicing GradedUnitRange with BlockSlice * [NDTensors] Bump to v0.3.25 --- NDTensors/Project.toml | 2 +- .../test/runtests.jl | 69 +++++++++++-------- .../BlockArraysExtensions.jl | 22 ++++++ .../wrappedabstractblocksparsearray.jl | 5 +- .../lib/GradedAxes/src/blockedunitrange.jl | 7 ++ .../src/lib/GradedAxes/src/gradedunitrange.jl | 23 +++++++ .../src/lib/GradedAxes/test/test_basics.jl | 15 ++++ .../LabelledNumbers/src/LabelledNumbers.jl | 1 + .../src/LabelledNumbersBlockArraysExt.jl | 22 ++++++ .../src/lib/LabelledNumbers/test/Project.toml | 1 + .../src/lib/LabelledNumbers/test/runtests.jl | 15 +++- 11 files changed, 150 insertions(+), 32 deletions(-) create mode 100644 NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 407445f8d7..48e44340de 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.24" +version = "0.3.25" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl index fe177167dd..16458fde18 100644 --- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl +++ 
b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl @@ -95,37 +95,48 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) end @testset "dual axes" begin r = gradedrange([U1(0) => 2, U1(1) => 2]) - a = BlockSparseArray{elt}(dual(r), r) - @views for b in [Block(1, 1), Block(2, 2)] - a[b] = randn(elt, size(a[b])) - end - # TODO: Define and use `isdual` here. - @test axes(a, 1) isa UnitRangeDual - @test axes(a, 2) isa GradedUnitRange - @test !(axes(a, 2) isa UnitRangeDual) - a_dense = Array(a) - @test eachindex(a) == CartesianIndices(size(a)) - for I in eachindex(a) - @test a[I] == a_dense[I] - end - @test axes(a') == dual.(reverse(axes(a))) - # TODO: Define and use `isdual` here. - @test axes(a', 1) isa UnitRangeDual - @test axes(a', 2) isa GradedUnitRange - @test !(axes(a', 2) isa UnitRangeDual) - @test isnothing(show(devnull, MIME("text/plain"), a)) - - # Check preserving dual in tensor algebra. - for b in (a + a, 2 * a, 3 * a - a) - @test Array(b) ≈ 2 * Array(a) + for ax in ((r, r), (dual(r), r), (r, dual(r)), (dual(r), dual(r))) + a = BlockSparseArray{elt}(ax...) + @views for b in [Block(1, 1), Block(2, 2)] + a[b] = randn(elt, size(a[b])) + end # TODO: Define and use `isdual` here. 
- @test axes(b, 1) isa UnitRangeDual
-      @test axes(b, 2) isa GradedUnitRange
-      @test !(axes(b, 2) isa UnitRangeDual)
-    end
+      for dim in 1:ndims(a)
+        @test typeof(ax[dim]) === typeof(axes(a, dim))
+      end
+      @test @view(a[Block(1, 1)])[1, 1] == a[1, 1]
+      @test @view(a[Block(1, 1)])[2, 1] == a[2, 1]
+      @test @view(a[Block(1, 1)])[1, 2] == a[1, 2]
+      @test @view(a[Block(1, 1)])[2, 2] == a[2, 2]
+      @test @view(a[Block(2, 2)])[1, 1] == a[3, 3]
+      @test @view(a[Block(2, 2)])[2, 1] == a[4, 3]
+      @test @view(a[Block(2, 2)])[1, 2] == a[3, 4]
+      @test @view(a[Block(2, 2)])[2, 2] == a[4, 4]
+      @test @view(a[Block(1, 1)])[1:2, 1:2] == a[1:2, 1:2]
+      @test @view(a[Block(2, 2)])[1:2, 1:2] == a[3:4, 3:4]
+      a_dense = Array(a)
+      @test eachindex(a) == CartesianIndices(size(a))
+      for I in eachindex(a)
+        @test a[I] == a_dense[I]
+      end
+      @test axes(a') == dual.(reverse(axes(a)))
+      # TODO: Define and use `isdual` here.
+      @test typeof(axes(a', 1)) === typeof(dual(axes(a, 2)))
+      @test typeof(axes(a', 2)) === typeof(dual(axes(a, 1)))
+      @test isnothing(show(devnull, MIME("text/plain"), a))
-
-    # Check preserving dual in tensor algebra.
-    for b in (a + a, 2 * a, 3 * a - a)
-      @test Array(b) ≈ 2 * Array(a)
+      # Check preserving dual in tensor algebra.
+      for b in (a + a, 2 * a, 3 * a - a)
+        @test Array(b) ≈ 2 * Array(a)
+        # TODO: Define and use `isdual` here.
+        for dim in 1:ndims(a)
+          @test typeof(axes(b, dim)) === typeof(axes(a, dim))
+        end
+      end
+
+      @test isnothing(show(devnull, MIME("text/plain"), @view(a[Block(1, 1)])))
+      @test @view(a[Block(1, 1)]) == a[Block(1, 1)]
+    end

     # Test case when all axes are dual.
for r in (gradedrange([U1(0) => 2, U1(1) => 2]), blockedrange([2, 2])) diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl index 57f2eb2846..2f24823317 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -19,6 +19,21 @@ using Dictionaries: Dictionary, Indices using ..GradedAxes: blockedunitrange_getindices using ..SparseArrayInterface: stored_indices +# GenericBlockSlice works around an issue that the indices of BlockSlice +# are restricted to Int element type. +# TODO: Raise an issue/make a pull request in BlockArrays.jl. +struct GenericBlockSlice{B,T<:Integer,I<:AbstractUnitRange{T}} <: AbstractUnitRange{T} + block::B + indices::I +end +BlockArrays.Block(bs::GenericBlockSlice{<:Block}) = bs.block +for f in (:axes, :unsafe_indices, :axes1, :first, :last, :size, :length, :unsafe_length) + @eval Base.$f(S::GenericBlockSlice) = Base.$f(S.indices) +end +Base.getindex(S::GenericBlockSlice, i::Integer) = getindex(S.indices, i) + +# BlockIndices works around an issue that the indices of BlockSlice +# are restricted to AbstractUnitRange{Int}. struct BlockIndices{B,T<:Integer,I<:AbstractVector{T}} <: AbstractVector{T} blocks::B indices::I @@ -175,6 +190,13 @@ function blockrange(axis::AbstractUnitRange, r::BlockSlice) return blockrange(axis, r.block) end +# GenericBlockSlice works around an issue that the indices of BlockSlice +# are restricted to Int element type. +# TODO: Raise an issue/make a pull request in BlockArrays.jl. 
+function blockrange(axis::AbstractUnitRange, r::GenericBlockSlice) + return blockrange(axis, r.block) +end + function blockrange(a::AbstractUnitRange, r::BlockIndices) return blockrange(a, r.blocks) end diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 5499c957eb..d73665f2bf 100644 --- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -78,7 +78,10 @@ end # TODO: Move to blocksparsearrayinterface. function blocksparse_unblock(a, inds, I::Tuple{AbstractUnitRange{<:Integer},Vararg{Any}}) bs = blockrange(inds[1], I[1]) - return BlockSlice(bs, blockedunitrange_getindices(inds[1], I[1])) + # GenericBlockSlice works around an issue that the indices of BlockSlice + # are restricted to Int element type. + # TODO: Raise an issue/make a pull request in BlockArrays.jl. + return GenericBlockSlice(bs, blockedunitrange_getindices(inds[1], I[1])) end function BlockArrays.unblock(a, inds, I::Tuple{AbstractVector{<:Block{1}},Vararg{Any}}) diff --git a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl index 1a2b4c2a19..2829043b7d 100644 --- a/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl +++ b/NDTensors/src/lib/GradedAxes/src/blockedunitrange.jl @@ -3,6 +3,7 @@ using BlockArrays: Block, BlockIndexRange, BlockRange, + BlockSlice, BlockedUnitRange, block, blockindex, @@ -73,6 +74,12 @@ function blockedunitrange_getindices(a::BlockedUnitRange, indices::BlockIndexRan return a[block(indices)][only(indices.indices)] end +# TODO: Move this to a `BlockArraysExtensions` library. +function blockedunitrange_getindices(a::BlockedUnitRange, indices::BlockSlice) + # TODO: Is this a good definition? 
It ignores `indices.indices`. + return a[indices.block] +end + # TODO: Move this to a `BlockArraysExtensions` library. function blockedunitrange_getindices(a::BlockedUnitRange, indices::Vector{<:Integer}) return map(index -> a[index], indices) diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl index 851ef284f9..1cb7d5abe0 100644 --- a/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl +++ b/NDTensors/src/lib/GradedAxes/src/gradedunitrange.jl @@ -4,6 +4,7 @@ using BlockArrays: BlockedUnitRange, BlockIndex, BlockRange, + BlockSlice, BlockVector, blockedrange, BlockIndexRange, @@ -165,6 +166,15 @@ function blockedunitrange_getindices( return labelled_blocks(a_indices, blocklabels(ga, indices)) end +# Fixes ambiguity error with: +# ```julia +# blockedunitrange_getindices(::GradedUnitRange, ::AbstractUnitRange{<:Integer}) +# ``` +# TODO: Try removing once GradedAxes is rewritten for BlockArrays v1. +function blockedunitrange_getindices(a::GradedUnitRange, indices::BlockSlice) + return a[indices.block] +end + function blockedunitrange_getindices(ga::GradedUnitRange, indices::BlockRange) return labelled_blocks(unlabel_blocks(ga)[indices], blocklabels(ga, indices)) end @@ -200,6 +210,19 @@ function Base.getindex(a::GradedUnitRange, indices::BlockIndex{1}) return blockedunitrange_getindices(a, indices) end +# Fixes ambiguity issues with: +# ```julia +# getindex(::BlockedUnitRange, ::BlockSlice) +# getindex(::GradedUnitRange, ::AbstractUnitRange{<:Integer}) +# getindex(::GradedUnitRange, ::Any) +# getindex(::AbstractUnitRange, ::AbstractUnitRange{<:Integer}) +# ``` +# TODO: Maybe not needed once GradedAxes is rewritten +# for BlockArrays v1. 
+function Base.getindex(a::GradedUnitRange, indices::BlockSlice) + return blockedunitrange_getindices(a, indices) +end + function Base.getindex(a::GradedUnitRange, indices) return blockedunitrange_getindices(a, indices) end diff --git a/NDTensors/src/lib/GradedAxes/test/test_basics.jl b/NDTensors/src/lib/GradedAxes/test/test_basics.jl index c230c755f4..dcf9d078c4 100644 --- a/NDTensors/src/lib/GradedAxes/test/test_basics.jl +++ b/NDTensors/src/lib/GradedAxes/test/test_basics.jl @@ -1,6 +1,7 @@ @eval module $(gensym()) using BlockArrays: Block, + BlockSlice, BlockVector, blockedrange, blockfirsts, @@ -87,6 +88,20 @@ using Test: @test, @test_broken, @testset @test blocklengths(ax) == blocklengths(a) @test blocklabels(ax) == blocklabels(a) + # Regression test for ambiguity error. + x = gradedrange(["x" => 2, "y" => 3]) + a = x[BlockSlice(Block(1), Base.OneTo(2))] + @test length(a) == 2 + @test a == 1:2 + @test blocklength(a) == 1 + # TODO: Should this be a `GradedUnitRange`, + # or maybe just a `LabelledUnitRange`? 
+ @test a isa LabelledUnitRange + @test length(a[Block(1)]) == 2 + @test label(a) == "x" + @test a[Block(1)] == 1:2 + @test label(a[Block(1)]) == "x" + x = gradedrange(["x" => 2, "y" => 3]) a = x[3:4] @test a isa GradedUnitRange diff --git a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl index 5578d0d1e3..7f54a02f4f 100644 --- a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl +++ b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbers.jl @@ -4,4 +4,5 @@ include("labellednumber.jl") include("labelledinteger.jl") include("labelledarray.jl") include("labelledunitrange.jl") +include("LabelledNumbersBlockArraysExt.jl") end diff --git a/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl new file mode 100644 index 0000000000..38c8b4e555 --- /dev/null +++ b/NDTensors/src/lib/LabelledNumbers/src/LabelledNumbersBlockArraysExt.jl @@ -0,0 +1,22 @@ +using BlockArrays: BlockArrays, Block, blockaxes, blockfirsts, blocklasts + +# Fixes ambiguity error with: +# ```julia +# getindex(::LabelledUnitRange, ::Any...) +# getindex(::AbstractArray{<:Any,N}, ::Block{N}) where {N} +# getindex(::AbstractArray, ::Block{1}, ::Any...) 
+# ``` +function Base.getindex(a::LabelledUnitRange, index::Block{1}) + @boundscheck index == Block(1) || throw(BlockBoundsError(a, index)) + return a +end + +function BlockArrays.blockaxes(a::LabelledUnitRange) + return blockaxes(unlabel(a)) +end +function BlockArrays.blockfirsts(a::LabelledUnitRange) + return blockfirsts(unlabel(a)) +end +function BlockArrays.blocklasts(a::LabelledUnitRange) + return blocklasts(unlabel(a)) +end diff --git a/NDTensors/src/lib/LabelledNumbers/test/Project.toml b/NDTensors/src/lib/LabelledNumbers/test/Project.toml index ef491a529c..d1bf575ce0 100644 --- a/NDTensors/src/lib/LabelledNumbers/test/Project.toml +++ b/NDTensors/src/lib/LabelledNumbers/test/Project.toml @@ -1,3 +1,4 @@ [deps] +BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl index 4457e4fcb8..9ee43c6746 100644 --- a/NDTensors/src/lib/LabelledNumbers/test/runtests.jl +++ b/NDTensors/src/lib/LabelledNumbers/test/runtests.jl @@ -1,6 +1,7 @@ @eval module $(gensym()) using LinearAlgebra: norm -using NDTensors.LabelledNumbers: LabelledInteger, islabelled, label, labelled, unlabel +using NDTensors.LabelledNumbers: + LabelledInteger, LabelledUnitRange, islabelled, label, labelled, unlabel using Test: @test, @testset @testset "LabelledNumbers" begin @testset "Labelled number ($n)" for n in (2, 2.0) @@ -112,4 +113,16 @@ using Test: @test, @testset end end end + +using BlockArrays: Block, blockaxes, blocklength, blocklengths +@testset "LabelledNumbersBlockArraysExt" begin + x = labelled(1:2, "x") + @test blockaxes(x) == (Block.(1:1),) + @test blocklength(x) == 1 + @test blocklengths(x) == [2] + a = x[Block(1)] + @test a == 1:2 + @test a isa LabelledUnitRange + @test label(a) == "x" +end end