This repository has been archived by the owner on Dec 7, 2021. It is now read-only.

Commit

Rename Empty -> EmptyStorage and add more operations for `EmptyStorage` (#81)

* Rename `Empty -> EmptyStorage` and add more operations for `EmptyStorage` (like addition, contraction, etc.)

* Make an internal NDTensors.similar function that is more extensive than Base.similar (to avoid type piracy)
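
A minimal sketch (not part of the commit) of the type-piracy point in the second bullet: new methods are added to a function the module owns — here a hypothetical `MyStorage.similar` that falls back to `Base.similar` — instead of extending `Base.similar` on types the module does not own. All names below are made up; the real definitions live in `src/similar.jl`.

```julia
# Hypothetical illustration of the "internal similar" pattern.
module MyStorage

struct Dense{ElT,DataT<:AbstractVector}
  data::DataT
end

# Module-local `similar`: extra methods go here, not on Base.similar,
# so nothing is ever pirated onto a Base function for Base-owned types.
similar(x, args...) = Base.similar(x, args...)

# A more "extensive" method than Base provides: storage *type* in,
# new element type and length out.
function similar(::Type{<:Dense}, ::Type{ElT}, length::Int) where {ElT}
  return Dense{ElT,Vector{ElT}}(Base.similar(Vector{ElT}, length))
end

end # module

# MyStorage.similar(MyStorage.Dense{Float64,Vector{Float64}}, ComplexF64, 4)
# returns a Dense wrapping an uninitialized 4-element Vector{ComplexF64}.
```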
mtfishman committed May 4, 2021
1 parent 233780b commit 0363fc4
Showing 16 changed files with 431 additions and 125 deletions.
1 change: 1 addition & 0 deletions src/NDTensors.jl
@@ -28,6 +28,7 @@ include("imports.jl")
# DenseTensor and DiagTensor
#
include("aliasstyle.jl")
include("similar.jl")
include("tupletools.jl")
include("dims.jl")
include("tensorstorage.jl")
2 changes: 1 addition & 1 deletion src/blocksparse/blockdims.jl
@@ -18,7 +18,7 @@ const BlockDims{N} = NTuple{N,BlockDim}

Base.ndims(ds::Type{<:BlockDims{N}}) where {N} = N

similar_type(::Type{<:BlockDims}, ::Type{Val{N}}) where {N} = BlockDims{N}
similartype(::Type{<:BlockDims}, ::Type{Val{N}}) where {N} = BlockDims{N}

Base.copy(ds::BlockDims) = ds

6 changes: 6 additions & 0 deletions src/blocksparse/blocksparse.jl
@@ -51,11 +51,17 @@ end
#end
#BlockSparse{ElT}() where {ElT} = BlockSparse(ElT[],BlockOffsets())

datatype(::Type{<:BlockSparse{<:Any,DataT}}) where {DataT} = DataT

similar(D::BlockSparse) = setdata(D, similar(data(D)))

# TODO: test this function
similar(D::BlockSparse, ::Type{ElT}) where {ElT} = setdata(D, similar(data(D), ElT))

function similartype(::Type{StoreT}, ::Type{ElT}) where {StoreT<:BlockSparse,ElT}
return BlockSparse{ElT,similartype(datatype(StoreT), ElT),ndims(StoreT)}
end

# TODO: check the offsets are the same?
function copyto!(D1::BlockSparse, D2::BlockSparse)
blockoffsets(D1) ≠ blockoffsets(D2) &&
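A rough sketch (stand-in types, not NDTensors code) of the `datatype`/`similartype` pattern added in the hunk above: `datatype` reads the data-vector type out of a storage type, and `similartype` rebuilds the storage type for a new element type, entirely at the type level.

```julia
# Stand-in storage type; the real methods above do the same for
# BlockSparse{ElT,DataT,N}.
struct Store{ElT,DataT<:AbstractVector}
  data::DataT
end

# Pull the data type out of the storage *type*.
datatype(::Type{<:Store{<:Any,DataT}}) where {DataT} = DataT

# Compute the type you would get after switching element types; no allocation.
similartype(::Type{<:AbstractVector}, ::Type{ElT}) where {ElT} = Vector{ElT}
function similartype(::Type{StoreT}, ::Type{ElT}) where {StoreT<:Store,ElT}
  return Store{ElT,similartype(datatype(StoreT), ElT)}
end

# similartype(Store{Float64,Vector{Float64}}, ComplexF64)
# == Store{ComplexF64,Vector{ComplexF64}}
```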
31 changes: 18 additions & 13 deletions src/blocksparse/blocksparsetensor.jl
@@ -10,14 +10,14 @@ nonzeros(T::Tensor) = data(T)
# Special version for BlockSparseTensor
# Generic version doesn't work since BlockSparse is parametrized by
# the Tensor order
function similar_type(
function similartype(
::Type{<:Tensor{ElT,NT,<:BlockSparse{ElT,VecT},<:Any}}, ::Type{IndsR}
) where {NT,ElT,VecT,IndsR}
NR = length(IndsR)
return Tensor{ElT,NR,BlockSparse{ElT,VecT,NR},IndsR}
end

function similar_type(
function similartype(
::Type{<:Tensor{ElT,NT,<:BlockSparse{ElT,VecT},<:Any}}, ::Type{IndsR}
) where {NT,ElT,VecT,IndsR<:NTuple{NR}} where {NR}
return Tensor{ElT,NR,BlockSparse{ElT,VecT,NR},IndsR}
@@ -147,6 +147,11 @@ function similar(
return BlockSparseTensor(ElT, undef, blockoffsets, inds)
end

# This version of similar creates a tensor with no blocks
function similar(::Type{TensorT}, inds::Tuple) where {TensorT<:BlockSparseTensor}
return similar(TensorT, BlockOffsets{ndims(TensorT)}(), inds)
end

function zeros(
::BlockSparseTensor{ElT,N}, blockoffsets::BlockOffsets{N}, inds
) where {ElT,N}
@@ -303,8 +308,7 @@ function +(T1::BlockSparseTensor{<:Number,N}, T2::BlockSparseTensor{<:Number,N})
inds(T1) ≠ inds(T2) &&
error("Cannot add block sparse tensors with different block structure")
R = copy(T1)
R = permutedims!!(R, T2, ntuple(identity, Val(N)), +)
return R
return permutedims!!(R, T2, ntuple(identity, Val(N)), +)
end

function permutedims(T::BlockSparseTensor{<:Number,N}, perm::NTuple{N,Int}) where {N}
@@ -627,14 +631,15 @@ function permutedims!!(
perm::NTuple{N,Int},
f::Function=(r, t) -> t,
) where {ElR,ElT,N}
RR = convert(promote_type(typeof(R), typeof(T)), R)
#@timeit_debug timer "block sparse permutedims!!" begin
bofsRR = blockoffsets(R)
bofsRR = blockoffsets(RR)
bofsT = blockoffsets(T)

# Determine if bofsRR has been copied
copy_bofsRR = false

new_nnz = nnz(R)
new_nnz = nnz(RR)
for (blockT, offsetT) in pairs(bofsT)
blockTperm = permute(blockT, perm)
if !isassigned(bofsRR, blockTperm)
@@ -653,13 +658,13 @@
## # and offsets
## copyto!(data(RR), data(R))

if new_nnz > nnz(R)
dataRR = append!(data(R), zeros(new_nnz - nnz(R)))
R = Tensor(BlockSparse(dataRR, bofsRR), inds(R))
if new_nnz > nnz(RR)
dataRR = append!(data(RR), zeros(new_nnz - nnz(RR)))
RR = Tensor(BlockSparse(dataRR, bofsRR), inds(RR))
end

permutedims!(R, T, perm, f)
return R
permutedims!(RR, T, perm, f)
return RR
#end
end

@@ -1025,7 +1030,7 @@ function permute_combine(inds::IndsT, pos::Vararg{IntOrIntTuple,N}) where {IndsT
end
newinds[i] = newind_i
end
IndsR = similar_type(IndsT, Val{N})
IndsR = similartype(IndsT, Val{N})
indsR = IndsR(Tuple(newinds))
return indsR
end
@@ -1049,7 +1054,7 @@ function combine(inds::IndsT, com::Vararg{IntOrIntTuple,N}) where {IndsT,N}
end
newinds[i] = newind_i
end
IndsR = similar_type(IndsT, Val{N})
IndsR = similartype(IndsT, Val{N})
indsR = IndsR(Tuple(newinds))
return indsR
end
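The repeated `RR = convert(promote_type(typeof(R), typeof(T)), R)` lines in this file follow the double-bang convention: `permutedims!!` may mutate its first argument in place, but if the destination's element type cannot hold the result it promotes first and returns whichever object it ended up writing into. A stand-alone sketch of that idea with plain arrays (not the NDTensors implementation):

```julia
# "Maybe promote, then mutate" sketch: callers must rebind the result,
# because RR can be a promoted copy rather than R itself.
function permutedims_maybe_promote!!(R::AbstractArray, T::AbstractArray,
                                     perm, f=(r, t) -> t)
  RR = convert(AbstractArray{promote_type(eltype(R), eltype(T))}, R)
  RR .= f.(RR, permutedims(T, perm))
  return RR
end

R = zeros(Int, 2, 3)
T = rand(3, 2)
R = permutedims_maybe_promote!!(R, T, (2, 1), +)  # now a 2×3 Matrix{Float64}
```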
18 changes: 10 additions & 8 deletions src/blocksparse/diagblocksparse.jl
@@ -151,7 +151,7 @@ end
# ElT2<:Number} = promote_type(Vector{ElT2},ElT1)

# TODO: how do we make this work more generally for T2<:AbstractVector{S2}?
# Make a similar_type(AbstractVector{S2},T1) -> AbstractVector{T1} function?
# Make a similartype(AbstractVector{S2},T1) -> AbstractVector{T1} function?
function promote_rule(
::Type{<:UniformDiagBlockSparse{ElT1,VecT1}},
::Type{<:NonuniformDiagBlockSparse{ElT2,Vector{ElT2}}},
@@ -255,7 +255,7 @@ end
function contraction_output_type(
TensorT1::Type{<:DiagBlockSparseTensor}, TensorT2::Type{<:BlockSparseTensor}, IndsR::Type
)
return similar_type(promote_type(TensorT1, TensorT2), IndsR)
return similartype(promote_type(TensorT1, TensorT2), IndsR)
end

function contraction_output_type(
@@ -278,9 +278,9 @@ function contraction_output_type(
if ValLength(IndsR) === Val{N1 + N2}
# Turn into is_outer(inds1,inds2,indsR) function?
# How does type inference work with arithmetic of compile time values?
return similar_type(dense(promote_type(TensorT1, TensorT2)), IndsR)
return similartype(dense(promote_type(TensorT1, TensorT2)), IndsR)
end
return similar_type(promote_type(TensorT1, TensorT2), IndsR)
return similartype(promote_type(TensorT1, TensorT2), IndsR)
end

# The output must be initialized as zero since it is sparse, cannot be undefined
@@ -461,8 +461,9 @@ function permutedims!!(
perm::NTuple{N,Int},
f::Function=(r, t) -> t,
) where {N}
permutedims!(R, T, perm, f)
return R
RR = convert(promote_type(typeof(R), typeof(T)), R)
permutedims!(RR, T, perm, f)
return RR
end

function permutedims!!(
Expand All @@ -471,8 +472,9 @@ function permutedims!!(
perm::NTuple{N,Int},
f::Function=(r, t) -> t,
) where {ElR,ElT,N}
R = tensor(DiagBlockSparse(f(getdiagindex(R, 1), getdiagindex(T, 1))), inds(R))
return R
RR = convert(promote_type(typeof(R), typeof(T)), R)
RR = tensor(DiagBlockSparse(f(getdiagindex(RR, 1), getdiagindex(T, 1))), inds(RR))
return RR
end

function permutedims!(
12 changes: 9 additions & 3 deletions src/blocksparse/linearalgebra.jl
@@ -51,7 +51,13 @@ function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT}
Us[n] = Ub
Ss[n] = Sb
Vs[n] = Vb
append!(d, vector(diag(Sb)))
# Previously this was:
# vector(diag(Sb))
# But it broke, did `diag(::Tensor)` change types?
# TODO: call this a function `diagonal`, i.e.:
# https://github.com/JuliaLang/julia/issues/30250
# or make `diag(::Tensor)` return a view by default.
append!(d, data(Sb))
end

# Square the singular values to get
@@ -201,14 +207,14 @@ function LinearAlgebra.eigen(
Db, Vb = eigen(blockT)
Ds = [Db]
Vs = [Vb]
append!(d, abs.(vector(diag(Db))))
append!(d, abs.(data(Db)))
for (n, b) in enumerate(eachnzblock(T))
n == 1 && continue
blockT = blockview(T, b)
Db, Vb = eigen(blockT)
push!(Ds, Db)
push!(Vs, Vb)
append!(d, abs.(vector(diag(Db))))
append!(d, abs.(data(Db)))
end

dropblocks = Int[]
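The comment in the svd hunk above notes that `Sb` has diagonal storage, so its raw data vector already is the diagonal and `data(Sb)` can replace `vector(diag(Sb))`. The same relationship holds for the stdlib `Diagonal` type, which makes a convenient non-NDTensors analogy:

```julia
using LinearAlgebra

# For diagonal-only storage the stored vector *is* the diagonal, so reading
# it directly avoids whatever intermediate diag() might construct.
S = Diagonal([3.0, 2.0, 1.0])
diag(S) == S.diag        # true
d = Float64[]
append!(d, S.diag)       # analogous to append!(d, data(Sb)) in the hunk above
```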
19 changes: 5 additions & 14 deletions src/combiner.jl
@@ -54,14 +54,10 @@ function contraction_output(
return contraction_output(T2, T1, indsR)
end

function contract!!(
R::Tensor{<:Number,NR},
labelsR::NTuple{NR},
T1::CombinerTensor{<:Number,N1},
labelsT1::NTuple{N1},
T2::Tensor{<:Number,N2},
labelsT2::NTuple{N2},
) where {NR,N1,N2}
function contract!!(R::Tensor, labelsR, T1::CombinerTensor, labelsT1, T2::Tensor, labelsT2)
NR = ndims(R)
N1 = ndims(T1)
N2 = ndims(T2)
if N1 ≤ 1
# Empty combiner, acts as multiplying by 1
R = permutedims!!(R, T2, getperm(labelsR, labelsT2))
@@ -111,12 +107,7 @@ function contract!!(
end

function contract!!(
R::Tensor{<:Number,NR},
labelsR::NTuple{NR},
T1::Tensor{<:Number,N1},
labelsT1::NTuple{N1},
T2::CombinerTensor{<:Number,N2},
labelsT2::NTuple{N2},
R::Tensor, labelsR, T1::Tensor, labelsT1, T2::CombinerTensor, labelsT2
) where {NR,N1,N2}
return contract!!(R, labelsR, T2, labelsT2, T1, labelsT1)
end
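A sketch of the signature change in this file (and in dense.jl below): dropping the `NTuple{N}` and `where {NR,N1,N2}` constraints and recovering the ranks with `ndims` inside the body keeps the same information available while loosening dispatch, which reduces method-ambiguity and specialization headaches. Stand-in names, not the NDTensors methods:

```julia
# Before: ranks pinned in the signature,
#   contractlike(R::AbstractArray{<:Any,NR}, labelsR::NTuple{NR},
#                T::AbstractArray{<:Any,NT}, labelsT::NTuple{NT}) where {NR,NT}
# After: untyped label arguments; ranks recovered at run time.
function contractlike(R::AbstractArray, labelsR, T::AbstractArray, labelsT)
  NR = ndims(R)
  NT = ndims(T)
  length(labelsR) == NR || throw(ArgumentError("expected $NR labels for R"))
  length(labelsT) == NT || throw(ArgumentError("expected $NT labels for T"))
  # ...the body can still branch on NR and NT exactly as before...
  return R
end
```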
64 changes: 28 additions & 36 deletions src/dense.jl
@@ -68,6 +68,17 @@ function similar(
return Dense(similar(Vector{ElT}, length))
end

function similartype(::Type{StoreT}, ::Type{ElT}) where {StoreT<:Dense,ElT}
return Dense{ElT,similartype(datatype(StoreT), ElT)}
end

# TODO: make these more general, move to tensorstorage.jl
datatype(::Type{<:Dense{<:Any,DataT}}) where {DataT} = DataT

function similar(::Type{StorageT}, ::Type{ElT}, length::Int) where {StorageT<:Dense,ElT}
return Dense(similar(datatype(StorageT), ElT, length))
end

similar(D::Dense, ::Type{T}) where {T<:Number} = Dense(similar(data(D), T))

zeros(DenseT::Type{<:Dense{ElT}}, inds) where {ElT} = zeros(DenseT, dim(inds))
@@ -199,19 +210,6 @@ function zeros(TensorT::Type{<:DenseTensor}, inds::Tuple{})
return _zeros(TensorT, inds)
end

function _similar(TensorT::Type{<:DenseTensor}, inds)
return tensor(similar(storagetype(TensorT), dim(inds)), inds)
end

function similar(TensorT::Type{<:DenseTensor}, inds)
return _similar(TensorT, inds)
end

# To fix method ambiguity with similar(::AbstractArray,::Tuple)
function similar(TensorT::Type{<:DenseTensor}, inds::Dims)
return _similar(TensorT, inds)
end

# To fix method ambiguity with similar(::AbstractArray,::Type)
function similar(T::DenseTensor, ::Type{ElT}) where {ElT}
return tensor(similar(storage(T), ElT), inds(T))
@@ -355,21 +353,19 @@ end

# Version that may overwrite the result or promote
# and return the result
# TODO: move to tensor.jl?
function permutedims!!(
R::Tensor, T::Tensor, perm::NTuple{N,Int}, f::Function=(r, t) -> t
) where {N}
function permutedims!!(R::DenseTensor, T::DenseTensor, perm::Tuple, f::Function=(r, t) -> t)
RR = convert(promote_type(typeof(R), typeof(T)), R)
#RA = array(R)
#TA = array(T)
RA = ReshapedArray(data(R), dims(R), ())
RA = ReshapedArray(data(RR), dims(RR), ())
TA = ReshapedArray(data(T), dims(T), ())
if !is_trivial_permutation(perm)
@strided RA .= f.(RA, permutedims(TA, perm))
else
# TODO: specialize for specific functions
RA .= f.(RA, TA)
end
return R
return RR
end

# TODO: move to tensor.jl?
@@ -524,7 +520,7 @@ const ⊗ = outer
function contraction_output_type(
::Type{TensorT1}, ::Type{TensorT2}, ::Type{IndsR}
) where {TensorT1<:Tensor,TensorT2<:Tensor,IndsR}
return similar_type(promote_type(TensorT1, TensorT2), IndsR)
return similartype(promote_type(TensorT1, TensorT2), IndsR)
end

function contraction_output(
@@ -559,15 +555,11 @@ end

# Move to tensor.jl? Is this generic for all storage types?
function contract!!(
R::Tensor{<:Number,NR},
labelsR::NTuple{NR},
T1::Tensor{<:Number,N1},
labelsT1::NTuple{N1},
T2::Tensor{<:Number,N2},
labelsT2::NTuple{N2},
α::Number=1,
β::Number=0,
) where {NR,N1,N2}
R::Tensor, labelsR, T1::Tensor, labelsT1, T2::Tensor, labelsT2, α::Number=1, β::Number=0
)
NR = ndims(R)
N1 = ndims(T1)
N2 = ndims(T2)
if (N1 ≠ 0) && (N2 ≠ 0) && (N1 + N2 == NR)
# Outer product
(α ≠ 1 || β ≠ 0) && error(
@@ -983,7 +975,7 @@ function permute_reshape(
newdims[i] = eltype(IndsT)(newdim_i)
end
end
newinds = similar_type(IndsT, Val{N})(Tuple(newdims))
newinds = similartype(IndsT, Val{N})(Tuple(newdims))
return reshape(T, newinds)
end

@@ -997,11 +989,11 @@ function LinearAlgebra.svd(
u = ind(UM, 2)
v = ind(VM, 2)

Linds = similar_type(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Uinds = push(Linds, u)

# TODO: do these positions need to be reversed?
Rinds = similar_type(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
Vinds = push(Rinds, v)

U = reshape(UM, Uinds)
@@ -1021,10 +1013,10 @@ function LinearAlgebra.qr(
r = ind(RM, 1)
# TODO: simplify this by permuting inds(T) by (Lpos,Rpos)
# then grab Linds,Rinds
Linds = similar_type(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Qinds = push(Linds, r)
Q = reshape(QM, Qinds)
Rinds = similar_type(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
Rinds = pushfirst(Rinds, r)
R = reshape(RM, Rinds)
return Q, R
@@ -1039,8 +1031,8 @@ function polar(
UM, PM = polar(M)

# TODO: turn these into functions
Linds = similar_type(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Rinds = similar_type(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))
Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL)))
Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR)))

# Use sim to create "similar" indices, in case
# the indices have identifiers. If not this should
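Based on the `Dense` methods added in this file, a hedged usage sketch (assuming this 2021 version of NDTensors and that the methods behave exactly as written in the hunks above; printed type parameters may differ):

```julia
# Assumed to follow directly from the definitions shown above:
# datatype extracts the data type, similar allocates new storage,
# and similartype only computes the resulting storage type.
using NDTensors

StorageT = NDTensors.Dense{Float64,Vector{Float64}}
NDTensors.datatype(StorageT)                     # Vector{Float64}
D = NDTensors.similar(StorageT, ComplexF64, 10)  # Dense over a 10-element Vector{ComplexF64}
NDTensors.similartype(StorageT, ComplexF64)      # Dense{ComplexF64,Vector{ComplexF64}}
```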