Skip to content

Commit

Permalink
improve performance of Monte-Carlo sampling
Browse files Browse the repository at this point in the history
  • Loading branch information
EigenSolver committed Jul 10, 2024
1 parent 73e876b commit 46faae6
Show file tree
Hide file tree
Showing 4 changed files with 25 additions and 22 deletions.
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "SpinShuttling"
uuid = "eff49e45-ae6d-44ae-9170-2124a87971c9"
authors = ["Yuning Zhang"]
version = "0.2.2"
version = "0.2.3"

[deps]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Expand Down
12 changes: 8 additions & 4 deletions src/SpinShuttling.jl
Original file line number Diff line number Diff line change
Expand Up @@ -260,10 +260,14 @@ Sampling an observable that defines on a specific spin shuttling model
- `objective::Function`: The objective function `objective(model::ShuttlingModel, randseq; isarray)`
- `M::Int`: Monte-Carlo sampling size
"""
function sampling(model::ShuttlingModel, objective::Function, M::Int; isarray::Bool=false)
randpool = randn(model.n * model.N, M)
samplingfunction = i::Int -> objective(model, randpool[:, i]; isarray=isarray)::Union{Number,VecOrMat{<:Number}}
return sampling(samplingfunction, M)
function sampling(model::ShuttlingModel, objective::Function, M::Int; isarray::Bool=false, isparallel::Bool=true)
    # Total number of Gaussian draws needed per sample: one per spin per time step.
    dim = model.n * model.N
    # Fresh noise is generated on every call instead of pre-allocating an
    # `(dim × M)` pool up front, so memory use stays independent of `M`.
    draw()::Union{Number,VecOrMat{<:Number}} = objective(model, randn(dim); isarray=isarray)
    # Dispatch to the threaded or the serial accumulator.
    return isparallel ? parallelsampling(draw, M) : serialsampling(draw, M)
end


Expand Down
19 changes: 8 additions & 11 deletions src/sampling.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
Monte-Carlo sampling of any objective function.
Monte-Carlo sampling of any objective function without storage in memory.
The function must return Tuple{Number,Number} or Tuple{VecOrMat{<:Number},VecOrMat{<:Number}}
# Arguments
Expand All @@ -17,15 +17,12 @@ sampling(f, 1000)
# Reference
https://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
"""
function sampling(samplingfunction::Function, M::Int)::Union{Tuple{Number,Number},Tuple{VecOrMat{<:Number},VecOrMat{<:Number}}}
if nthreads() > 1
return parallelsampling(samplingfunction, M)
end
A = samplingfunction(1)
function serialsampling(samplingfunction::Function, M::Int)::Union{Tuple{Number,Number},Tuple{VecOrMat{<:Number},VecOrMat{<:Number}}}
A = samplingfunction()
A isa Array ? A .= 0 : A = 0
Q = copy(A)
for k in 1:M
x = samplingfunction(k)::Union{Number,VecOrMat{<:Number}}
x = samplingfunction()::Union{Number,VecOrMat{<:Number}}
Q = Q + (k - 1) / k * abs.(x - A) .^ 2
A = A + (x - A) / k
end
Expand All @@ -37,27 +34,27 @@ Multi-threaded Monte-Carlo sampling of any objective function.
The function must return Tuple{Number,Number} or Tuple{VecOrMat{<:Number},VecOrMat{<:Number}}
"""
function parallelsampling(samplingfunction::Function, M::Int)::Union{Tuple{Number,Number},Tuple{VecOrMat{<:Number},VecOrMat{<:Number}}}
obj = samplingfunction(1)
obj = samplingfunction()
if obj isa Number
cache = zeros(typeof(obj), M)
@threads for i in 1:M
cache[i] = samplingfunction(i)
cache[i] = samplingfunction()
end
A = mean(cache)
Q = var(cache)
return A, Q
elseif obj isa Vector
cache = zeros(typeof(obj).parameters[1], length(obj), M)
@threads for i in 1:M
cache[:, i] .= samplingfunction(i)
cache[:, i] .= samplingfunction()
end
A = dropdims(mean(cache, dims=2),dims=2)
Q = dropdims(var(cache, dims=2),dims=2)
return A, Q
elseif obj isa Matrix
cache = zeros(typeof(obj).parameters[1], size(obj)..., M)
@threads for i in 1:M
cache[:, :, i] .= samplingfunction(i)
cache[:, :, i] .= samplingfunction()
end
A = dropdims(mean(cache, dims=3),dims=3)
Q = dropdims(var(cache, dims=3),dims=3)
Expand Down
14 changes: 8 additions & 6 deletions src/stochastics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -36,20 +36,21 @@ a Gaussian random process traced from a Gaussian random field.
- `μ::Vector{<:Real}`: mean of the process
- `P::Vector{<:Point}`: time-position array
- `Σ::Symmetric{<:Real}`: covariance matrices
- `C::Cholesky`: Cholesky decomposition of the covariance matrices
- `L::Matrix{<:Real}`: lower triangle matrix of Cholesky decomposition
"""
struct RandomFunction
    μ::Vector{<:Real}      # mean of the process at each sample point
    P::Vector{<:Point}     # sample trace (time-position array)
    Σ::Symmetric{<:Real}   # covariance matrix over the sample points
    L::Matrix{<:Real}      # lower-triangular factor of cholesky(Σ), materialized once

    """
        RandomFunction(P::Vector{<:Point}, process::GaussianRandomField)

    Trace a Gaussian random process along the points `P` of a Gaussian random
    field: evaluate the field's mean on `P` (broadcasting a scalar mean), build
    the covariance matrix, and cache its dense Cholesky lower factor so that
    sampling only costs one matrix-vector product.
    """
    function RandomFunction(P::Vector{<:Point}, process::GaussianRandomField)
        μ = process.μ isa Function ? process.μ(P) : repeat([process.μ], length(P))
        Σ = covariancematrix(P, process)
        # `collect` converts the lazy LowerTriangular view into a plain dense
        # Matrix, which makes repeated `L * randseq` products faster.
        L = collect(cholesky(Σ).L)
        return new(μ, P, Σ, L)
    end

    """
        RandomFunction(μ, P, Σ, L)

    Direct constructor from precomputed components; `L` must be the lower
    Cholesky factor of `Σ`.
    """
    function RandomFunction(μ::Vector{<:Real}, P::Vector{<:Point}, Σ::Symmetric{<:Real}, L::Matrix{<:Real})
        # BUG FIX: the original body was `new(μ, P, Σ, C)` — `C` was the old
        # Cholesky field removed in this change, so any call raised UndefVarError.
        return new(μ, P, Σ, L)
    end
end
Expand Down Expand Up @@ -109,7 +110,8 @@ function CompositeRandomFunction(R::RandomFunction, c::Vector{Int})::RandomFunct
μ = sum(c .* meanpartition(R, n))
Σ = Symmetric(sum((c * c') .* covariancepartition(R, n)))
t = [(p[1],) for p in R.P[1:(N÷n)]]
return RandomFunction(μ, t, Σ, cholesky(Σ))
L = collect(cholesky(Σ).L)
return RandomFunction(μ, t, Σ, L)
end


Expand All @@ -124,7 +126,7 @@ Generate a random time series from a Gaussian random field.
`R(randseq)` generates a random time series from a Gaussian random field `R` with a given random sequence `randseq`.
"""
function (R::RandomFunction)(randseq::Vector{<:Real})
    # Color the white-noise vector with the cached Cholesky factor,
    # then shift by the mean: sample = L * z + μ.
    colored = R.L * randseq
    return colored .+ R.μ
end

# Zero-argument form: draw a standard-normal seed of the right length
# (one entry per sample point, i.e. the side length of Σ) and color it.
function (R::RandomFunction)()
    return R(randn(size(R.Σ, 1)))
end
Expand Down

0 comments on commit 46faae6

Please sign in to comment.