diff --git a/Project.toml b/Project.toml index 60697ac..30312c2 100644 --- a/Project.toml +++ b/Project.toml @@ -24,8 +24,7 @@ SpeciesDistributionToolkit = "72b53823-5c0b-4575-ad0e-8e97227ad13b" NeutralLandscapes = "71847384-8354-4223-ac08-659a5128069f" [extensions] -SDMToolkitExt = ["SpeciesDistributionToolkit"] -NLExt = ["NeutralLandscapes"] +SDTExt = ["SpeciesDistributionToolkit"] [compat] Distributions = "0.25" diff --git a/docs/src/vignettes/entropize.md b/docs/src/vignettes/entropize.md index 95ad631..6734c88 100644 --- a/docs/src/vignettes/entropize.md +++ b/docs/src/vignettes/entropize.md @@ -29,15 +29,6 @@ pixel scale: ```@example 1 U = entropize(measurements) -heatmap(U) -``` - -The values closest to the median of the distribution have the highest entropy, and the values closest to its extrema have an entropy of 0. The entropy matrix is guaranteed to have values on the unit interval. - -We can use `entropize` as part of a pipeline, and overlay the points optimized based on entropy on the measurement map: - -```@example 1 locations = - measurements |> entropize |> seed(BalancedAcceptance(; numpoints = 100)) |> first -heatmap(U) -``` \ No newline at end of file + seed(BalancedAcceptance(; numsites = 100, uncertainty=U)) +``` diff --git a/docs/src/vignettes/overview.md b/docs/src/vignettes/overview.md index 58e6857..8183204 100644 --- a/docs/src/vignettes/overview.md +++ b/docs/src/vignettes/overview.md @@ -31,22 +31,15 @@ less) uncertainty. To start with, we will extract 200 candidate points, *i.e.* ```@example 1 -pack = seed(BalancedAcceptance(; numpoints = 200), U); +candidates = seed(BalancedAcceptance(; numsites = 200)); ``` -The output of a `BONSampler` (whether at the seeding or refinement step) is -always a tuple, storing in the first position a vector of `CartesianIndex` -elements, and in the second position the matrix given as input. We can have a -look at the first five points: +We can have a look at the first five points: ```@example 1 -first(pack)[1:5] +candidates[1:5] ``` -Although returning the input matrix may seem redundant, it actually allows to -chain samplers together to build pipelines that take a matrix as input, and -return a set of places to sample as outputs; an example is given below. - The positions of locations to sample are given as a vector of `CartesianIndex`, which are coordinates in the uncertainty matrix. Once we have generated a candidate proposal, we can further refine it using a `BONRefiner` -- in this @@ -54,8 +47,7 @@ case, `AdaptiveSpatial`, which performs adaptive spatial sampling (maximizing the distribution of entropy while minimizing spatial auto-correlation). 
```@example 1 -candidates, uncertainty = pack -locations, _ = refine(candidates, AdaptiveSpatial(; numpoints = 50), uncertainty) +locations = refine(candidates, AdaptiveSpatial(; numsites = 50, uncertainty=U)) locations[1:5] ``` @@ -72,10 +64,8 @@ functions have a curried version that allows chaining them together using pipes ```@example 1 locations = - U |> - seed(BalancedAcceptance(; numpoints = 200)) |> - refine(AdaptiveSpatial(; numpoints = 50)) |> - first + seed(BalancedAcceptance(; numsites = 200)) |> + refine(AdaptiveSpatial(; numsites = 50, uncertainty=U)) ``` This works because `seed` and `refine` have curried versions that can be used @@ -84,5 +74,6 @@ the original uncertainty matrix: ```@example 1 plt = heatmap(U) -#scatter!(plt, [x[1] for x in locations], [x[2] for x in locations], ms=2.5, mc=:white, label="") -``` \ No newline at end of file +scatter!(plt, [x[1] for x in locations], [x[2] for x in locations]) +current_figure() +``` diff --git a/docs/src/vignettes/uniqueness.md b/docs/src/vignettes/uniqueness.md index 41e335e..756f11f 100644 --- a/docs/src/vignettes/uniqueness.md +++ b/docs/src/vignettes/uniqueness.md @@ -32,8 +32,6 @@ Now we'll use the `stack` function to combine our four environmental layers into layers = BiodiversityObservationNetworks.stack([temp,precip,elevation]); ``` -this requires NeutralLandscapes v0.1.2 - ```@example 1 uncert = rand(MidpointDisplacement(0.8), size(temp), mask=temp); heatmap(uncert) @@ -42,15 +40,15 @@ heatmap(uncert) Now we'll get a set of candidate points from a BalancedAcceptance seeder that has no bias toward higher uncertainty values. ```@example 1 -candpts, uncert = uncert |> seed(BalancedAcceptance(numpoints=100, α=0.0)); +candpts = seed(BalancedAcceptance(numsites=100)); ``` Now we'll `refine` our `100` candidate points down to the 30 most environmentally unique. ```@example 1 -finalpts, uncert = refine(candpts, Uniqueness(;numpoints=30, layers=layers), uncert) -#= +finalpts = refine(candpts, Uniqueness(;numsites=30, layers=layers)) heatmap(uncert) -scatter!([p[2] for p in candpts], [p[1] for p in candpts], fa=0.0, msc=:white, label="Candidate Points") -scatter!([p[2] for p in finalpts], [p[1] for p in finalpts], c=:dodgerblue, msc=:white, label="Selected Points")=# -``` \ No newline at end of file +scatter!([p[1] for p in candpts], [p[2] for p in candpts], color=:white) +scatter!([p[1] for p in finalpts], [p[2] for p in finalpts], color=:dodgerblue, msc=:white) +current_figure() +``` diff --git a/ext/SDTExt.jl b/ext/SDTExt.jl new file mode 100644 index 0000000..7e0f19f --- /dev/null +++ b/ext/SDTExt.jl @@ -0,0 +1,23 @@ +module SDTExt + +using BiodiversityObservationNetworks +using SpeciesDistributionToolkit + +@info "Loading BONs.jl support for SimpleSDMLayers.jl ..." + +function stack( + layers::Vector{<:SpeciesDistributionToolkit.SimpleSDMLayers.SimpleSDMLayer}, +) + # assert all layers are the same size if we add this function to BONs.jl + mat = zeros(size(first(layers))..., length(layers)) + for (l, layer) in enumerate(layers) + thismin, thismax = extrema(layer) + mat[:, :, l] .= broadcast( + x -> isnothing(x) ? 
NaN : (x - thismin) / (thismax - thismin), + layer.grid, + ) + end + return mat +end + +end diff --git a/src/BiodiversityObservationNetworks.jl b/src/BiodiversityObservationNetworks.jl index 257b4f5..cfb0cfe 100644 --- a/src/BiodiversityObservationNetworks.jl +++ b/src/BiodiversityObservationNetworks.jl @@ -37,6 +37,9 @@ export AdaptiveSpatial include("cubesampling.jl") export CubeSampling +include("fractaltriad.jl") +export FractalTriad + include("uniqueness.jl") export Uniqueness @@ -49,16 +52,7 @@ export refine, refine! include("entropize.jl") export entropize, entropize! -include("optimize.jl") -export optimize - include("utils.jl") -export stack, squish, _squish, _norm - -#=using Requires -function __init__() - @require SpeciesDistributionToolkit="72b53823-5c0b-4575-ad0e-8e97227ad13b" include(joinpath("integrations", "simplesdms.jl")) - @require Zygote="e88e6eb3-aa80-5325-afca-941959d7151f" include(joinpath("integrations", "zygote.jl")) -end=# +export stack end diff --git a/src/adaptivespatial.jl b/src/adaptivespatial.jl index 7cce7c6..3482712 100644 --- a/src/adaptivespatial.jl +++ b/src/adaptivespatial.jl @@ -3,40 +3,32 @@ ... -**numpoints**, an Integer (def. 50), specifying the number of points to use. - -**α**, an AbstractFloat (def. 1.0), specifying ... +**numsites**, an Integer (def. 50), specifying the number of points to use. """ -Base.@kwdef mutable struct AdaptiveSpatial{T <: Integer} <: BONRefiner - numpoints::T = 50 - function AdaptiveSpatial(numpoints) - if numpoints < one(numpoints) - throw( - ArgumentError( - "You cannot have an AdaptiveSpatial with fewer than one point", - ), - ) - end - return new{typeof(numpoints)}(numpoints) +Base.@kwdef mutable struct AdaptiveSpatial{T <: Integer, F <: AbstractFloat} <: BONRefiner + numsites::T = 30 + uncertainty::Array{F, 2} = rand(50, 50) + function AdaptiveSpatial(numsites, uncertainty) + as = new{typeof(numsites), typeof(uncertainty[begin])}(numsites, uncertainty) + check_arguments(as) + return as end end -_generate!( - coords::Vector{CartesianIndex}, - pool::Vector{CartesianIndex}, - sampler::AdaptiveSpatial, - uncertainty) = throw(ArgumentError( - "You can only call AdaptiveSpatial with a single layer of type Matrix")) +maxsites(as::AdaptiveSpatial) = prod(size(as.uncertainty)) +function check_arguments(as::AdaptiveSpatial) + check(TooFewSites, as) + check(TooManySites, as) + return nothing +end function _generate!( coords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::AdaptiveSpatial, - uncertainty::Array{T,2}, -) where {T <: AbstractFloat} - +) # Distance matrix (inlined) - d = zeros(Float64, Int((sampler.numpoints * (sampler.numpoints - 1)) / 2)) + d = zeros(Float64, Int((sampler.numsites * (sampler.numsites - 1)) / 2)) # Start with the point with maximum entropy imax = last(findmax([uncertainty[i] for i in pool])) @@ -46,7 +38,7 @@ function _generate!( best_score = 0.0 best_s = 1 - for i in 2:(sampler.numpoints) + for i in 2:(sampler.numsites) for (ci, cs) in enumerate(pool) coords[i] = cs # Distance update @@ -65,7 +57,7 @@ function _generate!( end coords[i] = popat!(pool, best_s) end - return (coords, uncertainty) + return coords end function _matérn(d, ρ, ν) @@ -86,3 +78,24 @@ function _D(a1::T, a2::T) where {T <: CartesianIndex{2}} x2, y2 = a2.I return sqrt((x1 - x2)^2.0 + (y1 - y2)^2.0) end + +# ==================================================== +# +# Tests +# +# ===================================================== + +@testitem "AdaptiveSpatial default constructor works" begin + @test 
typeof(AdaptiveSpatial()) <: AdaptiveSpatial +end + +@testitem "AdaptiveSpatial has right subtypes" begin + @test AdaptiveSpatial <: BONRefiner + @test AdaptiveSpatial <: BONSampler +end + +@testitem "AdaptiveSpatial requires positive number of sites" begin + @test_throws TooFewSites AdaptiveSpatial(numsites = 1) + @test_throws TooFewSites AdaptiveSpatial(numsites = 0) + @test_throws TooFewSites AdaptiveSpatial(numsites = -1) +end diff --git a/src/balancedacceptance.jl b/src/balancedacceptance.jl index b41afe5..3845159 100644 --- a/src/balancedacceptance.jl +++ b/src/balancedacceptance.jl @@ -5,30 +5,34 @@ A `BONSeeder` that uses Balanced-Acceptance Sampling (Van-dem-Bates et al. 2017 https://doi.org/10.1111/2041-210X.13003) """ Base.@kwdef struct BalancedAcceptance{I <: Integer} <: BONSeeder - numpoints::I = 30 - function BalancedAcceptance(numpoints) - bas = new{typeof(numpoints)}(numpoints) + numsites::I = 30 + dims::Tuple{I, I} = (50, 50) + function BalancedAcceptance(numsites, dims) + bas = new{typeof(numsites)}(numsites, dims) check_arguments(bas) return bas end end +maxsites(bas::BalancedAcceptance) = prod(bas.dims) + function check_arguments(bas::BalancedAcceptance) - return check(TooFewSites, bas) + check(TooFewSites, bas) + check(TooManySites, bas) + return nothing end function _generate!( coords::Vector{CartesianIndex}, - ::BalancedAcceptance, - uncertainty::Matrix{T}, -) where {T <: Real} + ba::BalancedAcceptance, +) seed = rand(Int32.(1e0:1e7), 2) - x, y = size(uncertainty) + x, y = ba.dims for (idx, ptct) in enumerate(eachindex(coords)) i, j = haltonvalue(seed[1] + ptct, 2), haltonvalue(seed[2] + ptct, 3) coords[idx] = CartesianIndex(convert.(Int32, [ceil(x * i), ceil(y * j)])...) end - return (coords, uncertainty) + return coords end # ==================================================== @@ -42,39 +46,36 @@ end end @testitem "BalancedAcceptance requires positive number of sites" begin - @test_throws TooFewSites BalancedAcceptance(0) - @test_throws TooFewSites BalancedAcceptance(1) + @test_throws TooFewSites BalancedAcceptance(numsites = 1) + @test_throws TooFewSites BalancedAcceptance(numsites = 0) + @test_throws TooFewSites BalancedAcceptance(numsites = -1) end @testitem "BalancedAcceptance can't be run with too many sites" begin numpts, numcandidates = 26, 25 @test numpts > numcandidates # who watches the watchmen? - bas = BalancedAcceptance(numpts) - dims = Int32.(floor.([sqrt(numcandidates), sqrt(numcandidates)])) - uncert = rand(dims...) 
- - @test_throws TooManySites seed(bas, uncert) + dims = Int32.(floor.((sqrt(numcandidates), sqrt(numcandidates)))) + @test_throws TooManySites BalancedAcceptance(numpts, dims) end @testitem "BalancedAcceptance can generate points" begin bas = BalancedAcceptance() - sz = (50, 50) - coords = seed(bas, rand(sz...)) |> first + coords = seed(bas) @test typeof(coords) <: Vector{CartesianIndex} - @test length(coords) == bas.numpoints + @test length(coords) == bas.numsites end -@testitem "BalancedAcceptance can generate a custom number of points" begin +@testitem "BalancedAcceptance can generate a custom number of points as positional argument" begin numpts = 77 - bas = BalancedAcceptance(numpts) sz = (50, 50) - coords = seed(bas, rand(sz...)) |> first + bas = BalancedAcceptance(numpts, sz) + coords = seed(bas) @test numpts == length(coords) end @testitem "BalancedAcceptance can take number of points as keyword argument" begin N = 40 - bas = BalancedAcceptance(; numpoints = N) - @test bas.numpoints == N + bas = BalancedAcceptance(; numsites = N) + @test bas.numsites == N end diff --git a/src/cubesampling.jl b/src/cubesampling.jl index 18dfcb1..001b0cb 100644 --- a/src/cubesampling.jl +++ b/src/cubesampling.jl @@ -5,54 +5,59 @@ A `BONRefiner` that uses Cube Sampling (Tillé 2011) ... -**numpoints**, an Integer (def. 50), specifying the number of points to use. +**numsites**, an Integer (def. 50), specifying the number of points to use. **fast**, a Boolean (def. true) indicating whether to use the fast flight algorithm. **x**, a Matrix of auxillary variables for the candidate points, with one row for each variable and one column for each candidate point. -**πₖ**, a Float Vector indicating the probabilities of inclusion for each candidate point; should sum to numpoints value. +**πₖ**, a Float Vector indicating the probabilities of inclusion for each candidate point; should sum to numsites value. """ Base.@kwdef struct CubeSampling{I <: Integer, M <: Matrix, V <: Vector} <: BONRefiner - numpoints::I = 50 + numsites::I = 50 fast::Bool = true x::M = rand(0:4, 3, 50) πₖ::V = zeros(size(x, 2)) - function CubeSampling(numpoints, fast, x, πₖ) - cs = new{typeof(numpoints), typeof(x), typeof(πₖ)}(numpoints, fast, x, πₖ) + function CubeSampling(numsites, fast, x, πₖ) + cs = new{typeof(numsites), typeof(x), typeof(πₖ)}(numsites, fast, x, πₖ) _check_arguments(cs) - return cs end end -function check_arguments(cs::CubeSampling) - check(TooFewSites, cs) - if numpoints > length(πₖ) +numsites(cubesampling::CubeSampling) = cubesampling.numsites +maxsites(cubesampling::CubeSampling) = size(cubesampling.x, 2) + +function check_arguments(cubesampling::CubeSampling) + check(TooFewSites, cubesampling) + check(TooManySites, cubesampling) + + if numsites > length(cubesampling.πₖ) throw( ArgumentError( "You cannot select more points than the number of candidate points.", ), ) end - if length(πₖ) != size(x, 2) + if length(cubesampling.πₖ) != size(cubesampling.x, 2) throw( DimensionMismatch( "The number of inclusion probabilites does not match the dimensions of the auxillary variable matrix.", ), ) end + return end function check_conditions(coords, pool, sampler) πₖ = sampler.πₖ if sum(sampler.πₖ) == 0 @info "Probabilities of inclusion were not provided, so we assume equal probability design." 
- πₖ = [sampler.numpoints / length(pool) for _ in eachindex(pool)] + πₖ = [sampler.numsites / length(pool) for _ in eachindex(pool)] end - if round(Int, sum(πₖ)) != sampler.numpoints - @warn "The inclusion probabilities sum to $(round(Int, sum(πₖ))), which will be your sample size instead of numpoints." + if round(Int, sum(πₖ)) != sampler.numsites + @warn "The inclusion probabilities sum to $(round(Int, sum(πₖ))), which will be your sample size instead of numsites." end if length(pool) != length(πₖ) throw( @@ -442,7 +447,7 @@ end # ===================================================== @testitem "CubeSampling throws exception with too few points" begin - @test_throws TooFewSites CubeSampling(numpoints = -1) - @test_throws TooFewSites CubeSampling(numpoints = 0) - @test_throws TooFewSites CubeSampling(numpoints = 1) + @test_throws TooFewSites CubeSampling(numsites = -1) + @test_throws TooFewSites CubeSampling(numsites = 0) + @test_throws TooFewSites CubeSampling(numsites = 1) end diff --git a/src/exceptions.jl b/src/exceptions.jl index afabfcc..c012d22 100644 --- a/src/exceptions.jl +++ b/src/exceptions.jl @@ -8,17 +8,22 @@ Base.showerror(io::IO, e::E) where {E <: BONException} = ) function _check_arguments(sampler::S) where {S <: Union{BONSeeder, BONRefiner}} - return sampler.numpoints > 1 || throw(TooFewSites(sampler.numpoints)) -end - -function check(TooFewSites, sampler) - return sampler.numpoints > 1 || throw(TooFewSites(sampler.numpoints)) + sampler.numsites > 1 || throw(TooFewSites()) + return nothing end @kwdef struct TooFewSites <: BONException message = "Number of sites to select must be at least two." end +function check(::Type{TooFewSites}, sampler) + sampler.numsites > 1 || throw(TooFewSites()) + return nothing +end @kwdef struct TooManySites <: BONException message = "Cannot select more sites than there are candidates." end +function check(::Type{TooManySites}, sampler) + sampler.numsites <= maxsites(sampler) || throw(TooManySites()) + return nothing +end diff --git a/src/fractaltriad.jl b/src/fractaltriad.jl index 261feda..c0b3b23 100644 --- a/src/fractaltriad.jl +++ b/src/fractaltriad.jl @@ -1,10 +1,184 @@ -Base.@kwdef struct FractalTriad{IT <: Integer, FT <: AbstractFloat} <: SpatialSampler - numpoints::IT = 50 - padding::FT = 0.1 +""" + FractalTriad + +A `BONSeeder` that generates `FractalTriad` designs +""" +Base.@kwdef struct FractalTriad{I <: Integer, F <: AbstractFloat} <: BONSeeder + numsites::I = 81 + horizontal_padding::F = 0.1 + vertical_padding::F = 0.1 + dims::Tuple{I, I} = (100, 100) + function FractalTriad(numsites, horizontal_padding, vertical_padding, dims) + ft = new{typeof(numsites), typeof(horizontal_padding)}( + numsites, + horizontal_padding, + vertical_padding, + dims, + ) + check_arguments(ft) + return ft + end +end + +maxsites(ft::FractalTriad) = maximum(ft.dims) * 10 # gets numerically unstable for large values because float coords belong to the the same cell in the raster, and arctan breaks +function check_arguments(ft::FractalTriad) + check(TooManySites, ft) + ft.numsites > 9 || throw(TooFewSites("FractalTriad requires more than 9 or more sites")) + ft.numsites % 9 == 0 || + throw(ArgumentError("FractalTriad requires number of sites to be a multiple of 9")) + return +end + +""" + _outer_triangle + +Takes a FractalTriad generator `ft` and returns the outermost triangle based on the size of the raster and the padding on the horizontal and vertical. 
+""" +function _outer_triangle(ft) + x, y = ft.dims + Δx, Δy = + Int.([floor(0.5 * ft.horizontal_padding * x), floor(0.5 * ft.vertical_padding * y)]) + + return CartesianIndex.([(Δx, Δy), (x ÷ 2, y - Δy), (x - Δx, Δy)]) end -function _generate!(ft::FractalTriad, sdm::M) where {M <: AbstractMatrix} - response = zeros(ft.numpoints, 2) +""" + _hexagon(A, B, C) + +Takes a set of `CartesianIndex`'s that form the vertices of a triangle, and returns the `CartisianIndex`'s for each of the points that form the internal hexagon points + +For input vertices A, B, C, `_hexagon` returns the points on the edges of the triangle below in the order `[d,e,f,g,h,i]` + + B + + e f + + + d g + + A i h C + +-- + +After running `vcat(triangle, hex)`, the resulting indices form the 2-level triad with indices corresponding to points in the below manner: + + 2 + + + 5 6 + + + 4 7 + + 1 9 8 3 + + - +γ = |AB| +χ = |BC| + +θ = ⦤ BAC +α = ⦤ BCA + +TODO: this always assumes |AC| is horizontal. This could be changed later. +""" +function _hexagon(A, B, C) + γ = sqrt((B[1] - A[1])^2 + (B[2] - A[2])^2) # left side length + χ = sqrt((B[1] - C[1])^2 + (B[2] - C[2])^2) # right side length + + θ = atan((B[2] - A[2]) / (B[1] - A[1])) + α = atan((B[2] - C[2]) / (C[1] - B[1])) + + d = (A[1] + (γ * cos(θ) / 3), A[2] + γ * sin(θ) / 3) + e = (A[1] + 2γ * cos(θ) / 3, A[2] + 2γ * sin(θ) / 3) + f = (A[1] + (C[1] - A[1]) - (2χ * cos(α) / 3), A[2] + 2χ * sin(α) / 3) + g = (A[1] + (C[1] - A[1] - (χ * cos(α) / 3)), A[2] + χ * sin(α) / 3) + h = (A[1] + 2(C[1] - A[1]) / 3, A[2]) + i = (A[1] + (C[1] - A[1]) / 3, A[2]) + return [CartesianIndex(Int.([floor(x[1]), floor(x[2])])...) for x in [d, e, f, g, h, i]] +end + +""" + _fill_triangle!(coords, traingle, count) + +Takes a set of vertices of a triangle `triangle`, and fills the internal hexagon for those points. +""" +function _fill_triangle!(coords, triangle, count) + start = count + hex = _hexagon(triangle...) + for i in eachindex(hex) + coords[count] = CartesianIndex(hex[i]) + count += 1 + if count > length(coords) + return coords[start:(count - 1)], count + end + end + + return coords[start:(count - 1)], count +end + +""" + _generate!(coords::Vector{CartesianIndex}, ft::FractalTriad) + +Fills `coords` with a set of points generated using the `FractalTriad` generator `ft`. 
+""" +function _generate!( + coords::Vector{CartesianIndex}, + ft::FractalTriad, +) + base_triangle = _outer_triangle(ft) + coords[1:3] .= base_triangle + count = 4 + + triangle = coords[1:3] + hex, count = _fill_triangle!(coords, triangle, count) + pack = vcat(triangle, hex) + vert_idxs = [[5, 2, 6], [1, 4, 9], [8, 7, 3]] + + pack_stack = [pack] + + while length(pack_stack) > 0 + pack = popat!(pack_stack, 1) + for idx in vert_idxs + triangle = pack[idx] + hex, count = _fill_triangle!(coords, triangle, count) + if count > ft.numsites + return coords + end + push!(pack_stack, vcat(triangle, hex)) + end + end + return coords +end + +# ==================================================== +# +# Tests +# +# ===================================================== + +@testitem "FractalTriad is correct subtype" begin + @test FractalTriad <: BONSeeder + @test FractalTriad <: BONSampler +end + +@testitem "FractalTriad default constructor works" begin + @test typeof(FractalTriad()) <: FractalTriad +end + +@testitem "FractalTriad can change number of sites with keyword argument" begin + ft = FractalTriad(; numsites = 18) + @test typeof(ft) <: FractalTriad + @test ft.numsites == 18 + + ft = FractalTriad(; numsites = 27) + @test typeof(ft) <: FractalTriad + @test ft.numsites == 27 + + @test_throws ArgumentError FractalTriad(numsites = 20) +end - return response +@testitem "FractalTriad throws error when too few points as passed as an argument" begin + @test_throws TooFewSites FractalTriad(; numsites = 9) + @test_throws TooFewSites FractalTriad(; numsites = -1) + @test_throws TooFewSites FractalTriad(; numsites = 0) end diff --git a/src/integrations/simplesdms.jl b/src/integrations/simplesdms.jl deleted file mode 100644 index be621f9..0000000 --- a/src/integrations/simplesdms.jl +++ /dev/null @@ -1,11 +0,0 @@ -@info "Loading BONs.jl support for SimpleSDMLayers.jl ..." - -function BiodiversityObservationNetworks.stack(layers::Vector{<:SpeciesDistributionToolkit.SimpleSDMLayers.SimpleSDMLayer}) - # assert all layers are the same size if we add this function to BONs.jl - mat = zeros(size(first(layers))..., length(layers)) - for (l,layer) in enumerate(layers) - thismin, thismax = extrema(layer) - mat[:,:,l] .= broadcast(x->isnothing(x) ? NaN : (x-thismin)/(thismax-thismin), layer.grid) - end - mat -end diff --git a/src/integrations/zygote.jl b/src/integrations/zygote.jl deleted file mode 100644 index 0a70496..0000000 --- a/src/integrations/zygote.jl +++ /dev/null @@ -1,36 +0,0 @@ -@info "Loading BONs.jl support for Zygote.jl ..." 
- - -function BiodiversityObservationNetworks.optimize(layers, loss; targets = 3, learningrate = 1e-4, numsteps = 10) - ndims(layers) == 3 || throw(ArgumentError("Layers must be a 3-dimensional array")) - typeof(loss) <: Function || throw(ArgumentError("`loss` must be a function")) - learningrate > 0.0 || throw(ArgumentError("learningrate must be positive")) - numsteps > 0 || throw(ArgumentError("numsteps must be positive")) - - W = rand(size(layers, 3), targets) - for (i,c) in enumerate(eachcol(W)) - W[:,i] .= c ./ sum(c) - end - - α = rand(targets) - α ./= sum(α) - - losses = zeros(numsteps) - - @showprogress for step in 1:numsteps - dL, ∂W, ∂α = learningrate .* Zygote.gradient(loss, layers, W, α) - W += ∂W - α += ∂α - - W = clamp.(W, 0, 1) - for (i,c) in enumerate(eachcol(W)) - W[:,i] .= c ./ sum(c) - end - - α = clamp.(α, 0, 1) - α ./= sum(α) - - losses[step] = loss(layers, W, α) - end - return W,α,losses -end diff --git a/src/optimize.jl b/src/optimize.jl deleted file mode 100644 index 8b13789..0000000 --- a/src/optimize.jl +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/refine.jl b/src/refine.jl index 3680c1b..fd8d06b 100644 --- a/src/refine.jl +++ b/src/refine.jl @@ -1,5 +1,5 @@ """ - refine!(cooords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::ST, uncertainty::Matrix{T}) + refine!(cooords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::ST) Refines a set of candidate sampling locations in the preallocated vector `coords` from a vector of coordinates `pool` using `sampler`, where `sampler` is a [`BONRefiner`](@ref). @@ -8,12 +8,11 @@ function refine!( coords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::ST, - uncertainty::Matrix{T}, -) where {ST <: BONRefiner, T <: AbstractFloat} - if length(coords) != sampler.numpoints +) where {ST <: BONRefiner} + if length(coords) != sampler.numsites throw( DimensionMismatch( - "The length of the coordinate vector must match the `numpoints` fields of the sampler", + "The length of the coordinate vector must match the `numsites` fields of the sampler", ), ) end @@ -24,62 +23,47 @@ function refine!( ), ) end - return _generate!(coords, copy(pool), sampler, uncertainty) + return _generate!(coords, copy(pool), sampler) end """ - refine!(cooords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::ST, uncertainty::Matrix{T}) + refine!(cooords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::ST) The curried version of `refine!`, which returns a function that acts on the input coordinate pool passed to the curried function (`p` below). """ function refine!(coords::Vector{CartesianIndex}, sampler::ST) where {ST <: BONRefiner} - if length(coords) != sampler.numpoints + if length(coords) != sampler.numsites throw( DimensionMismatch( - "The length of the coordinate vector must match the `numpoints` fields of the sampler", + "The length of the coordinate vector must match the `numsites` fields of the sampler", ), ) end - return (p, u) -> refine!(coords, copy(p), sampler, u) + return (p) -> refine!(coords, copy(p), sampler) end """ - refine(pool::Vector{CartesianIndex}, sampler::ST, uncertainty::Matrix{T}) + refine(pool::Vector{CartesianIndex}, sampler::ST) -Refines a set of candidate sampling locations and returns a vector `coords` of length numpoints +Refines a set of candidate sampling locations and returns a vector `coords` of length numsites from a vector of coordinates `pool` using `sampler`, where `sampler` is a [`BONRefiner`](@ref). 
""" function refine( pool::Vector{CartesianIndex}, sampler::ST, - uncertainty::Matrix{T}, -) where {ST <: BONRefiner, T <: AbstractFloat} - coords = Vector{CartesianIndex}(undef, sampler.numpoints) - return refine!(coords, copy(pool), sampler, uncertainty) +) where {ST <: BONRefiner} + coords = Vector{CartesianIndex}(undef, sampler.numsites) + return refine!(coords, copy(pool), sampler) end """ refine(sampler::BONRefiner) -Returns a curried function of `refine` with *two* methods: both are using the -output of `seed`, one in its packed form, the other in its splatted form. +Returns a curried function of `refine` """ function refine(sampler::ST) where {ST <: BONRefiner} - coords = Vector{CartesianIndex}(undef, sampler.numpoints) - _inner(p, u) = refine!(coords, copy(p), sampler, u) - _inner(p) = refine!(coords, first(p), sampler, last(p)) + coords = Vector{CartesianIndex}(undef, sampler.numsites) + _inner(p) = refine!(coords, first(p), sampler) return _inner end - -""" - refine(pack, sampler::BONRefiner) - -Calls `refine` on the appropriatedly splatted version of `pack`. -""" -function refine( - pack::Tuple{Vector{CartesianIndex}, Matrix{Float64}}, - sampler::ST, -) where {ST <: BONRefiner} - return refine(first(pack), sampler, last(pack)) -end diff --git a/src/seed.jl b/src/seed.jl index e042db6..c44789c 100644 --- a/src/seed.jl +++ b/src/seed.jl @@ -9,62 +9,23 @@ from a raster `uncertainty` using `sampler`, where `sampler` is a [`BONSeeder`]( function seed!( coords::Vector{CartesianIndex}, sampler::ST, - uncertainty::Matrix{T}, -) where {ST <: BONSeeder, T <: AbstractFloat} - length(coords) != sampler.numpoints && +) where {ST <: BONSeeder} + length(coords) != sampler.numsites && throw( DimensionMismatch( - "The length of the coordinate vector must match the `numpoints` fiel s of the sampler", + "The length of the coordinate vector must match the `numsites` fiel s of the sampler", ), ) - - max_sites = prod(size(uncertainty)) - max_sites < sampler.numpoints && throw( - TooManySites( - "Cannot select $(sampler.numpoints) sampling sites from $max_sites possible locations.", - ), - ) - return _generate!(coords, sampler, uncertainty) -end - -""" - seed!(coords::Vector{CartesianIndex}, sampler::ST) - -The curried version of `seed!`, which returns a function that acts on the input -uncertainty layer passed to the curried function (`u` below). -""" -function seed!(coords::Vector{CartesianIndex}, sampler::ST) where {ST <: BONSeeder} - if length(coords) != sampler.numpoints - throw( - DimensionMismatch( - "The length of the coordinate vector must match the `numpoints` fields of the sampler", - ), - ) - end - return (u) -> seed!(coords, sampler, u) -end - -""" - seed(sampler::ST, uncertainty::Matrix{T}) - -Produces a set of candidate sampling locations in a vector `coords` of length numpoints -from a raster `uncertainty` using `sampler`, where `sampler` is a [`BONSeeder`](@ref). -""" -function seed( - sampler::ST, - uncertainty::Matrix{T}, -) where {ST <: BONSeeder, T <: AbstractFloat} - coords = Vector{CartesianIndex}(undef, sampler.numpoints) - return seed!(coords, sampler, uncertainty) + return _generate!(coords, sampler) end """ - seed!(coords::Vector{CartesianIndex}, sampler::ST) + seed(sampler::ST) -The curried version of `seed!`, which returns a function that acts on the input -uncertainty layer passed to the curried function (`u` below). 
+Produces a set of candidate sampling locations in a vector `coords` of length numsites +from a raster using `sampler`, where `sampler` is a [`BONSeeder`](@ref). """ function seed(sampler::ST) where {ST <: BONSeeder} - coords = Vector{CartesianIndex}(undef, sampler.numpoints) - return (u) -> seed!(coords, sampler, u) + coords = Vector{CartesianIndex}(undef, sampler.numsites) + return seed!(coords, sampler) end diff --git a/src/simplerandom.jl b/src/simplerandom.jl index d636f22..444c38a 100644 --- a/src/simplerandom.jl +++ b/src/simplerandom.jl @@ -4,27 +4,29 @@ Implements Simple Random spatial sampling (each location has equal probability of selection) """ Base.@kwdef struct SimpleRandom{I <: Integer} <: BONSeeder - numpoints::I = 50 - function SimpleRandom(numpoints) - srs = new{typeof(numpoints)}(numpoints) + numsites::I = 30 + dims::Tuple{I, I} = (50, 50) + function SimpleRandom(numsites, dims) + srs = new{typeof(numsites)}(numsites, dims) check_arguments(srs) return srs end end +maxsites(srs::SimpleRandom) = prod(srs.dims) -function check_arguments(srs) - return check(TooFewSites, srs) +function check_arguments(srs::SRS) where {SRS <: SimpleRandom} + check(TooFewSites, srs) + check(TooManySites, srs) + return nothing end function _generate!( coords::Vector{CartesianIndex}, sampler::SimpleRandom, - uncertainty::Matrix{T}, -) where {T <: AbstractFloat} - pool = CartesianIndices(uncertainty) - - coords .= sample(pool, sampler.numpoints; replace = false) - return (coords, uncertainty) +) + pool = CartesianIndices(sampler.dims) + coords .= sample(pool, sampler.numsites; replace = false) + return coords end # ==================================================== @@ -39,22 +41,20 @@ end end @testitem "SimpleRandom must have more than one point" begin - @test_throws TooFewSites SimpleRandom(-1) - @test_throws TooFewSites SimpleRandom(0) - @test_throws TooFewSites SimpleRandom(1) + @test_throws TooFewSites SimpleRandom(numsites = -1) + @test_throws TooFewSites SimpleRandom(numsites = 0) + @test_throws TooFewSites SimpleRandom(numsites = 1) end @testitem "SimpleRandom allows keyword arguments for number of points" begin N = 314 - srs = SimpleRandom(; numpoints = N) - @test srs.numpoints == N + srs = SimpleRandom(; numsites = N) + @test srs.numsites == N end @testitem "SimpleRandom throws exception if there are more sites than candidates" begin numpts, numcandidates = 26, 25 - srs = SimpleRandom(; numpoints = numpts) - dims = Int32.(floor.([sqrt(numcandidates), sqrt(numcandidates)])) - uncert = rand(dims...) 
- @test prod(dims) < numpts - @test_throws TooManySites seed(srs, uncert) + dims = Int.(floor.((sqrt(numcandidates), sqrt(numcandidates)))) + srs = @test prod(dims) < numpts + @test_throws TooManySites SimpleRandom(; numsites = numpts, dims = dims) end diff --git a/src/spatialstratified.jl b/src/spatialstratified.jl index c903c29..e03a34b 100644 --- a/src/spatialstratified.jl +++ b/src/spatialstratified.jl @@ -2,12 +2,12 @@ SpatiallyStratified """ @kwdef struct SpatiallyStratified{I <: Integer, F <: AbstractFloat} <: BONSeeder - numpoints::I = 50 + numsites::I = 50 strata::Matrix{I} = _default_strata((50, 50)) inclusion_probability_by_stratum::Vector{F} = ones(3) ./ 3 - function SpatiallyStratified(numpoints, strata, inclusion_probability_by_stratum) - ss = new{typeof(numpoints), typeof(inclusion_probability_by_stratum[begin])}( - numpoints, + function SpatiallyStratified(numsites, strata, inclusion_probability_by_stratum) + ss = new{typeof(numsites), typeof(inclusion_probability_by_stratum[begin])}( + numsites, strata, inclusion_probability_by_stratum, ) @@ -16,8 +16,11 @@ end end +maxsites(ss::SpatiallyStratified) = prod(size(ss.strata)) + function check_arguments(ss::SpatiallyStratified) check(TooFewSites, ss) + check(TooManySites, ss) length(unique(ss.strata)) == length(ss.inclusion_probability_by_stratum) || throw( ArgumentError( @@ -25,8 +28,9 @@ function check_arguments(ss::SpatiallyStratified) ), ) - return sum(ss.inclusion_probability_by_stratum) ≈ 1.0 || - throw(ArgumentError("Inclusion probabilities for each strata do not sum to 1.")) + sum(ss.inclusion_probability_by_stratum) ≈ 1.0 || + throw(ArgumentError("Inclusion probabilities for each strata do not sum to 1.")) + return nothing end function _default_strata(sz) @@ -45,8 +49,7 @@ end function _generate!( coords::Vector{CartesianIndex}, sampler::SpatiallyStratified, - uncertainty::Matrix{T}, -) where {T} +) strata = sampler.strata idx_per_strata = [ findall(i -> strata[i] == x, CartesianIndices(strata)) for @@ -54,12 +57,12 @@ function _generate!( ] πᵢ = sampler.inclusion_probability_by_stratum - strata_per_sample = rand(Categorical(πᵢ), sampler.numpoints) + strata_per_sample = rand(Categorical(πᵢ), sampler.numsites) for (i, s) in enumerate(strata_per_sample) coords[i] = rand(idx_per_strata[s]) end - return coords, uncertainty + return coords end # ==================================================== @@ -74,33 +77,31 @@ end @testitem "SpatiallyStratified with default arguments can generate points" begin ss = SpatiallyStratified() - uncert = rand(size(ss.strata)...) - coords = seed(ss, uncert) |> first + coords = seed(ss) @test typeof(coords) <: Vector{CartesianIndex} end @testitem "SpatiallyStratified throws error when number of sites is below 2" begin - @test_throws TooFewSites SpatiallyStratified(numpoints = -1) - @test_throws TooFewSites SpatiallyStratified(numpoints = 0) - @test_throws TooFewSites SpatiallyStratified(numpoints = 1) + @test_throws TooFewSites SpatiallyStratified(numsites = -1) + @test_throws TooFewSites SpatiallyStratified(numsites = 0) + @test_throws TooFewSites SpatiallyStratified(numsites = 1) end @testitem "SpatiallyStratified can use custom number of points as keyword argument" begin NUM_POINTS = 42 - ss = SpatiallyStratified(; numpoints = NUM_POINTS) - @test ss.numpoints == NUM_POINTS + ss = SpatiallyStratified(; numsites = NUM_POINTS) + @test ss.numsites == NUM_POINTS end @testitem "SpatiallyStratified can use custom strata as keyword argument" begin dims = (42, 30) strata = rand(1:10, dims...) 
- uncert = rand(dims...) inclusion_probability = [0.1 for i in 1:10] ss = SpatiallyStratified(; strata = strata, inclusion_probability_by_stratum = inclusion_probability, ) - coords = seed(ss, uncert) |> first + coords = seed(ss) @test typeof(coords) <: Vector{CartesianIndex} end diff --git a/src/types.jl b/src/types.jl index 7bfb39b..6896eee 100644 --- a/src/types.jl +++ b/src/types.jl @@ -22,3 +22,6 @@ tuple with the coordinates as a vector of `CartesianIndex`, and the weight matrix as a `Matrix` of `AbstractFloat`, in that order. """ const BONSampler = Union{BONSeeder, BONRefiner} + + +numsites(sampler::BONSampler) = sampler.numsites \ No newline at end of file diff --git a/src/uniqueness.jl b/src/uniqueness.jl index 40d4a68..60b7425 100644 --- a/src/uniqueness.jl +++ b/src/uniqueness.jl @@ -3,54 +3,86 @@ A `BONRefiner` """ -Base.@kwdef mutable struct Uniqueness{I <: Integer, T <: Number} <: BONRefiner - numpoints::I = 30 - layers::Array{T, 3} - function Uniqueness(numpoints, layers::Array{T, N}) where {T, N} - if numpoints < one(numpoints) - throw( - ArgumentError( - "You cannot have a Uniqueness sampler with less than one point", - ), - ) - end - if N != 3 - throw( - ArgumentError( - "You cannot have a Uniqueness sampler without layers passed as a cube.", - ), - ) - end - return new{typeof(numpoints), T}(numpoints, layers) +Base.@kwdef struct Uniqueness{I <: Integer, T <: AbstractFloat} <: BONRefiner + numsites::I = 30 + layers::Array{T, 3} = rand(50, 50, 3) + function Uniqueness(numsites, layers) + uniq = new{typeof(numsites), typeof(layers[begin])}(numsites, layers) + check_arguments(uniq) + return uniq end end +maxsites(uniq::Uniqueness) = prod(size(uniq.layers)[1:2]) + +function check_arguments(uniq::Uniqueness) + check(TooFewSites, uniq) + check(TooManySites, uniq) + + length(size(uniq.layers)) == 3 || + throw( + ArgumentError( + "You cannot have a Uniqueness sampler without layers passed as a 3-dimenisional array, where the first two axes are latitude and longitude.", + ), + ) + + return nothing +end + function _generate!( coords::Vector{CartesianIndex}, pool::Vector{CartesianIndex}, sampler::Uniqueness, - uncertainty, -) where {T <: AbstractFloat} +) layers = sampler.layers ndims(layers) <= 2 && throw(ArgumentError("Uniqueness needs more than one layer to work.")) size(uncertainty) != (size(layers, 1), size(layers, 2)) && throw(DimensionMismatch("Layers are not the same dimension as uncertainty")) - covscore = zeros(length(pool)) + total_covariance = zeros(length(pool)) for (i, p1) in enumerate(pool) v1 = layers[p1[1], p1[2], :] for (j, p2) in enumerate(pool) v2 = layers[p2[1], p2[2], :] if p1 != p2 - covscore[i] += abs(cov(v1, v2)) + total_covariance[i] += abs(cov(v1, v2)) end end end - np = sampler.numpoints - sortedvals = sortperm(vec(covscore)) + np = sampler.numsites + sortedvals = sortperm(vec(total_covariance)) coords[:] .= pool[sortedvals[1:np]] - return (coords, uncertainty) + return coords +end + +# ==================================================== +# +# Tests +# +# ===================================================== + +@testitem "Uniqueness default constructor works" begin + @test typeof(Uniqueness()) <: Uniqueness +end + +@testitem "Uniqueness requires more than one point" begin + @test_throws TooFewSites Uniqueness(numsites = -1) + @test_throws TooFewSites Uniqueness(numsites = 0) + @test_throws TooFewSites Uniqueness(numsites = 1) +end + +@testitem "Uniqueness throws error if more points are requested than are possible" begin + @test_throws TooManySites 
Uniqueness(numsites = 26, layers = rand(5, 5, 2)) +end + +@testitem "Uniqueness has correct subtypes" begin + @test Uniqueness <: BONRefiner + @test Uniqueness <: BONSampler +end + +@testitem "Uniqueness works with positional constructor" begin + @test typeof(Uniqueness(2, rand(5, 5, 5))) <: Uniqueness end diff --git a/src/utils.jl b/src/utils.jl index 5fb2b59..d856673 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -2,64 +2,12 @@ function stack(layers::Vector{<:AbstractMatrix}) # assert all layers are the same size if we add this function to BONs.jl mat = zeros(size(first(layers))..., length(layers)) - for (l,layer) in enumerate(layers) + for (l, layer) in enumerate(layers) nonnanvals = vec(layer[findall(!isnan, layer)]) thismin, thismax = findmin(nonnanvals)[1], findmax(nonnanvals)[1] - mat[:,:,l] .= broadcast(x->isnan(x) ? NaN : (x-thismin)/(thismax-thismin), layer) + mat[:, :, l] .= + broadcast(x -> isnan(x) ? NaN : (x - thismin) / (thismax - thismin), layer) end - mat + return mat end - -function _squish(layers::Array{T, 3}, W::Matrix{T}) where {T <: AbstractFloat} - size(W,1) == size(layers,3) || throw(ArgumentError("W does not have the same number of rows are there are number of layers")) - all([sum(c) ≈ 1 for c in eachcol(W)]) || throw(ArgumentError("Not all of the columns of W sum to 1.")) - - return convert(Array, slicemap(x -> x * W, layers; dims = (2, 3))) -end - -function _squish(layers::Array{T, 3}, α::Vector{T}) where {T <: AbstractFloat} - length(α) == size(layers,3) || throw(ArgumentError("α is not the same length as number of layers")) - sum(α) ≈ 1 || throw(ArgumentError("α does not sum to 1.0")) - return slicemap(x -> x * reshape(α, (length(α), 1)), layers; dims = (2, 3))[:, :, 1] -end - -""" - squish(layers, W, α) - -Takes a set of `n` layers and squishes them down -to a single layer. - - - numcolumns = size(W,2) - for i in 1:numcolumns - W[:,i] ./= sum(W[:,i]) - end - -For a coordinate in the raster (i,j), denote the vector of -values across all locations at that coordinate v⃗ᵢⱼ. The value -at that coordinate in squished layer, s⃗ᵢⱼ, is computed in two steps. - -**(1):** First we apply a weights matrix, `W``, with `n` rows and `m` columns (`m` < `n`), to -reduce the initial `n` layers down to a set of `m` layers, each of which corresponds -to a particular target of optimization. For example, we may want to propose sampling -locations that are optimized to best sample abalance multiple criteria, like (a) the -current distribution of a species and (b) if that distribution is changing over time. - -Each entry in the weights matrix `W` corresponds to the 'importance' of the layer -in the corresponding row to the successful measurement of the target of the corresponding -column. As such, each column of `W` must sum to 1.0. -using Optim - -For each location, the value of the condensed layer `tᵢ`, corresponding to target `i`, at -coordinate (i,j) is given by the dot product of v⃗ᵢⱼ and the `i`-th column of `W`. - -**(2):** Apply a weighted average across each target layer. To produce the final output layer, -we apply a weighted average to each target layer, where the weights are provided in the vector α⃗ -of length `m`. - -The final value of the squished layer at (i,j) is given by s⃗ᵢⱼ = ∑ₓ αₓ*tᵢⱼ(x), where tᵢⱼ(x) is -the value of the x-th target layer at (i,j). 
-""" -squish(layers, W, α) = _squish(_squish(layers, W), α) - diff --git a/src/weightedbas.jl b/src/weightedbas.jl index a1d68fc..83b8e89 100644 --- a/src/weightedbas.jl +++ b/src/weightedbas.jl @@ -4,29 +4,34 @@ A `BONSeeder` that uses Balanced-Acceptance Sampling [@cite] combined with rejection sampling to create a set of sampling sites that is weighted toward values with higher uncertainty as a function of the parameter α. """ Base.@kwdef struct WeightedBalancedAcceptance{I <: Integer, F <: Real} <: BONSeeder - numpoints::I = 3 + numsites::I = 30 + uncertainty::Matrix{F} = rand(50, 50) α::F = 1.0 - function WeightedBalancedAcceptance(numpoints, α) - wbas = new{typeof(numpoints), typeof(α)}(numpoints, α) + function WeightedBalancedAcceptance(numsites, uncertainty, α) + wbas = new{typeof(numsites), typeof(α)}(numsites, uncertainty, α) check_arguments(wbas) return wbas end end +maxsites(wbas::WeightedBalancedAcceptance) = prod(size(wbas.uncertainty)) + function check_arguments(wbas::WeightedBalancedAcceptance) check(TooFewSites, wbas) - return wbas.α > 0 || - throw( - ArgumentError("WeightedBalancedAcceptance requires α to be greater than 0 "), - ) + check(TooManySites, wbas) + wbas.α > 0 || + throw( + ArgumentError("WeightedBalancedAcceptance requires α to be greater than 0 "), + ) + return nothing end function _generate!( coords::Vector{CartesianIndex}, - sampler::WeightedBalancedAcceptance{I}, - uncertainty::Matrix{T}, + sampler::WeightedBalancedAcceptance{I, T}, ) where {I <: Integer, T <: AbstractFloat} seed = rand(I.(1e0:1e7), 2) + uncertainty = sampler.uncertainty α = sampler.α x, y = size(uncertainty) @@ -67,7 +72,7 @@ function _generate!( ptct += 1 end - return (coords, uncertainty) + return coords end # ==================================================== @@ -81,48 +86,49 @@ end end @testitem "WeightedBalancedAcceptance requires positive number of sites" begin - α = 1.0 - @test_throws TooFewSites WeightedBalancedAcceptance(0, α) - @test_throws TooFewSites WeightedBalancedAcceptance(1, α) + @test_throws TooFewSites WeightedBalancedAcceptance(numsites = 0) + @test_throws TooFewSites WeightedBalancedAcceptance(numsites = 1) end @testitem "WeightedBalancedAcceptance can't be run with too many sites" begin α = 1.0 numpts, numcandidates = 26, 25 @test numpts > numcandidates # who watches the watchmen? - wbas = WeightedBalancedAcceptance(numpts, α) - dims = Int32.(floor.([sqrt(numcandidates), sqrt(numcandidates)])) + dims = Int.(floor.((sqrt(numcandidates), sqrt(numcandidates)))) uncert = rand(dims...) - @test_throws TooManySites seed(wbas, uncert) + @test_throws TooManySites WeightedBalancedAcceptance( + numsites = numpts, + uncertainty = uncert, + ) end @testitem "WeightedBalancedAcceptance can generate points" begin wbas = WeightedBalancedAcceptance() - sz = (50, 50) - coords = seed(wbas, rand(sz...)) |> first + coords = seed(wbas) @test typeof(coords) <: Vector{CartesianIndex} - @test length(coords) == wbas.numpoints + @test length(coords) == wbas.numsites end -@testitem "WeightedBalancedAcceptance can generate a custom number of points" begin +@testitem "WeightedBalancedAcceptance can generate a custom number of points with positional arguments" begin numpts = 77 - α = 1.0 - wbas = WeightedBalancedAcceptance(numpts, α) sz = (50, 50) - coords = seed(wbas, rand(sz...)) |> first + α = 1.0 + uncert = rand(sz...) 
+ wbas = WeightedBalancedAcceptance(numpts, uncert, α) + coords = seed(wbas) @test numpts == length(coords) end -@testitem "BalancedAcceptance can take bias parameter α as keyword argument" begin +@testitem "WeightedBalancedAcceptance can take bias parameter α as keyword argument" begin α = 3.14159 wbas = WeightedBalancedAcceptance(; α = α) @test wbas.α == α end -@testitem "BalancedAcceptance can take number of points as keyword argument" begin +@testitem "WeightedBalancedAcceptance can take number of points as keyword argument" begin N = 40 - wbas = WeightedBalancedAcceptance(; numpoints = N) - @test wbas.numpoints == N + wbas = WeightedBalancedAcceptance(; numsites = N) + @test wbas.numsites == N end diff --git a/test/adaptivespatial.jl b/test/adaptivespatial.jl index 0be95ca..945ea18 100644 --- a/test/adaptivespatial.jl +++ b/test/adaptivespatial.jl @@ -12,29 +12,29 @@ using Test # Test with a random uncertainty matrix U = rand(20, 20) -pack = seed(BalancedAcceptance(; numpoints = 30), U) +pack = seed(BalancedAcceptance(; numsites = 30), U) c = Vector{CartesianIndex}(undef, 15) smpl = AdaptiveSpatial(length(c)) # Length and element type -@test length(first(refine(pack, smpl))) == smpl.numpoints +@test length(first(refine(pack, smpl))) == smpl.numsites @test eltype(first(refine(first(pack), smpl, U))) == CartesianIndex # Test with an existing coordinates vector @test_throws DimensionMismatch refine!( c, first(pack), - AdaptiveSpatial(; numpoints = length(c) - 1), + AdaptiveSpatial(; numsites = length(c) - 1), last(pack), ) # Test the curried version -@test length(first(refine(AdaptiveSpatial(; numpoints = 12))(pack...))) == 12 +@test length(first(refine(AdaptiveSpatial(; numsites = 12))(pack...))) == 12 # Test the curried allocating version -@test length(first(refine!(c, AdaptiveSpatial(; numpoints = length(c)))(pack...))) == +@test length(first(refine!(c, AdaptiveSpatial(; numsites = length(c)))(pack...))) == length(c) -@test_throws DimensionMismatch refine!(c, AdaptiveSpatial(; numpoints = length(c) - 1))( +@test_throws DimensionMismatch refine!(c, AdaptiveSpatial(; numsites = length(c) - 1))( pack..., ) diff --git a/test/balancedacceptance.jl b/test/balancedacceptance.jl index 8bfbbd8..e2590c4 100644 --- a/test/balancedacceptance.jl +++ b/test/balancedacceptance.jl @@ -6,7 +6,7 @@ using Test # Must have one point or more @test_throws TooFewSites BalancedAcceptance(0, 1.0) -@test_throws TooManySites seed(BalancedAcceptance(; numpoints = 26), rand(5, 5)) +@test_throws TooManySites seed(BalancedAcceptance(; numsites = 26), rand(5, 5)) # Must have a positive alpha @test_throws ArgumentError BalancedAcceptance(1, -0.01) @@ -20,19 +20,19 @@ using Test # Test with a random uncertainty matrix U = rand(20, 20) -@test length(first(seed(BalancedAcceptance(; numpoints = 10), U))) == 10 -@test length(first(seed(BalancedAcceptance(; numpoints = 20), U))) == 20 -@test eltype(first(seed(BalancedAcceptance(; numpoints = 10), U))) == CartesianIndex +@test length(first(seed(BalancedAcceptance(; numsites = 10), U))) == 10 +@test length(first(seed(BalancedAcceptance(; numsites = 20), U))) == 20 +@test eltype(first(seed(BalancedAcceptance(; numsites = 10), U))) == CartesianIndex # Test with an existing coordinates vector c = Vector{CartesianIndex}(undef, 20) -@test_throws DimensionMismatch seed!(c, BalancedAcceptance(; numpoints = length(c) - 1), U) +@test_throws DimensionMismatch seed!(c, BalancedAcceptance(; numsites = length(c) - 1), U) # Test the curried version -@test 
length(first(seed(BalancedAcceptance(; numpoints = 12))(U))) == 12 +@test length(first(seed(BalancedAcceptance(; numsites = 12))(U))) == 12 # Test the curried allocating version -@test length(first(seed!(c, BalancedAcceptance(; numpoints = length(c)))(U))) == length(c) -@test_throws DimensionMismatch seed!(c, BalancedAcceptance(; numpoints = length(c) - 1))(U) +@test length(first(seed!(c, BalancedAcceptance(; numsites = length(c)))(U))) == length(c) +@test_throws DimensionMismatch seed!(c, BalancedAcceptance(; numsites = length(c) - 1))(U) end diff --git a/test/cubesampling.jl b/test/cubesampling.jl index 1781b66..a29df73 100644 --- a/test/cubesampling.jl +++ b/test/cubesampling.jl @@ -4,35 +4,35 @@ using BiodiversityObservationNetworks using Test # Must have one point or more -@test_throws ArgumentError CubeSampling(numpoints = 0) +@test_throws ArgumentError CubeSampling(numsites = 0) # Must select fewer points than number of candidate points -@test_throws ArgumentError CubeSampling(numpoints = 20, πₖ = zeros(10), x = rand(0:4, 2, 10)) +@test_throws ArgumentError CubeSampling(numsites = 20, πₖ = zeros(10), x = rand(0:4, 2, 10)) # Must have the same number of inclusion probabilites as auxillary variables -@test_throws DimensionMismatch CubeSampling(numpoints = 5, πₖ = zeros(15), x = rand(0:4, 2, 10)) +@test_throws DimensionMismatch CubeSampling(numsites = 5, πₖ = zeros(15), x = rand(0:4, 2, 10)) # Correct subtype -@test typeof(CubeSampling(numpoints = 5, x = rand(0:4, 2, 10))) <: BONRefiner -@test typeof(CubeSampling(numpoints = 5, x = rand(0:4, 2, 10)))<: BONSampler +@test typeof(CubeSampling(numsites = 5, x = rand(0:4, 2, 10))) <: BONRefiner +@test typeof(CubeSampling(numsites = 5, x = rand(0:4, 2, 10)))<: BONSampler # Test with a random uncertainty matrix N = 100 U = rand(20, 20) -pack = seed(BalancedAcceptance(; numpoints = N), U) +pack = seed(BalancedAcceptance(; numsites = N), U) c = Vector{CartesianIndex}(undef, 15) x_mat = rand(0:4, 2, N) -smpl = CubeSampling(numpoints = length(c), x = x_mat) +smpl = CubeSampling(numsites = length(c), x = x_mat) # Length and element type -@test length(first(refine(pack, smpl))) == smpl.numpoints +@test length(first(refine(pack, smpl))) == smpl.numsites @test eltype(first(refine(first(pack), smpl, U))) == CartesianIndex # Test with an existing coordinates vector @test_throws DimensionMismatch refine!( c, first(pack), - CubeSampling(; numpoints = length(c) - 1, x = x_mat), + CubeSampling(; numsites = length(c) - 1, x = x_mat), last(pack), ) diff --git a/test/squish.jl b/test/squish.jl index 8eafaa2..8b13789 100644 --- a/test/squish.jl +++ b/test/squish.jl @@ -1,51 +1 @@ -# Need to write tests for: -# - squish -# - optimize -# - stack -# - integrations -module BONTestSquish - using BiodiversityObservationNetworks - using Distributions - using Test - - function makelayers(nl) - dims = 50,50 - layers = zeros(dims...,nl) - for l in 1:nl - layers[:,:,l] .= rand(dims) - end - layers - end - - layers = makelayers(5) - ntargs = 3 - α = rand(Dirichlet([1 for t in 1:ntargs])) - W = rand(5, ntargs) - for i in 1:size(W,2) - W[:,i] .= W[:,i] ./ sum(W[:,i]) - end - @test typeof(squish(layers, W, α)) <: Matrix - - layers = makelayers(5) - ntargs = 3 - α = rand(Dirichlet([1 for t in 1:ntargs])) - W = rand(5, ntargs) - @test_throws ArgumentError squish(layers, W, α) - - W = rand(4, ntargs) - @test_throws ArgumentError squish(layers, W, α) - - - α = [1., 2., 3.] 
- @test_throws ArgumentError squish(layers, W, α) - - α = rand(Dirichlet([1 for t in 1:ntargs+1])) - @test_throws ArgumentError squish(layers, W, α) - - layers = makelayers(5) - W = rand(4, ntargs) - α = rand(Dirichlet([1 for t in 1:ntargs])) - @test_throws ArgumentError squish(layers, W, α) - -end \ No newline at end of file diff --git a/test/uniqueness.jl b/test/uniqueness.jl index 3cb425d..4f4a0a3 100644 --- a/test/uniqueness.jl +++ b/test/uniqueness.jl @@ -4,40 +4,40 @@ using BiodiversityObservationNetworks using Test # Must have one point or more -@test_throws ArgumentError Uniqueness(0, zeros(5,5,5)) -@test_throws ArgumentError Uniqueness(0, zeros(5,5)) +@test_throws ArgumentError Uniqueness(0, zeros(5, 5, 5)) +@test_throws ArgumentError Uniqueness(0, zeros(5, 5)) # Parametric constructor -@test typeof(Uniqueness(Int32(2), zeros(Float32, 5,5,5))) == Uniqueness{Int32,Float32} +@test typeof(Uniqueness(Int32(2), zeros(Float32, 5, 5, 5))) == Uniqueness{Int32,Float32} # Correct subtype -@test typeof(Uniqueness(2, zeros(5,5,5))) <: BONRefiner -@test typeof(Uniqueness(2, zeros(5,5,5))) <: BONSampler +@test typeof(Uniqueness(2, zeros(5, 5, 5))) <: BONRefiner +@test typeof(Uniqueness(2, zeros(5, 5, 5))) <: BONSampler # Test with a random uncertainty matrix layers = rand(20, 20, 5) np = 25 -pack = seed(BalancedAcceptance(; numpoints = np), rand(20,20)) +pack = seed(BalancedAcceptance(; numsites=np), rand(20, 20)) -@test length(first(refine(pack, Uniqueness(; numpoints = 10, layers=layers)))) == 10 -@test length(first(refine(pack, Uniqueness(; numpoints = 20, layers=layers)))) == 20 -@test eltype(first(refine(pack, Uniqueness(; numpoints = 10, layers=layers)))) == CartesianIndex +@test length(first(refine(pack, Uniqueness(; numsites=10, layers=layers)))) == 10 +@test length(first(refine(pack, Uniqueness(; numsites=20, layers=layers)))) == 20 +@test eltype(first(refine(pack, Uniqueness(; numsites=10, layers=layers)))) == CartesianIndex # Test with an existing coordinates vector c = Vector{CartesianIndex}(undef, np) @test_throws DimensionMismatch refine!( c, first(pack), - Uniqueness(; numpoints = length(c) - 1, layers=layers), + Uniqueness(; numsites=length(c) - 1, layers=layers), last(pack), ) # Test the curried allocating version -@test length(first(refine(Uniqueness(; numpoints = 12, layers=layers))(pack...))) == 12 +@test length(first(refine(Uniqueness(; numsites=12, layers=layers))(pack...))) == 12 # Test the curried allocating version -@test length(first(refine!(c, Uniqueness(; numpoints = length(c), layers=layers))(pack...))) == +@test length(first(refine!(c, Uniqueness(; numsites=length(c), layers=layers))(pack...))) == length(c) -@test_throws DimensionMismatch refine!(c, Uniqueness(; numpoints = length(c) - 1, layers=layers))( +@test_throws DimensionMismatch refine!(c, Uniqueness(; numsites=length(c) - 1, layers=layers))( pack..., ) -end \ No newline at end of file +end
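
Taken together, these changes replace the old `(sampler, uncertainty)` call style with samplers that are fully configured at construction time: every sampler stores `numsites`, and individual seeders or refiners additionally carry `dims`, `uncertainty`, or `layers` as fields, so `seed` now returns a `Vector{CartesianIndex}` directly and `refine` narrows an existing candidate vector. The sketch below illustrates that intended workflow using only constructors and signatures introduced in this diff; the 50×50 grid and the random measurement layer are illustrative stand-ins, and the calls mirror the updated overview and entropize vignettes rather than an independently verified run.

```julia
using BiodiversityObservationNetworks

# Illustrative stand-in for a real measurement raster; `entropize` rescales it
# to per-pixel entropy on the unit interval, as in the entropize vignette.
measurements = rand(50, 50)
U = entropize(measurements)

# Seeders now carry the candidate grid dimensions, so `seed` takes no raster.
candidates = seed(BalancedAcceptance(; numsites = 100, dims = size(U)))

# Refiners carry their auxiliary data (here the uncertainty layer) as a field.
locations = refine(candidates, AdaptiveSpatial(; numsites = 30, uncertainty = U))

# The curried forms keep the pipeline style shown in the overview vignette.
locations =
    seed(BalancedAcceptance(; numsites = 100, dims = size(U))) |>
    refine(AdaptiveSpatial(; numsites = 30, uncertainty = U))
```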