From 9a9043571f7ee55b1e6e37b3d0583bdf5a253619 Mon Sep 17 00:00:00 2001
From: Ludovic Raess
Date: Mon, 20 Nov 2023 12:41:18 +0100
Subject: [PATCH] Improve distributed tests

---
 test/runtests.jl            | 20 ++++++++-
 test/test_distributed.jl    | 86 -------------------------------------
 test/test_distributed_2D.jl | 47 ++++++++++++++++++++
 test/test_distributed_3D.jl | 55 ++++++++++++++++++++++++
 4 files changed, 120 insertions(+), 88 deletions(-)
 delete mode 100644 test/test_distributed.jl
 create mode 100644 test/test_distributed_2D.jl
 create mode 100644 test/test_distributed_3D.jl

diff --git a/test/runtests.jl b/test/runtests.jl
index c6ad28f7..68fcfdf3 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -3,7 +3,15 @@
 using FastIce
 using Pkg
 
-excludedfiles = ["test_excluded.jl"];
+excludedfiles = ["test_excluded.jl"]
+
+# distributed
+test_distributed = ["test_distributed_2D.jl", "test_distributed_3D.jl"]
+nprocs_2D = 4
+nprocs_3D = 8
+ENV["DIMX"] = 2
+ENV["DIMY"] = 2
+ENV["DIMZ"] = 2
 
 function parse_flags!(args, flag; default=nothing, typ=typeof(default))
     for f in args
@@ -42,7 +50,15 @@ function runtests()
             continue
         end
         try
-            run(`$exename --startup-file=no $(joinpath(testdir, f))`)
+            if basename(f) ∈ test_distributed
+                nprocs = contains(f, "2D") ? nprocs_2D : nprocs_3D
+                cmd(n=nprocs) = `mpiexecjl -n $n $exename --startup-file=no --color=yes $(joinpath(testdir, f))`
+                withenv("JULIA_NUM_THREADS" => "4") do
+                    run(cmd())
+                end
+            else
+                run(`$exename --startup-file=no $(joinpath(testdir, f))`)
+            end
         catch ex
             nfail += 1
         end
diff --git a/test/test_distributed.jl b/test/test_distributed.jl
deleted file mode 100644
index d94bc356..00000000
--- a/test/test_distributed.jl
+++ /dev/null
@@ -1,86 +0,0 @@
-include("common.jl")
-
-using MPI
-using FastIce.Distributed
-
-MPI.Init()
-
-nprocs = MPI.Comm_size(MPI.COMM_WORLD)
-
-@testset "Distributed" begin
-    backend = CPU() # until we have testing environment setup for GPU-aware MPI, run only on CPU
-    @testset "2D" begin
-        mpi_dims = parse.(Int, (get(ENV, "DIMX", "1"),
-                                get(ENV, "DIMY", "1")))
-        dims = (0, 0)
-        topo = CartesianTopology(dims)
-        local_size = (4, 5)
-        @testset "Topology" begin
-            @test dimensions(topo) == mpi_dims
-            @test length(neighbors(topo)) == 2
-            @test node_size(topo) == nprocs
-            if global_rank(topo) == 0
-                @test neighbor(topo, 1, 1) == MPI.PROC_NULL
-                @test neighbor(topo, 2, 1) == MPI.PROC_NULL
-                @test has_neighbor(topo, 1, 1) == false
-                @test has_neighbor(topo, 2, 1) == false
-                if mpi_dims[2] > 1
-                    @test neighbor(topo, 1, 2) == mpi_dims[2]
-                    @test has_neighbor(topo, 1, 2) == true
-                else
-                    @test neighbor(topo, 1, 2) == MPI.PROC_NULL
-                    @test has_neighbor(topo, 1, 2) == false
-                end
-            end
-            @test global_grid_size(topo, local_size) == mpi_dims .* local_size
-        end
-        @testset "gather!" begin
-            src = fill(global_rank(topo) + 1, local_size)
-            dst = (global_rank(topo) == 0) ? zeros(Int, mpi_dims .* local_size) : nothing
-            gather!(dst, src, cartesian_communicator(topo))
-            @test dst == repeat(reshape(1:global_size(topo), dimensions(topo))'; inner=size(src))
-        end
-    end
-    @testset "3D" begin
-        mpi_dims = parse.(Int, (get(ENV, "DIMX", "1"),
-                                get(ENV, "DIMY", "1"),
-                                get(ENV, "DIMZ", "1")))
-        dims = (0, 0, 0)
-        topo = CartesianTopology(dims)
-        local_size = (4, 5, 6)
-        @testset "Topology" begin
-            @test dimensions(topo) == mpi_dims
-            @test length(neighbors(topo)) == 3
-            @test node_size(topo) == nprocs
-            if global_rank(topo) == 0
-                @test neighbor(topo, 1, 1) == MPI.PROC_NULL
-                @test neighbor(topo, 2, 1) == MPI.PROC_NULL
-                @test neighbor(topo, 3, 1) == MPI.PROC_NULL
-                @test has_neighbor(topo, 1, 1) == false
-                @test has_neighbor(topo, 2, 1) == false
-                @test has_neighbor(topo, 3, 1) == false
-                if mpi_dims[2] > 1 && mpi_dims[3] > 1
-                    @test neighbor(topo, 1, 2) == mpi_dims[2] * mpi_dims[3]
-                    @test neighbor(topo, 2, 2) == mpi_dims[3]
-                    @test has_neighbor(topo, 1, 2) == true
-                    @test has_neighbor(topo, 2, 2) == true
-                else
-                    @test neighbor(topo, 1, 2) == MPI.PROC_NULL
-                    @test neighbor(topo, 2, 2) == MPI.PROC_NULL
-                    @test has_neighbor(topo, 1, 2) == false
-                    @test has_neighbor(topo, 2, 2) == false
-                end
-            end
-            @test global_grid_size(topo, local_size) == mpi_dims .* local_size
-        end
-        @testset "gather!" begin
-            src = fill(global_rank(topo) + 1, local_size)
-            dst = (global_rank(topo) == 0) ? zeros(Int, mpi_dims .* local_size) : nothing
-            gather!(dst, src, cartesian_communicator(topo))
-            ranks_mat = permutedims(reshape(1:global_size(topo), dimensions(topo)), reverse(1:3))
-            @test dst == repeat(ranks_mat; inner=size(src))
-        end
-    end
-end
-
-MPI.Finalize()
diff --git a/test/test_distributed_2D.jl b/test/test_distributed_2D.jl
new file mode 100644
index 00000000..5e44c8c9
--- /dev/null
+++ b/test/test_distributed_2D.jl
@@ -0,0 +1,47 @@
+include("common.jl")
+
+using MPI
+using FastIce.Distributed
+
+MPI.Init()
+
+nprocs = MPI.Comm_size(MPI.COMM_WORLD)
+
+backend = CPU() # until we have testing environment setup for GPU-aware MPI, run only on CPU
+
+@testset "Distributed 2D" begin
+    mpi_dims = parse.(Int, (get(ENV, "DIMX", "1"),
+                            get(ENV, "DIMY", "1")))
+    dims = (0, 0)
+    topo = CartesianTopology(dims)
+    local_size = (4, 5)
+    @testset "Topology" begin
+        @test dimensions(topo) == mpi_dims
+        @test length(neighbors(topo)) == 2
+        @test node_size(topo) == nprocs
+        if global_rank(topo) == 0
+            @test neighbor(topo, 1, 1) == MPI.PROC_NULL
+            @test neighbor(topo, 2, 1) == MPI.PROC_NULL
+            @test has_neighbor(topo, 1, 1) == false
+            @test has_neighbor(topo, 2, 1) == false
+            if mpi_dims[2] > 1
+                @test neighbor(topo, 1, 2) == mpi_dims[2]
+                @test has_neighbor(topo, 1, 2) == true
+            else
+                @test neighbor(topo, 1, 2) == MPI.PROC_NULL
+                @test has_neighbor(topo, 1, 2) == false
+            end
+        end
+        @test global_grid_size(topo, local_size) == mpi_dims .* local_size
+    end
+    @testset "gather!" begin
+        src = fill(global_rank(topo) + 1, local_size)
+        dst = (global_rank(topo) == 0) ? zeros(Int, mpi_dims .* local_size) : nothing
+        gather!(dst, src, cartesian_communicator(topo))
+        if global_rank(topo) == 0
+            @test dst == repeat(reshape(1:global_size(topo), dimensions(topo))'; inner=size(src))
+        end
+    end
+end
+
+MPI.Finalize()
diff --git a/test/test_distributed_3D.jl b/test/test_distributed_3D.jl
new file mode 100644
index 00000000..d10797b4
--- /dev/null
+++ b/test/test_distributed_3D.jl
@@ -0,0 +1,55 @@
+include("common.jl")
+
+using MPI
+using FastIce.Distributed
+
+MPI.Init()
+
+nprocs = MPI.Comm_size(MPI.COMM_WORLD)
+
+backend = CPU() # until we have testing environment setup for GPU-aware MPI, run only on CPU
+
+@testset "Distributed 3D" begin
+    mpi_dims = parse.(Int, (get(ENV, "DIMX", "1"),
+                            get(ENV, "DIMY", "1"),
+                            get(ENV, "DIMZ", "1")))
+    dims = (0, 0, 0)
+    topo = CartesianTopology(dims)
+    local_size = (4, 5, 6)
+    @testset "Topology" begin
+        @test dimensions(topo) == mpi_dims
+        @test length(neighbors(topo)) == 3
+        @test node_size(topo) == nprocs
+        if global_rank(topo) == 0
+            @test neighbor(topo, 1, 1) == MPI.PROC_NULL
+            @test neighbor(topo, 2, 1) == MPI.PROC_NULL
+            @test neighbor(topo, 3, 1) == MPI.PROC_NULL
+            @test has_neighbor(topo, 1, 1) == false
+            @test has_neighbor(topo, 2, 1) == false
+            @test has_neighbor(topo, 3, 1) == false
+            if mpi_dims[2] > 1 && mpi_dims[3] > 1
+                @test neighbor(topo, 1, 2) == mpi_dims[2] * mpi_dims[3]
+                @test neighbor(topo, 2, 2) == mpi_dims[3]
+                @test has_neighbor(topo, 1, 2) == true
+                @test has_neighbor(topo, 2, 2) == true
+            else
+                @test neighbor(topo, 1, 2) == MPI.PROC_NULL
+                @test neighbor(topo, 2, 2) == MPI.PROC_NULL
+                @test has_neighbor(topo, 1, 2) == false
+                @test has_neighbor(topo, 2, 2) == false
+            end
+        end
+        @test global_grid_size(topo, local_size) == mpi_dims .* local_size
+    end
+    @testset "gather!" begin
+        src = fill(global_rank(topo) + 1, local_size)
+        dst = (global_rank(topo) == 0) ? zeros(Int, mpi_dims .* local_size) : nothing
+        gather!(dst, src, cartesian_communicator(topo))
+        ranks_mat = permutedims(reshape(1:global_size(topo), dimensions(topo)), reverse(1:3))
+        if global_rank(topo) == 0
+            @test dst == repeat(ranks_mat; inner=size(src))
+        end
+    end
+end
+
+MPI.Finalize()
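
Note on running the new distributed tests by hand (not part of the patch): the sketch below mirrors what the updated runtests.jl does for the 2D case. It assumes the mpiexecjl wrapper shipped with MPI.jl is on PATH (it can be installed with MPI.install_mpiexecjl()); depending on the local setup, a --project flag pointing at the test environment may also be needed.

    # Hypothetical one-off run of the 2D distributed test (4 ranks, 2x2 grid),
    # executed from the repository root. Assumes `mpiexecjl` from MPI.jl is on
    # PATH; add `--project=...` to the command if the test environment needs it.
    exename = Base.julia_cmd()
    withenv("DIMX" => "2", "DIMY" => "2", "JULIA_NUM_THREADS" => "4") do
        run(`mpiexecjl -n 4 $exename --startup-file=no --color=yes test/test_distributed_2D.jl`)
    end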
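
For reference, the gather! assertions compare against a block array built from the rank numbers. A minimal standalone sketch (no MPI required), assuming the 2x2 process grid and 4x5 local blocks used above, shows the array the 2D test expects rank 0 to receive; it only exercises the expectation formula from the test, not FastIce's gather! itself.

    # Expected gather! result for an assumed 2x2 process grid with 4x5 local blocks:
    # each rank fills its local block with `rank + 1`, so rank 0 should receive a
    # block matrix whose blocks are constant.
    mpi_dims   = (2, 2)
    local_size = (4, 5)
    ranks_mat  = reshape(1:prod(mpi_dims), mpi_dims)'   # same transpose as in the test
    expected   = repeat(ranks_mat; inner=local_size)    # 8x10 global array
    @assert size(expected) == mpi_dims .* local_size
    @assert expected[1:4, 1:5]  == fill(1, 4, 5)        # block the test expects from rank 0
    @assert expected[1:4, 6:10] == fill(2, 4, 5)        # block the test expects from rank 1

The 3D test builds the same kind of reference array, using permutedims(..., reverse(1:3)) in place of the transpose.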