diff --git a/GNNGraphs/test/Project.toml b/GNNGraphs/test/Project.toml
index e9ebaf98a..d104c951a 100644
--- a/GNNGraphs/test/Project.toml
+++ b/GNNGraphs/test/Project.toml
@@ -1,4 +1,5 @@
 [deps]
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
 Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
 GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
@@ -13,11 +14,13 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 SimpleWeightedGraphs = "47aef6b3-ad0c-573a-a1e2-d07658019622"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
 TestItems = "1c621080-faea-4a02-84b6-bbd5e436b8fe"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
+cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"
 
 [compat]
 GPUArraysCore = "0.1"
diff --git a/GNNGraphs/test/gnngraph.jl b/GNNGraphs/test/gnngraph.jl
index 0fcd4ff5d..1e78349dc 100644
--- a/GNNGraphs/test/gnngraph.jl
+++ b/GNNGraphs/test/gnngraph.jl
@@ -94,7 +94,7 @@ end
         @test adjacency_matrix(g; dir = :in) == adj_mat
         @test adjacency_matrix(g; dir = :out) == adj_mat
 
-        if TEST_GPU && !(dev isa MetalDevice)
+        if TEST_GPU && !(dev isa MetalDevice) && GRAPH_T != :sparse
             # See https://github.com/JuliaGPU/CUDA.jl/pull/1093
             mat_gpu = adjacency_matrix(g_gpu)
             @test mat_gpu isa AbstractMatrix{Int}
@@ -105,7 +105,7 @@ end
 
     @testset "normalized_laplacian" begin
         mat = normalized_laplacian(g)
-        if TEST_GPU && !(dev isa MetalDevice)
+        if TEST_GPU && !(dev isa MetalDevice) && GRAPH_T != :sparse
             mat_gpu = normalized_laplacian(g_gpu)
             @test mat_gpu isa AbstractMatrix{Float32}
             @test get_device(mat_gpu)isa AbstractGPUDevice
@@ -114,7 +114,7 @@ end
    end
 
    @testset "scaled_laplacian" begin
-        if TEST_GPU && !(dev isa MetalDevice)
+        if TEST_GPU && !(dev isa MetalDevice) && GRAPH_T != :sparse
            mat = scaled_laplacian(g)
            mat_gpu = scaled_laplacian(g_gpu)
            @test mat_gpu isa AbstractMatrix{Float32}
@@ -132,10 +132,10 @@ end
     @testset "functor" begin
         s_cpu, t_cpu = edge_index(g)
         s_gpu, t_gpu = edge_index(g_gpu)
-        @test s_gpu isa AbstractVector{Int}
+        @test s_gpu isa AbstractVector{<:Integer}
         @test get_device(s_gpu) isa AbstractGPUDevice
         @test Array(s_gpu) == s_cpu
-        @test t_gpu isa AbstractVector{Int}
+        @test t_gpu isa AbstractVector{<:Integer}
         @test get_device(t_gpu) isa AbstractGPUDevice
         @test Array(t_gpu) == t_cpu
     end
diff --git a/GNNGraphs/test/sampling.jl b/GNNGraphs/test/sampling.jl
index 77acf033e..557a8ae89 100644
--- a/GNNGraphs/test/sampling.jl
+++ b/GNNGraphs/test/sampling.jl
@@ -108,7 +108,6 @@
     @test !isempty(mini_batch_gnn.graph)
 
     num_sampled_nodes = mini_batch_gnn.num_nodes
-    println("Number of nodes in mini-batch: ", num_sampled_nodes)
 
     @test num_sampled_nodes == 2
 
diff --git a/GNNGraphs/test/utils.jl b/GNNGraphs/test/utils.jl
index e625299a1..85995c795 100644
--- a/GNNGraphs/test/utils.jl
+++ b/GNNGraphs/test/utils.jl
@@ -94,16 +94,16 @@ end
 
 @testitem "color_refinement" setup=[GraphsTestModule] begin
-    # TODO: this passes only on julia 1.10. Use a deterministic graph generator
     using .GraphsTestModule
+    using StableRNGs
     for GRAPH_T in GRAPH_TYPES
-        rng = MersenneTwister(17)
+        rng = StableRNG(17)
         g = rand_graph(rng, 10, 20, graph_type = GRAPH_T)
         x0 = ones(Int, 10)
         x, ncolors, niters = color_refinement(g, x0)
-        @test ncolors == 7
+        @test ncolors == 9
         @test niters == 2
-        @test x == [1, 1, 6, 6, 6, 7, 8, 9, 10, 11]
+        @test x == [6, 7, 8, 9, 10, 11, 12, 6, 13, 14]
         x2, _, _ = color_refinement(g)
         @test x2 == x