Merge pull request #15 from risi-kondor/pytest
Pytest automated tests
Showing 12 changed files with 312 additions and 10 deletions.
@@ -10,6 +10,7 @@ on:
    branches:
      - main
      - master
      - dev**
  pull_request:
    branches:
      - main
@@ -0,0 +1,4 @@
#*
#**
.#*
__pycache__/**
@@ -0,0 +1,113 @@
import os
import torch
import pytest


@pytest.fixture(scope="session")
def ptens_cuda_support():
    import ptens_base

    string = ptens_base.status_str().split("\n")
    for line in string:
        if "CUDA support" in line:
            if "ON" in line:
                return True
            if "OFF" in line:
                return False
    assert False


@pytest.fixture(scope="session")
def device(ptens_cuda_support):
    device = os.environ["TORCH_TEST_DEVICE"]

    if "cuda" in device:
        assert ptens_cuda_support
        assert torch.cuda.is_available()

    return device
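The device fixture above takes the target device from the TORCH_TEST_DEVICE environment variable and raises a KeyError if it is unset, so the suite has to be launched with that variable defined. A minimal sketch of one way to drive it (only the variable name comes from the fixture; the pytest.main invocation is illustrative, not part of the repository):

# Illustrative runner, not part of the repository: set the device the fixtures
# expect, then hand control to pytest. Use "cuda:0" instead on a CUDA-enabled build.
import os
import pytest

if __name__ == "__main__":
    os.environ["TORCH_TEST_DEVICE"] = "cpu"
    raise SystemExit(pytest.main(["-v"]))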
@pytest.fixture(scope="session")
def float_epsilon():
    return 1e-5


def numerical_grad_sum(fn, x, h):
    grad = torch.zeros_like(x)
    for i in range(x.numel()):
        xp = x.clone()
        xp.view(-1)[i] += h
        xm = x.clone()
        xm.view(-1)[i] -= h

        # Using torch.sum here because torch autograd computes the partial derivatives
        # of a scalar-valued function. Summing the output gives such a scalar, and the
        # summed contributions factorize over the individual input elements.
        num_diff = torch.sum(fn(xp)) - torch.sum(fn(xm))
        grad_value = num_diff / (2 * float(h))
        grad.view(-1)[i] = grad_value
    return grad
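For reference, the helper above is the central-difference approximation applied to the summed output: for a map f with components f_j, each entry of the returned tensor approximates

\[
\frac{\partial}{\partial x_i} \sum_j f_j(x) \;\approx\; \frac{\sum_j f_j(x + h\,e_i) - \sum_j f_j(x - h\,e_i)}{2h},
\]

which is exactly what torch.sum(fn(x)).backward() would populate in x.grad, so the two are directly comparable in the tests below.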
@pytest.mark.parametrize("m,c", [(0., 3.), (0.5, -0.3), (-0.8, 0.2)])
def test_numerical_grad_linear(m, c):
    def linear(x):
        return m*x + c

    x = torch.randn((5, 10))
    grad = numerical_grad_sum(linear, x, 1e-2)
    ana_grad = torch.ones_like(x) * m

    allclose = torch.allclose(ana_grad, grad, rtol=1e-3, atol=1e-5)
    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(ana_grad - grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(ana_grad - grad))}")
        print(f"Numerical grad range: [{grad.min()}, {grad.max()}]")
        print(f"Analytical grad range: [{ana_grad.min()}, {ana_grad.max()}]")

    assert allclose
@pytest.mark.parametrize("a,b,c", [(1., 2., 3.), (-0.5, 0.4, -0.3), (1.2, -0.8, 0.2)])
def test_numerical_grad_square(a, b, c):
    from torch.autograd.gradcheck import gradcheck

    def square(x):
        return a*x**2 + b*x + c

    x = torch.randn((5, 10))
    grad = numerical_grad_sum(square, x, 1e-3)
    ana_grad = 2*a*x + b

    allclose = torch.allclose(ana_grad, grad, rtol=1e-2, atol=1e-2)

    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(ana_grad - grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(ana_grad - grad))}")
        print(f"Numerical grad range: [{grad.min()}, {grad.max()}]")
        print(f"Analytical grad range: [{ana_grad.min()}, {ana_grad.max()}]")

    assert allclose
    x.requires_grad_()
    assert gradcheck(square, (x,), eps=1e-2, rtol=1e-2, atol=1e-2)
# A test against autograd for validation
def test_against_autograd():
    def complex_function(x):
        return torch.sum(torch.sin(x) + x**2)

    x = torch.randn(5, 10, requires_grad=True)

    # Compute the gradient using autograd
    y = complex_function(x)
    y.backward()
    autograd_grad = x.grad

    # Compute the gradient using the numerical method
    numerical_grad = numerical_grad_sum(complex_function, x.detach(), 1e-3)

    allclose = torch.allclose(autograd_grad, numerical_grad, rtol=1e-2, atol=1e-2)
    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(autograd_grad - numerical_grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(autograd_grad - numerical_grad))}")

    assert allclose
@@ -0,0 +1,61 @@
import torch
import ptens


def test_create_tensor(device):
    A = torch.ones((3, 5))
    sum_a = float(torch.sum(A**2))
    assert A.shape == (3, 5)
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < 1e-6


def test_create_ptensor0(device, float_epsilon):
    A = ptens.ptensor0.randn([2], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [2]
    assert B.shape == (5,)
    assert device in str(B.device)
    assert sum_a > 1e-3
    assert abs(sum_a - sum_b) < float_epsilon


def test_create_ptensor1(device, float_epsilon):
    A = ptens.ptensor1.randn([1, 2, 3], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [1, 2, 3]
    assert B.shape == (3, 5)
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < float_epsilon
    assert sum_a > 1e-3


def test_create_ptensor2(device, float_epsilon):
    A = ptens.ptensor2.randn([1, 2, 3], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [1, 2, 3]
    assert B.shape == (3, 3, 5)
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < float_epsilon
    assert sum_a > 1e-3


def test_sequential(device, float_epsilon):
    A = ptens.ptensor1.sequential([1, 2, 3], 5)
    sum_a = float(torch.sum(A))
    B = A.to(device)
    sum_b = float(torch.sum(B))

    assert abs(sum_a - sum_b) < float_epsilon
    assert abs(sum_a - sum(range(3 * 5))) < float_epsilon
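The last assertion in test_sequential presupposes that ptens.ptensor1.sequential([1, 2, 3], 5) fills the 3 × 5 tensor with the consecutive values 0, 1, ..., 14 (inferred from the assertion itself rather than from ptens documentation), so the expected total is

\[ \sum_{k=0}^{14} k = \frac{14 \cdot 15}{2} = 105 . \]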
Empty file.
@@ -0,0 +1,64 @@
import torch
import ptens
import pytest
import ptens_base
from conftest import numerical_grad_sum

from torch.autograd.gradcheck import gradcheck


def test_bug1(device):
    nnodes = 15
    graph = ptens.ggraph.random(nnodes, 0.5)
    subgraphs = [ptens.subgraph.trivial(), ptens.subgraph.edge()]
    node_values = torch.rand(nnodes, 1, requires_grad=True)

    node_attributes = ptens.subgraphlayer0.from_matrix(graph, ptens.subgraph.trivial(), node_values)

    for sg in subgraphs:
        gather_features = ptens.subgraphlayer0.gather(sg, node_attributes)
        result = torch.sum(gather_features)
        result.backward()

        # linmap_features = ptens.subgraphlayer0.linmaps(node_attributes)
        result = torch.sum(node_attributes)
        result.backward()

        check = gradcheck(ptens.subgraphlayer0.gather, (sg, node_attributes), eps=1e-3)
        assert check


# class TestGather(object):
#     h = 1e-3

#     def backprop(self, cls, N, nc, device):
#         atoms = ptens_base.atomspack.random(N, nc, 0.3)
#         x = cls.randn(atoms, nc).to(device)
#         x.requires_grad_()
#         G = ptens.ggraph.random(N, 0.3)
#         atoms2 = G.subgraphs(ptens.subgraph.trivial())

#         check = gradcheck(cls.gather, (atoms2, x), eps=self.h)
#         assert check

#         z = cls.gather(atoms2, x)
#         loss = torch.sum(z)
#         loss.backward()
#         xgrad = x.grad

#         fn = lambda x: cls.gather(atoms2, x)
#         xgrad2 = numerical_grad_sum(fn, x, self.h)

#         assert torch.allclose(xgrad, xgrad2, rtol=1e-2, atol=1e-2)

#     @pytest.mark.parametrize(('N', 'nc'), [(8, 1), (1, 2), (16, 4)])
#     def test_gather0(self, N, nc, device):
#         self.backprop(ptens.ptensorlayer0, N, nc, device)

#     @pytest.mark.parametrize(('N', 'nc'), [(8, 1), (1, 2), (16, 4)])
#     def test_gather1(self, N, nc, device):
#         self.backprop(ptens.ptensorlayer0, N, nc, device)
@@ -0,0 +1,33 @@
import torch
import ptens_base as pb
import ptens as p


def test_ptensorlayer(device, float_epsilon):
    atoms = pb.atomspack.from_list([[1, 3, 4], [2, 5], [0, 2]])
    atoms2 = pb.atomspack.random(5, 5, 0.6)

    A0 = p.ptensorlayer0.randn(atoms, 3)
    A1 = p.ptensorlayer1.randn(atoms, 3)
    A2 = p.ptensorlayer2.randn(atoms, 3)

    B0 = p.ptensorlayer1.gather(atoms2, A0)
    B1 = p.ptensorlayer1.gather(atoms2, A1)
    B2 = p.ptensorlayer1.gather(atoms2, A2)

    A0g = A0.to(device)
    A1g = A1.to(device)
    A2g = A2.to(device)

    B0g = p.ptensorlayer1.gather(atoms2, A0g)
    B1g = p.ptensorlayer1.gather(atoms2, A1g)
    B2g = p.ptensorlayer1.gather(atoms2, A2g)

    # Making sure we don't just compute on zeros
    assert torch.norm(B0) > float_epsilon
    assert torch.norm(B1) > float_epsilon
    assert torch.norm(B2) > float_epsilon

    assert torch.allclose(B0, B0g.to("cpu"))
    assert torch.allclose(B1g.to("cpu"), B1)
    assert torch.allclose(B2g.to("cpu"), B2)
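The pattern in test_ptensorlayer — compute on CPU, repeat on the target device, then compare after moving the result back — is the same consistency check the other device tests use. A torch-only sketch of the idea, with no ptens types involved (the helper name and signature are illustrative, not part of the repository):

import torch

def cpu_vs_device_consistency(fn, x, device, rtol=1e-5, atol=1e-8):
    # Illustrative helper, not part of the repository: evaluate fn on CPU and on
    # `device`, guard against a vacuous pass on an all-zero output, and compare.
    out_cpu = fn(x)
    out_dev = fn(x.to(device))
    assert torch.norm(out_cpu) > 0
    assert torch.allclose(out_cpu, out_dev.to("cpu"), rtol=rtol, atol=atol)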