Merge pull request #15 from risi-kondor/pytest
Pytest automated tests
risi-kondor authored Nov 8, 2024
2 parents fec0d0a + ee41d00 commit 8634a64
Showing 12 changed files with 312 additions and 10 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/pytest-cpu.yml
@@ -10,6 +10,7 @@ on:
    branches:
      - main
      - master
      - dev**
  pull_request:
    branches:
      - main
@@ -57,6 +58,7 @@ jobs:
          cd ..
      - name: Install and build
        run: |
          pip install pytest
          export CNINE_FOLDER="/../cnine/"
          pip install python/
          # TODO activate pytests when ready
@@ -66,3 +68,6 @@
          mkdir ./tmp-run/
          cd ./tmp-run/
          python -c "import ptens"
          export TORCH_TEST_DEVICE='cpu'
          pytest ../python/pytest
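The CPU workflow above exports TORCH_TEST_DEVICE before invoking pytest; the session-scoped device fixture in conftest.py (added below) reads this variable to decide which device the tests run on. A minimal sketch of reproducing that step locally, assuming ptens and pytest are already installed and that the path to python/pytest is adjusted to your checkout:

# Illustrative local reproduction of the CI pytest step (assumes ptens and pytest are installed;
# the relative path to python/pytest depends on where this script is run from).
import os
import subprocess

os.environ["TORCH_TEST_DEVICE"] = "cpu"          # consumed by the session-scoped `device` fixture
subprocess.run(["pytest", "python/pytest"], check=True)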
1 change: 1 addition & 0 deletions .github/workflows/pytest-gpu.yml
@@ -10,6 +10,7 @@ on:
    branches:
      - main
      - master
      - dev**
  pull_request:
    branches:
      - main
4 changes: 4 additions & 0 deletions python/pytest/.gitignore
@@ -0,0 +1,4 @@
#*
#**
.#*
__pycache__/**
113 changes: 113 additions & 0 deletions python/pytest/conftest.py
@@ -0,0 +1,113 @@
import os
import torch
import pytest


@pytest.fixture(scope="session")
def ptens_cuda_support():
    import ptens_base

    string = ptens_base.status_str().split("\n")
    for line in string:
        if "CUDA support" in line:
            if "ON" in line:
                return True
            if "OFF" in line:
                return False
    assert False


@pytest.fixture(scope="session")
def device(ptens_cuda_support):
    device = os.environ["TORCH_TEST_DEVICE"]

    if "cuda" in device:
        assert ptens_cuda_support
        assert torch.cuda.is_available()

    return device


@pytest.fixture(scope="session")
def float_epsilon():
    return 1e-5


def numerical_grad_sum(fn, x, h):
    grad = torch.zeros_like(x)
    for i in range(x.numel()):
        xp = x.clone()
        xp.view(-1)[i] += h
        xm = x.clone()
        xm.view(-1)[i] -= h

        # Using torch.sum here because torch autograd computes the gradient of a scalar-valued function.
        # Summing the outputs gives us such a scalar-valued function, and the summed parts factorize.
        num_diff = torch.sum(fn(xp)) - torch.sum(fn(xm))
        grad_value = num_diff / (2 * float(h))
        grad.view(-1)[i] = grad_value
    return grad

@pytest.mark.parametrize("m,c", [(0., 3.), (0.5, -0.3), (-0.8, 0.2)])
def test_numerical_grad_linear(m, c):
    def linear(x):
        return m*x + c

    x = torch.randn((5,10))
    grad = numerical_grad_sum(linear, x, 1e-2)
    ana_grad = torch.ones_like(x) * m

    allclose = torch.allclose(ana_grad, grad, rtol=1e-3, atol=1e-5)
    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(ana_grad - grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(ana_grad - grad))}")
        print(f"Numerical grad range: [{grad.min()}, {grad.max()}]")
        print(f"Analytical grad range: [{ana_grad.min()}, {ana_grad.max()}]")

    assert allclose

@pytest.mark.parametrize("a,b,c", [(1. ,2., 3.), (-0.5, 0.4, -0.3), (1.2, -0.8, 0.2)])
def test_numerical_grad_square(a, b, c):
    from torch.autograd.gradcheck import gradcheck
    def square(x):
        return a*x**2 + b*x + c

    x = torch.randn((5,10))
    grad = numerical_grad_sum(square, x, 1e-3)
    ana_grad = 2*a*x + b

    allclose = torch.allclose(ana_grad, grad, rtol=1e-2, atol=1e-2)

    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(ana_grad - grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(ana_grad - grad))}")
        print(f"Numerical grad range: [{grad.min()}, {grad.max()}]")
        print(f"Analytical grad range: [{ana_grad.min()}, {ana_grad.max()}]")

    assert allclose
    x.requires_grad_()
    assert gradcheck(square, (x,), eps=1e-2, rtol=1e-2, atol=1e-2)


# Add a test against autograd for validation
def test_against_autograd():
    def complex_function(x):
        return torch.sum(torch.sin(x) + x**2)

    x = torch.randn(5, 10, requires_grad=True)

    # Compute gradient using autograd
    y = complex_function(x)
    y.backward()
    autograd_grad = x.grad

    # Compute gradient using numerical method
    numerical_grad = numerical_grad_sum(complex_function, x.detach(), 1e-3)

    allclose = torch.allclose(autograd_grad, numerical_grad, rtol=1e-2, atol=1e-2)
    if not allclose:
        print(f"Max absolute difference: {torch.max(torch.abs(autograd_grad - numerical_grad))}")
        print(f"Mean absolute difference: {torch.mean(torch.abs(autograd_grad - numerical_grad))}")

    assert allclose
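numerical_grad_sum in the new conftest.py approximates the gradient of sum(fn(x)) with a central difference, perturbing one entry of x at a time: grad_i ≈ (sum fn(x + h·e_i) − sum fn(x − h·e_i)) / (2h). A quick standalone sanity check, assuming it is run from python/pytest/ so that conftest is importable (the same import test_tests.py uses):

# d/dx of x**3 at x = 2 is 12; the central difference with h = 1e-3 should land very close.
import torch
from conftest import numerical_grad_sum

x = torch.tensor([2.0])
approx = numerical_grad_sum(lambda t: t ** 3, x, 1e-3)
assert torch.allclose(approx, torch.tensor([12.0]), atol=1e-3)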
61 changes: 61 additions & 0 deletions python/pytest/test_doc.py
@@ -0,0 +1,61 @@
import torch
import ptens


def test_create_tensor(device):
    A = torch.ones((3, 5))
    sum_a = float(torch.sum(A**2))
    assert A.shape == (3, 5)
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < 1e-6


def test_create_ptensor0(device, float_epsilon):
    A = ptens.ptensor0.randn([2], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [2]
    assert B.shape == (5,)
    assert device in str(B.device)
    assert sum_a > 1e-3
    assert abs(sum_a - sum_b) < float_epsilon


def test_create_ptensor1(device, float_epsilon):
    A = ptens.ptensor1.randn([1, 2, 3], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [1, 2, 3]
    assert B.shape == (3, 5)
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < float_epsilon
    assert sum_a > 1e-3


def test_create_ptensor2(device, float_epsilon):
    A = ptens.ptensor2.randn([1, 2, 3], 5)
    sum_a = float(torch.sum(A**2))
    B = A.to(device)
    sum_b = float(torch.sum(B**2))
    print(B)
    assert B.atoms == [1, 2, 3]
    assert B.shape == (3, 3, 5)
    assert device in str(B.device)
    assert abs(sum_a - sum_b) < float_epsilon
    assert sum_a > 1e-3


def test_sequential(device, float_epsilon):
    A = ptens.ptensor1.sequential([1, 2, 3], 5)
    sum_a = float(torch.sum(A))
    B = A.to(device)
    sum_b = float(torch.sum(B))

    assert abs(sum_a - sum_b) < float_epsilon
    assert abs(sum_a - sum(range(3 * 5))) < float_epsilon
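The device and float_epsilon arguments in the tests above are not ordinary parameters; pytest injects them from the session-scoped fixtures defined in conftest.py. A stripped-down illustration of that mechanism with a hypothetical fixture name (not part of this suite):

import pytest

@pytest.fixture(scope="session")
def tolerance():                   # hypothetical fixture, analogous to float_epsilon in conftest.py
    return 1e-5

def test_roundtrip(tolerance):     # pytest matches the argument name to the fixture and injects it
    assert abs((0.1 + 0.2) - 0.3) < tolerance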
Empty file added python/pytest/test_manual.py
Empty file.
64 changes: 64 additions & 0 deletions python/pytest/test_tests.py
@@ -0,0 +1,64 @@
import torch
import ptens
import pytest
import ptens_base
from conftest import numerical_grad_sum

from torch.autograd.gradcheck import gradcheck


def test_bug1(device):
    nnodes = 15
    graph = ptens.ggraph.random(nnodes, 0.5)
    subgraphs = [ptens.subgraph.trivial(), ptens.subgraph.edge()]
    node_values = torch.rand(nnodes, 1, requires_grad=True)

    node_attributes = ptens.subgraphlayer0.from_matrix(graph, ptens.subgraph.trivial(), node_values)

    for sg in subgraphs:
        gather_features = ptens.subgraphlayer0.gather(sg, node_attributes)
        result = torch.sum(gather_features)
        result.backward()

        # linmap_features = ptens.subgraphlayer0.linmaps(node_attributes)
        result = torch.sum(node_attributes)
        result.backward()

        check = gradcheck(ptens.subgraphlayer0.gather, (sg, node_attributes), eps=1e-3)
        assert check



# class TestGather(object):
# h=1e-3

# def backprop(self,cls, N,nc, device):
# atoms=ptens_base.atomspack.random(N, nc, 0.3)
# x=cls.randn(atoms,nc).to(device)
# x.requires_grad_()
# G=ptens.ggraph.random(N,0.3)
# atoms2 = G.subgraphs(ptens.subgraph.trivial())

# check = gradcheck(cls.gather, (atoms2, x), eps=self.h)
# assert check

# z = cls.gather(atoms2, x)
# loss=torch.sum(z)
# loss.backward()
# xgrad=x.grad


# fn = lambda x: cls.gather(atoms2, x)
# xgrad2 = numerical_grad_sum(fn, x, self.h)

# assert torch.allclose(xgrad, xgrad2, rtol=1e-2, atol=1e-2)



# @pytest.mark.parametrize(('N', 'nc'), [(8, 1), (1, 2), (16, 4)])
# def test_gather0(self,N, nc, device):
# self.backprop(ptens.ptensorlayer0,N,nc, device)

# @pytest.mark.parametrize(('N', 'nc'), [(8, 1), (1, 2), (16, 4)])
# def test_gather1(self,N, nc, device):
# self.backprop(ptens.ptensorlayer0,N,nc, device)
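test_bug1 above validates subgraphlayer0.gather against finite differences via torch's gradcheck, the same utility the commented-out TestGather class relies on. For reference, a minimal ptens-free illustration of that call pattern; gradcheck compares autograd gradients with numerically estimated ones and is most reliable with double-precision inputs:

# Minimal gradcheck usage on a plain function, independent of ptens.
import torch
from torch.autograd.gradcheck import gradcheck

x = torch.randn(4, 3, dtype=torch.double, requires_grad=True)
assert gradcheck(lambda t: (t ** 2).sum(), (x,), eps=1e-6)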
33 changes: 33 additions & 0 deletions python/pytest/test_tests4.py
@@ -0,0 +1,33 @@
import torch
import ptens_base as pb
import ptens as p


def test_ptensorlayer(device, float_epsilon):
    atoms = pb.atomspack.from_list([[1, 3, 4], [2, 5], [0, 2]])
    atoms2 = pb.atomspack.random(5, 5, 0.6)

    A0 = p.ptensorlayer0.randn(atoms, 3)
    A1 = p.ptensorlayer1.randn(atoms, 3)
    A2 = p.ptensorlayer2.randn(atoms, 3)

    B0 = p.ptensorlayer1.gather(atoms2, A0)
    B1 = p.ptensorlayer1.gather(atoms2, A1)
    B2 = p.ptensorlayer1.gather(atoms2, A2)

    A0g = A0.to(device)
    A1g = A1.to(device)
    A2g = A2.to(device)

    B0g = p.ptensorlayer1.gather(atoms2, A0g)
    B1g = p.ptensorlayer1.gather(atoms2, A1g)
    B2g = p.ptensorlayer1.gather(atoms2, A2g)

    # Making sure we don't just compute on zeros
    assert torch.norm(B0) > float_epsilon
    assert torch.norm(B1) > float_epsilon
    assert torch.norm(B2) > float_epsilon

    assert torch.allclose(B0, B0g.to("cpu"))
    assert torch.allclose(B1g.to("cpu"), B1)
    assert torch.allclose(B2g.to("cpu"), B2)
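test_ptensorlayer above checks CPU/GPU parity by running gather on both devices and comparing the results after moving the device copy back to the CPU. The same parity-check structure in a generic, ptens-free sketch (illustrative helper name, not part of the suite):

# Generic device-parity check pattern using plain torch.
import torch

def parity_ok(op, x, device, rtol=1e-5, atol=1e-6):
    ref = op(x)                        # reference result on the CPU
    out = op(x.to(device)).to("cpu")   # same op on the target device, moved back for comparison
    return torch.allclose(ref, out, rtol=rtol, atol=atol)

assert parity_ok(lambda t: torch.relu(t).sum(dim=-1), torch.randn(4, 3), "cpu")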
24 changes: 22 additions & 2 deletions python/src/ptens/ptensor.py
@@ -16,7 +16,7 @@
import ptens_base as pb

class ptensor(torch.Tensor):

    def __new__(cls, atoms:list, data:torch.Tensor | torch.Size, *args, **kwargs):
        # We write a new __new__ function here, since the signature now includes atoms.
        # But we need __new__ since it handles the memory allocations, potentially on the GPU.
@@ -37,7 +37,26 @@ def make(cls, atoms:list, M:torch.Tensor | torch.Size):

    # ---- Operations ----------------------------------------------------------------------------------------


    _covariant_functions = [
        torch.Tensor.to,
        torch.Tensor.clone,
    ]

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}

        r = super().__torch_function__(func, types, args, kwargs)
        if func in cls._covariant_functions:
            # A bit more robust with respect to the order of arguments.
            for arg in args:
                if hasattr(arg, "atoms"):
                    r.atoms = arg.atoms
                    break
        return r


    def __add__(self,y):
        assert self.size()==y.size()
        assert self.atoms==y.atoms
@@ -49,3 +68,4 @@ def __str__(self):

    def to_string(self,indent):
        return self.backend().str(indent)

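The __torch_function__ override added to ptensor above makes functions listed in _covariant_functions (currently .to and .clone) copy the atoms attribute from the first argument that carries one, so the atoms metadata survives device moves and copies. A self-contained sketch of that pattern on a plain torch.Tensor subclass (hypothetical class name, not the actual ptens implementation):

import torch

class tagged(torch.Tensor):
    _covariant_functions = [torch.Tensor.to, torch.Tensor.clone]

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        r = super().__torch_function__(func, types, args, kwargs)
        if func in cls._covariant_functions:
            # Copy the metadata from the first argument that has it; the hasattr guard
            # matters because the leading argument may be a plain tensor.
            for arg in args:
                if hasattr(arg, "atoms"):
                    r.atoms = arg.atoms
                    break
        return r

x = torch.randn(3, 5).as_subclass(tagged)
x.atoms = [1, 2, 3]
y = x.clone()
assert y.atoms == [1, 2, 3]   # metadata propagated through clone()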
5 changes: 3 additions & 2 deletions python/src/ptens/ptensorlayer.py
@@ -19,15 +19,16 @@

class ptensorlayer(torch.Tensor):

    covariant_functions=[torch.Tensor.to,torch.Tensor.add,torch.Tensor.sub,torch.relu,torch.nn.functional.linear]
    covariant_functions=[torch.Tensor.to,torch.Tensor.add,torch.Tensor.sub,torch.relu,torch.nn.functional.linear, torch.Tensor.clone]

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        if func in ptensorlayer.covariant_functions:
            r= super().__torch_function__(func, types, args, kwargs)
            r.atoms=args[0].atoms
            if hasattr(args[0], "atoms"):
                r.atoms=args[0].atoms
        else:
            r= super().__torch_function__(func, types, args, kwargs)
        if isinstance(r,torch.Tensor):
6 changes: 3 additions & 3 deletions python/tests/test_gather_c.py
@@ -7,13 +7,13 @@
class TestGather(object):

    def backprop(self,src,fn,N,_nc):
        if(src==p.ptensors0):
        if(src==p.ptensor0):
            x=src.randn(N,_nc)
        else:
            atoms=ptens_base.atomspack.random(N,0.3)
            x=src.randn(atoms,_nc)
        x.requires_grad_()
        G=p.graph.random(N,0.3)
        G=p.ggraph.random(N,0.3)
        z=fn(x,G)

        testvec=z.randn_like()
@@ -29,4 +29,4 @@ def backprop(self,src,fn,N,_nc):

    @pytest.mark.parametrize('nc', [1, 2, 4])
    def test_gather(self,nc):
        self.backprop(p.ptensors0,p.gather,8,nc)
        self.backprop(p.ptensor0,p.gather,8,nc)