Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding 2 fixes, for using batched ptensorlayers #14

Merged
merged 13 commits into from
Nov 5, 2024
3 changes: 2 additions & 1 deletion .github/workflows/pytest-cpu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,8 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest", "macos-latest"]
python-version: ["3.10", "3.12"]
# Sorry 3.10 version of pytorch is buggy
python-version: ["3.12"]
steps:
- name: Checkout
uses: actions/checkout@v4
Expand Down
8 changes: 5 additions & 3 deletions .github/workflows/pytest-gpu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,8 @@ jobs:
run: |
git clone https://github.com/risi-kondor/cnine.git
cd cnine
git checkout main
pip install python/
git checkout dev
# pip install python/
cd ..
- name: Install and build
run: |
Expand All @@ -69,4 +69,6 @@ jobs:
# pytest python/tests
mkdir ./tmp-run/
cd ./tmp-run/
python -c "import ptens"
# This fails due to actually missing CUDA driver, but that's OK
# Testing for build alone is nice already
# python -c "import ptens"
2 changes: 2 additions & 0 deletions python/bindings/Subgraph_py.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,10 @@ pybind11::class_<Subgraph>(m,"subgraph")

.def("dense",[](const Subgraph& G){return G.dense().torch();})

.def("__eq__", &Subgraph::operator==, py::is_operator())
.def("str",&Subgraph::str,py::arg("indent")="")
.def("__str__",&Subgraph::str,py::arg("indent")="");


//.def("cached",&Subgraph::cached);

24 changes: 12 additions & 12 deletions python/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,23 +121,23 @@ def main():
# ---- Compilation commands ----------------------------------------------------------------------------------

if compile_with_cuda:
ext_modules = [CUDAExtension('ptens_base', [
'../../cnine/include/Cnine_base.cu',
#'../../cnine/cuda/TensorView_accumulators.cu',
#'../../cnine/cuda/BasicCtensorProducts.cu',
'../../cnine/cuda/RtensorUtils.cu',
'../../cnine/cuda/TensorView_add.cu',
'../../cnine/cuda/TensorView_assign.cu',
'../../cnine/cuda/TensorView_inc.cu',
'../../cnine/cuda/BlockCsparseMatrix.cu',
#'../../cnine/cuda/RtensorPackUtils.cu',
#'../../cnine/cuda/gatherRows.cu',
ext_modules = [CUDAExtension('ptens_base', [os.path.relpath(path) for path in [
cwd + cnine_folder + '/include/Cnine_base.cu',
#cwd + cnine_folder + '/cuda/TensorView_accumulators.cu',
#cwd + cnine_folder + '/cuda/BasicCtensorProducts.cu',
cwd + cnine_folder + '/cuda/RtensorUtils.cu',
cwd + cnine_folder + '/cuda/TensorView_add.cu',
cwd + cnine_folder + '/cuda/TensorView_assign.cu',
cwd + cnine_folder + '/cuda/TensorView_inc.cu',
cwd + cnine_folder + '/cuda/BlockCsparseMatrix.cu',
#cwd + cnine_folder + '/cuda/RtensorPackUtils.cu',
#cwd + cnine_folder + '/cuda/gatherRows.cu',
'../cuda/Ptensors0.cu',
'../cuda/Ptensors1.cu',
'../cuda/Ptensors2.cu',
#'../cuda/NodeLayer.cu',
'bindings/ptens_py.cpp'
],
]],
include_dirs=_include_dirs,
extra_compile_args={
'nvcc': _nvcc_compile_args,
Expand Down
19 changes: 13 additions & 6 deletions python/src/ptens/batched_ptensorlayer0.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,18 +142,25 @@ def backward(ctx,g):
class batched_ptensorlayer0_gatherFn(torch.autograd.Function):

@staticmethod
def forward(ctx,atoms,x,map):
def forward(atoms,x,map):
r=batched_ptensorlayer0.zeros(atoms,x.get_nc()*([1,1,2][x.getk()]),device=x.device)
r.backend().add_gather(x.backend(),map)
ctx.x=x
ctx.map=map
return r

@staticmethod
def setup_context(ctx, inputs, outputs):
atoms, x, map = inputs
ctx.map = map
ctx.atoms = atoms
ctx.save_for_backward(x)

@staticmethod
def backward(ctx,g):
r=ctx.x.zeros_like()
r.backend().add_gather_back(g.backend(),ctx.map)
return r
x, = ctx.saved_tensors
r = x.zeros_like()
g_view = pb.batched_ptensors0.view(ctx.atoms, g)
r.backend().add_gather_back(g_view, ctx.map)
return None, r, None



20 changes: 15 additions & 5 deletions python/src/ptens/batched_subgraphlayer0.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,12 +44,22 @@ def randn(self,G,S,nc,device='cpu'):
return batched_subgraphlayer0(G,S,atoms,M)

@classmethod
def from_ptensorlayers(self,list):
for a in list:
def from_subgraphlayers(self,subgraphlayer_list):
sub_graph = None
graph_list = []
for a in subgraphlayer_list:
assert isinstance(a,p.ptensorlayer0)
atoms=pb.batched_atomspack([a.atoms for a in list])
M=torch.cat(list,0)
return batched_subgraphlayer0(G,S,atoms,M)
if sub_graph is None:
sub_graph = a.S
elif sub_graph != a.S:
raise RuntimeError("When creating batched subgraphlayers0, the list of subgraphlayers did not contain identical subgraphs."
f" Please ensure all subgraphlayers in `subgraphlayer_list` have the same subgraph. The common subgraph {sub_graph}"
f" is not identical to the last found {a.S}")
graph_list.append(a.G)
atoms=pb.batched_atomspack([a.atoms for a in subgraphlayer_list])
G = p.batched_ggraph.from_graphs(graph_list)
M=torch.cat(subgraphlayer_list,0)
return batched_subgraphlayer0(G=G,S=sub_graph,atoms=atoms,M=M)

@classmethod
def from_matrix(self,G,S,M):
Expand Down
30 changes: 18 additions & 12 deletions python/src/ptens/subgraph.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
#
# This file is part of ptens, a C++/CUDA library for permutation
# equivariant message passing.
#
# This file is part of ptens, a C++/CUDA library for permutation
# equivariant message passing.
#
# Copyright (c) 2023, Imre Risi Kondor
#
# This source code file is subject to the terms of the noncommercial
# license distributed with cnine in the file LICENSE.TXT. Commercial
# use is prohibited. All redistributed versions of this file (in
# original or modified form) must retain this copyright notice and
# must be accompanied by a verbatim copy of the license.
# This source code file is subject to the terms of the noncommercial
# license distributed with cnine in the file LICENSE.TXT. Commercial
# use is prohibited. All redistributed versions of this file (in
# original or modified form) must retain this copyright notice and
# must be accompanied by a verbatim copy of the license.
#
#
import torch
Expand All @@ -29,7 +29,7 @@ def make(self,x):
@classmethod
def from_edge_index(self,M,n=-1,labels=None,degrees=None):
G=subgraph()
if degrees is None:
if degrees is None:
if labels is None:
G.obj=_subgraph.edge_index(M,n)
else:
Expand Down Expand Up @@ -94,15 +94,15 @@ def n_espaces(self):
def evecs(self):
self.set_evecs()
return self.obj.evecs()

def set_evecs(self):
if self.has_espaces()>0:
return
L=self.torch().float()
L=torch.diag(torch.sum(L,1))-L
U,S,V=torch.linalg.svd(L)
self.obj.set_evecs(U,S)

def torch(self):
return self.obj.dense()

Expand All @@ -116,4 +116,10 @@ def __str__(self):
def __repr__(self):
return self.obj.__str__()


# ---- Operators --------------------------------------------------------------------------------------------
def __eq__(self, other):
if id(self) == id(other):
return True
if id(self.obj) == id(other.obj):
return True
return self.obj.__eq__(other.obj)
Loading