blocks.py
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from utils.pointnet_utils import PointNetSetAbstraction, PointNetFeaturePropagation, index_points, square_distance


class TransformerBlock(nn.Module):
    """Point Transformer layer: vector self-attention over each point's k nearest neighbors."""

    def __init__(self, d_points, d_model, k) -> None:
        super().__init__()
        self.fc1 = nn.Linear(d_points, d_model)  # lift input features to the model width
        self.fc2 = nn.Linear(d_model, d_points)  # project back for the residual connection
        # positional encoding MLP over relative coordinates p_i - p_j
        self.fc_delta = nn.Sequential(
            nn.Linear(3, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model)
        )
        # attention MLP (gamma) producing per-channel attention logits
        self.fc_gamma = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model)
        )
        self.w_qs = nn.Linear(d_model, d_model, bias=False)  # query projection
        self.w_ks = nn.Linear(d_model, d_model, bias=False)  # key projection
        self.w_vs = nn.Linear(d_model, d_model, bias=False)  # value projection
        self.k = k  # number of neighbors attended to
    # xyz: b x n x 3, features: b x n x f
    def forward(self, xyz, features):
        dists = square_distance(xyz, xyz)  # pairwise squared distances, b x n x n
        knn_idx = dists.argsort()[:, :, :self.k]  # indices of the k nearest neighbors, b x n x k
        knn_xyz = index_points(xyz, knn_idx)  # neighbor coordinates, b x n x k x 3
        pre = features  # saved for the residual connection
        x = self.fc1(features)
        # queries stay per-point; keys and values are gathered at the neighbors
        q, k, v = self.w_qs(x), index_points(self.w_ks(x), knn_idx), index_points(self.w_vs(x), knn_idx)
        pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz)  # b x n x k x f
        # subtraction-based vector attention: one weight per neighbor *and* channel
        attn = self.fc_gamma(q[:, :, None] - k + pos_enc)
        attn = F.softmax(attn / np.sqrt(k.size(-1)), dim=-2)  # normalize over the k neighbors, b x n x k x f
        res = torch.einsum('bmnf,bmnf->bmf', attn, v + pos_enc)  # weighted sum over neighbors
        res = self.fc2(res) + pre  # back to d_points channels, plus residual
        return res, attn
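
# Usage sketch (not part of the original file): a minimal shape check for
# TransformerBlock. The sizes below (batch 2, 256 points, 32 input channels,
# d_model=64, k=16) are hypothetical values chosen for illustration.
def _demo_transformer_block():
    xyz = torch.rand(2, 256, 3)     # point coordinates
    feats = torch.rand(2, 256, 32)  # per-point features
    block = TransformerBlock(d_points=32, d_model=64, k=16)
    out, attn = block(xyz, feats)
    assert out.shape == (2, 256, 32)       # residual output keeps d_points channels
    assert attn.shape == (2, 256, 16, 64)  # one weight per neighbor and channel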


class TransitionDown(nn.Module):
    """Downsamples to k points and aggregates neighborhood features via set abstraction."""

    def __init__(self, k, nneighbor, channels) -> None:
        super().__init__()
        # k is the number of sampled centroids, nneighbor the kNN grouping size
        self.sa = PointNetSetAbstraction(k, 0, nneighbor, channels[0], channels[1:], group_all=False, knn=True)

    def forward(self, xyz, points):
        return self.sa(xyz, points)
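
# Usage sketch (not part of the original file), assuming utils.pointnet_utils
# follows the channel-last (b, n, c) convention used elsewhere in this file,
# and that channels[0] counts the 3 xyz dims concatenated during grouping.
# All sizes are hypothetical.
def _demo_transition_down():
    xyz = torch.rand(2, 256, 3)
    points = torch.rand(2, 256, 32)
    td = TransitionDown(k=64, nneighbor=16, channels=[32 + 3, 64, 64])
    new_xyz, new_points = td(xyz, points)
    # expected: new_xyz (2, 64, 3), new_points (2, 64, 64)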


class SwapAxes(nn.Module):
    """Transposes (b, n, c) <-> (b, c, n) so BatchNorm1d can sit inside an nn.Sequential."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x):
        return x.transpose(1, 2)


class TransitionUp(nn.Module):
    """Upsamples coarse features onto a denser point set and fuses them with a skip connection."""

    def __init__(self, dim1, dim2, dim_out) -> None:
        super().__init__()
        # transform for the coarse-level features (dim1 channels)
        self.fc1 = nn.Sequential(
            nn.Linear(dim1, dim_out),
            SwapAxes(),
            nn.BatchNorm1d(dim_out),  # BatchNorm1d expects (b, c, n), hence the SwapAxes pair
            SwapAxes(),
            nn.ReLU(),
        )
        # transform for the dense-level (skip) features (dim2 channels)
        self.fc2 = nn.Sequential(
            nn.Linear(dim2, dim_out),
            SwapAxes(),
            nn.BatchNorm1d(dim_out),
            SwapAxes(),
            nn.ReLU(),
        )
        self.fp = PointNetFeaturePropagation(-1, [])  # empty MLP: distance-weighted interpolation only

    # xyz1/points1: coarse level, xyz2/points2: dense level
    def forward(self, xyz1, points1, xyz2, points2):
        feats1 = self.fc1(points1)
        feats2 = self.fc2(points2)
        # interpolate the coarse features onto the dense point positions
        feats1 = self.fp(xyz2.transpose(1, 2), xyz1.transpose(1, 2), None, feats1.transpose(1, 2)).transpose(1, 2)
        return feats1 + feats2
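
# Usage sketch (not part of the original file): a minimal shape check for
# TransitionUp, assuming PointNetFeaturePropagation interpolates features from
# the coarse coordinates onto the dense ones. All sizes are hypothetical.
def _demo_transition_up():
    xyz1, points1 = torch.rand(2, 64, 3), torch.rand(2, 64, 128)   # coarse level
    xyz2, points2 = torch.rand(2, 256, 3), torch.rand(2, 256, 64)  # dense level
    tu = TransitionUp(dim1=128, dim2=64, dim_out=64)
    out = tu(xyz1, points1, xyz2, points2)
    # expected: (2, 256, 64), coarse features fused with the dense skip features


if __name__ == "__main__":
    _demo_transformer_block()
    _demo_transition_down()
    _demo_transition_up()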