"""
Calculation of gradcheck (PyTorch built-in function to check custom layer implementation)
for verifying a correct gradient implementation.
Documentation: https://pytorch.org/docs/stable/generated/torch.autograd.gradcheck.html
Author: Fabian Wagner
Contact: fabian.wagner@fau.de
"""
from joint_bilateral_filter_layer import JointBilateralFilterFunction3dCPU, JointBilateralFilterFunction3dGPU
import torch
from torch.autograd import gradcheck
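# Note: the *_gpu checks below require a CUDA-capable device; the *_cpu checks run on any
# machine. gradcheck is most reliable in double precision, which is why the arguments that
# are differentiated are created with dtype=torch.double.
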
def gradient_input(layer_bf, use_gpu):
    # Check the gradient with respect to the input tensor.
    tensor_in = torch.randn(2, 1, 10, 10, 10, dtype=torch.double, requires_grad=True)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10, dtype=torch.double)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1)
    sig_y = torch.tensor(1.1)
    sig_z = torch.tensor(1.1)
    sig_r = torch.tensor(0.5)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-6, atol=1e-5))


def gradient_guidance(layer_bf, use_gpu):
    # Check the gradient with respect to the guidance tensor.
    tensor_in = torch.randn(2, 1, 10, 10, 10, dtype=torch.double)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10, dtype=torch.double, requires_grad=True)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1)
    sig_y = torch.tensor(1.1)
    sig_z = torch.tensor(1.1)
    sig_r = torch.tensor(0.5)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-6, atol=1e-5))


def gradient_x(layer_bf, use_gpu):
    # Check the gradient with respect to the spatial filter parameter sigma_x.
    tensor_in = torch.randn(2, 1, 10, 10, 10)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1, dtype=torch.double, requires_grad=True)
    sig_y = torch.tensor(1.1)
    sig_z = torch.tensor(1.1)
    sig_r = torch.tensor(0.5)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-2, atol=1e-3))


def gradient_y(layer_bf, use_gpu):
    # Check the gradient with respect to the spatial filter parameter sigma_y.
    tensor_in = torch.randn(2, 1, 10, 10, 10)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1)
    sig_y = torch.tensor(1.1, dtype=torch.double, requires_grad=True)
    sig_z = torch.tensor(1.1)
    sig_r = torch.tensor(0.5)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-2, atol=1e-3))


def gradient_z(layer_bf, use_gpu):
    # Check the gradient with respect to the spatial filter parameter sigma_z.
    tensor_in = torch.randn(2, 1, 10, 10, 10)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1)
    sig_y = torch.tensor(1.1)
    sig_z = torch.tensor(1.1, dtype=torch.double, requires_grad=True)
    sig_r = torch.tensor(0.5)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-2, atol=1e-3))


def gradient_r(layer_bf, use_gpu):
    # Check the gradient with respect to the range (intensity) filter parameter sigma_range.
    tensor_in = torch.randn(2, 1, 10, 10, 10)
    tensor_guidance = torch.randn(2, 1, 10, 10, 10)
    if use_gpu:
        tensor_in = tensor_in.cuda()
        tensor_guidance = tensor_guidance.cuda()
    sig_x = torch.tensor(1.1)
    sig_y = torch.tensor(1.1)
    sig_z = torch.tensor(1.1)
    sig_r = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
    print(gradcheck(layer_bf, (tensor_in, tensor_guidance, sig_x, sig_y, sig_z, sig_r), eps=1e-3, atol=1e-3))

# Get the joint bilateral filter functions. Custom torch.autograd.Function implementations
# are invoked through their .apply attribute.
layer_bf_gpu = JointBilateralFilterFunction3dGPU.apply
layer_bf_cpu = JointBilateralFilterFunction3dCPU.apply
# Note that eps and atol are chosen according to the investigated parameter, as the
# filter parameters operate on different scales/regimes. For example, eps
# (step size for the numerical gradient) must be chosen fairly large for the spatial
# parameters in order to get meaningful gradients. The tolerance parameter atol
# is chosen according to the magnitude of the numerical gradients, determined by eps.
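# To make this concrete: gradcheck approximates each derivative with a central difference,
#     (f(p + eps) - f(p - eps)) / (2 * eps),
# and compares it to the analytical gradient. Perturbing a spatial sigma of 1.1 by only 1e-6
# barely changes the Gaussian spatial kernel, so the numerical gradient would be dominated by
# rounding noise; hence the larger eps (and matching atol) used in gradient_x/y/z/r above.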
print('-------------------------------------------------------------')
print('Gradcheck is passed if no error occurs and \'True\' is printed.')
print('-------------------------------------------------------------')
print('Gradient with respect to input:')
gradient_input(layer_bf_gpu, use_gpu=True)
gradient_input(layer_bf_cpu, use_gpu=False)
print('Gradient with respect to guidance:')
gradient_guidance(layer_bf_gpu, use_gpu=True)
gradient_guidance(layer_bf_cpu, use_gpu=False)
print('Gradient with respect to sigma_x:')
gradient_x(layer_bf_gpu, use_gpu=True)
gradient_x(layer_bf_cpu, use_gpu=False)
print('Gradient with respect to sigma_y:')
gradient_y(layer_bf_gpu, use_gpu=True)
gradient_y(layer_bf_cpu, use_gpu=False)
print('Gradient with respect to sigma_z:')
gradient_z(layer_bf_gpu, use_gpu=True)
gradient_z(layer_bf_cpu, use_gpu=False)
print('Gradient with respect to sigma_range:')
gradient_r(layer_bf_gpu, use_gpu=True)
gradient_r(layer_bf_cpu, use_gpu=False)