"""
Example script: Optimizing the parameters of a joint bilateral filter layer for image denoising.
Author: Fabian Wagner
Contact: fabian.wagner@fau.de
"""
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from joint_bilateral_filter_layer import JointBilateralFilter3d
import time
from skimage.data import camera
#############################################################
#### PARAMETERS (to be modified) ####
#############################################################
# Set device.
use_gpu = True
# Filter parameter initialization.
sigma_x = 1.0
sigma_y = 1.0
sigma_z = 1.0
sigma_r = 0.01
# Image parameters.
downsample_factor = 2
n_slices = 1
# Training parameters.
n_epochs = 1000
#############################################################
if use_gpu:
    dev = "cuda"
else:
    dev = "cpu"
# Initialize filter layer.
layer_JBF = JointBilateralFilter3d(sigma_x, sigma_y, sigma_z, sigma_r, use_gpu=use_gpu)
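# Optional sanity check (not part of the original script): the sigma values are
# registered as learnable parameters of the layer and can be inspected here.
# The exact parameter names depend on the layer implementation.
for name, param in layer_JBF.named_parameters():
    print(name, param.data)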
# Load cameraman image.
image = camera()[::downsample_factor, ::downsample_factor]
target = torch.tensor(image).unsqueeze(2).repeat(1, 1, n_slices).unsqueeze(0).unsqueeze(0)
target = target / torch.max(target)
# Prepare noisy input.
noise = 0.1 * torch.randn(target.shape)
tensor_in = (target + noise).to(dev)
target = target.to(dev)
tensor_in.requires_grad = True
print("Input shape: {}".format(tensor_in.shape))
# Prepare guidance input.
noise_guidance = 0.1 * torch.randn(target.shape).to(dev)
tensor_guidance = (target + noise_guidance).to(dev)
tensor_guidance.requires_grad = True
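# Optional sanity check (not part of the original script): the guidance tensor
# is expected to match the noisy input in shape and device.
assert tensor_guidance.shape == tensor_in.shape
assert tensor_guidance.device == tensor_in.device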
# Define optimizer and loss.
optimizer = optim.Adam(layer_JBF.parameters(), lr=0.1)
loss_function = nn.MSELoss()
# Training loop.
for i in range(n_epochs):
    optimizer.zero_grad()
    prediction = layer_JBF(tensor_in, tensor_guidance)
    loss = loss_function(prediction, target)
    loss.backward()
    optimizer.step()
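    # Optional progress logging (not part of the original script): report the
    # training loss every 100 epochs to monitor convergence.
    if (i + 1) % 100 == 0:
        print("Epoch {}/{}, loss: {:.6f}".format(i + 1, n_epochs, loss.item()))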
print("Sigma x: {}".format(layer_JBF.sigma_x))
print("Sigma y: {}".format(layer_JBF.sigma_y))
print("Sigma z: {}".format(layer_JBF.sigma_z))
print("Sigma range: {}".format(layer_JBF.color_sigma))
# Visual results.
vmin_img = 0
vmax_img = 1
idx_center = int(tensor_in.shape[4] / 2)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(7, 3))
axes[0].imshow(tensor_in[0, 0, :, :, idx_center].detach().cpu(), vmin=vmin_img, vmax=vmax_img, cmap='gray')
axes[0].set_title('Noisy input', fontsize=14)
axes[0].axis('off')
axes[1].imshow(prediction[0, 0, :, :, idx_center].detach().cpu(), vmin=vmin_img, vmax=vmax_img, cmap='gray')
axes[1].set_title('Filtered output', fontsize=14)
axes[1].axis('off')
axes[2].imshow(target[0, 0, :, :, idx_center].detach().cpu(), vmin=vmin_img, vmax=vmax_img, cmap='gray')
axes[2].set_title('Ground truth', fontsize=14)
axes[2].axis('off')
# plt.savefig('out/example_optimization.png')
plt.show()