# model.py — forked from HyeongseokSon1/KPAC (363 lines, ~25 KB).
# NOTE: GitHub page chrome and the scraped line-number gutter were removed;
# only the Python source below is meaningful.
#! /usr/bin/python
# -*- coding: utf8 -*-
import time

import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
def Defocus_Deblur_Net6_ms(t_image, ks=5, bs=2, ch=48, is_train=False, reuse=False, hrg=128, wrg=128, name="deblur_net"):
    """Build the KPAC single-image defocus-deblurring network (2-level encoder).

    Parameters
    ----------
    t_image : tf.Tensor
        Blurry input image, NHWC with 3 channels and spatial size (hrg, wrg)
        — assumed from the 3-channel output conv and the residual add; confirm
        against the caller.
    ks : int
        Kernel size of the weight-shared atrous convolutions in each block.
    bs : int
        Number of kernel-attention (KPAC) blocks.
    ch : int
        Base channel width of the encoder/decoder.
    is_train : bool
        Unused here; kept for signature compatibility with training scripts.
    reuse : bool
        Reuse existing variables under `name` (e.g. for a second tower).
    hrg, wrg : int
        Input height and width; must be divisible by 4.
    name : str
        Variable scope that holds all weights.

    Returns
    -------
    tensorlayer Layer whose ``.outputs`` is the deblurred image (the predicted
    residual added back onto the input).
    """
    w_init = tf.random_normal_initializer(stddev=0.04)
    b_init = tf.constant_initializer(value=0.0)

    # ---- encoder: full res -> 1/2 -> 1/4 ------------------------------------
    with tf.variable_scope(name, reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        n_ref = InputLayer(t_image, name='in')
        n = n_ref
        n = Conv2d(n, ch, (5, 5), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0')
        n = Conv2d(n, ch, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0_2')
        f1 = n  # skip connection, full resolution
        n = Conv2d(n, ch, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2')
        n = Conv2d(n, ch, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2_2')
        f2 = n  # skip connection, 1/2 resolution
        n = Conv2d(n, ch*2, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3')
        n = Conv2d(n, ch*2, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3_2')
        stack = n  # dense concatenation of every block output

    ## kernel-attention (KPAC) blocks
    for i in range(bs):
        # One shared (ks x ks) atrous kernel evaluated at dilation rates 1..5:
        # the first call creates the variables; re-entering the scope with
        # reuse=True applies the SAME weights at the remaining rates.
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn1 = AtrousConv2dLayer(n, ch, (ks, ks), rate=1, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=True):
            tl.layers.set_name_reuse(True)
            nn2 = AtrousConv2dLayer(n, ch, (ks, ks), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn3 = AtrousConv2dLayer(n, ch, (ks, ks), rate=3, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn4 = AtrousConv2dLayer(n, ch, (ks, ks), rate=4, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn5 = AtrousConv2dLayer(n, ch, (ks, ks), rate=5, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn = ConcatLayer([nn1, nn2, nn3, nn4, nn5], 3, 'concat/%s' % (i))

            ## scale attention (spatially varying): one sigmoid weight map per rate
            n_sc = AtrousConv2dLayer(n, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc1/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc2/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc3/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc4/%s' % (i))
            n_sc = Conv2d(n_sc, 5, (5, 5), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sca_c2/%s' % (i))
            fa = n_sc
            # Broadcast the 5 per-rate weights over ch channels each: swap the
            # channel/width axes, nearest-upsample the (now-width) axis by ch,
            # then swap back -> 5*ch channels matching `nn`.
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans/%s' % (i))
            n_sc = UpSampling2dLayer(n_sc, [1, ch], method=1, name='sca_up/%s' % (i))
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans_inv/%s' % (i))
            nn = ElementwiseLayer([nn, n_sc], tf.multiply, 'sca_attention_mul/%s' % (i))

            ## shape attention (global, shared across rates)
            # NOTE: integer division (//) so the pool/upsample sizes stay ints
            # under Python 3 — plain / yields floats and breaks these layers.
            n_sh = MeanPool2d(n, filter_size=(hrg//4, wrg//4), strides=(hrg//4, wrg//4), padding='SAME', name='sha_pool/%s' % (i))
            n_sh = Conv2d(n_sh, ch//4, (1, 1), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c1/%s' % (i))
            n_sh = Conv2d(n_sh, ch, (1, 1), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c2/%s' % (i))
            n_sh = TileLayer(n_sh, [1, 1, 1, 5], name='sha_tile/%s' % (i))
            n_sh = UpSampling2dLayer(n_sh, [hrg//4, wrg//4], is_scale=False, method=1, align_corners=False, name='sha_attention/%s' % (i))
            nn = ElementwiseLayer([nn, n_sh], tf.multiply, 'sha_attention_mul/%s' % (i))

            # Fuse the 5*ch attended features back down to ch*2.
            nn = Conv2d(nn, ch*2, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='cf/%s' % (i))
            n = nn
            stack = ConcatLayer([stack, n], 3, name='dense_concat/%s' % (i))

    # ---- decoder with skip connections --------------------------------------
    with tf.variable_scope(name, reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        n = Conv2d(stack, ch*2, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_1')
        n = Conv2d(n, ch*2, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_2')
        n = DeConv2d(n, ch, (4, 4), (hrg//2, wrg//2), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1')
        n = ConcatLayer([n, f2], 3, name='pre/s1')
        n = Conv2d(n, ch, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1_2')
        n = DeConv2d(n, ch, (4, 4), (hrg, wrg), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2')
        n = ConcatLayer([n, f1], 3, name='pre/s2')
        n = Conv2d(n, 3, (5, 5), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2_2')
        # Global residual: predict a residual and add the input back.
        n = ElementwiseLayer([n, n_ref], tf.add, name='post/s3')
    return n
def Defocus_Deblur_Net6_ds(t_image, ks=5, bs=2, is_train=False, reuse=False, hrg=128, wrg=128, name="deblur_net"):
    """Build the KPAC defocus-deblurring network, deeper variant (3-level encoder).

    Same design as `Defocus_Deblur_Net6_ms` but with fixed channel widths
    (48/96) and one extra stride-2 stage, so the kernel-attention blocks run
    at 1/8 resolution.

    Parameters
    ----------
    t_image : tf.Tensor
        Blurry input image, NHWC with 3 channels and spatial size (hrg, wrg).
    ks : int
        Kernel size of the weight-shared atrous convolutions.
    bs : int
        Number of kernel-attention (KPAC) blocks.
    is_train : bool
        Unused; kept for signature compatibility.
    reuse : bool
        Reuse existing variables under `name`.
    hrg, wrg : int
        Input height and width; must be divisible by 8.
    name : str
        Variable scope that holds all weights.

    Returns
    -------
    tensorlayer Layer whose ``.outputs`` is the deblurred image.
    """
    w_init = tf.random_normal_initializer(stddev=0.04)
    b_init = tf.constant_initializer(value=0.0)

    # ---- encoder: full res -> 1/2 -> 1/4 -> 1/8 -----------------------------
    with tf.variable_scope(name, reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        n_ref = InputLayer(t_image, name='in')
        n = n_ref
        n = Conv2d(n, 48, (5, 5), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0_2')
        f1 = n  # skip connection, full resolution
        n = Conv2d(n, 48, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2_2')
        f2 = n  # skip connection, 1/2 resolution
        n = Conv2d(n, 96, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3_2')
        f3 = n  # skip connection, 1/4 resolution
        n = Conv2d(n, 96, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c4')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c4_2')
        stack = n  # dense concatenation of every block output

    ## kernel-attention (KPAC) blocks at 1/8 resolution
    for i in range(bs):
        # Shared atrous kernel at rates 1..5 (scope re-entered with reuse=True
        # so rates 2..5 reuse the variables created by the rate-1 call).
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn1 = AtrousConv2dLayer(n, 48, (ks, ks), rate=1, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=True):
            tl.layers.set_name_reuse(True)
            nn2 = AtrousConv2dLayer(n, 48, (ks, ks), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn3 = AtrousConv2dLayer(n, 48, (ks, ks), rate=3, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn4 = AtrousConv2dLayer(n, 48, (ks, ks), rate=4, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn5 = AtrousConv2dLayer(n, 48, (ks, ks), rate=5, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn = ConcatLayer([nn1, nn2, nn3, nn4, nn5], 3, 'concat/%s' % (i))

            ## scale attention (spatially varying): one sigmoid weight map per rate
            n_sc = AtrousConv2dLayer(n, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc1/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc2/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc3/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc4/%s' % (i))
            n_sc = Conv2d(n_sc, 5, (5, 5), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sca_c2/%s' % (i))
            fa = n_sc
            # Broadcast each of the 5 per-rate weights over 48 channels via
            # transpose -> nearest-upsample by 48 -> transpose back.
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans/%s' % (i))
            n_sc = UpSampling2dLayer(n_sc, [1, 48], method=1, name='sca_up/%s' % (i))
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans_inv/%s' % (i))
            nn = ElementwiseLayer([nn, n_sc], tf.multiply, 'sca_attention_mul/%s' % (i))

            ## shape attention (global, shared across rates)
            # NOTE: // keeps the pool/upsample sizes integral under Python 3;
            # plain / produced floats and broke these layers.
            n_sh = MeanPool2d(n, filter_size=(hrg//8, wrg//8), strides=(hrg//4, wrg//4), padding='SAME', name='sha_pool/%s' % (i))
            n_sh = Conv2d(n_sh, 12, (1, 1), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c1/%s' % (i))
            n_sh = Conv2d(n_sh, 48, (1, 1), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c2/%s' % (i))
            n_sh = TileLayer(n_sh, [1, 1, 1, 5], name='sha_tile/%s' % (i))
            n_sh = UpSampling2dLayer(n_sh, [hrg//8, wrg//8], is_scale=False, method=1, align_corners=False, name='sha_attention/%s' % (i))
            nn = ElementwiseLayer([nn, n_sh], tf.multiply, 'sha_attention_mul/%s' % (i))

            # Fuse the 5*48 attended features back down to 96.
            nn = Conv2d(nn, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='cf/%s' % (i))
            n = nn
            stack = ConcatLayer([stack, n], 3, name='dense_concat/%s' % (i))

    # ---- decoder with skip connections --------------------------------------
    with tf.variable_scope(name, reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        n = Conv2d(stack, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_1')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_2')
        n = DeConv2d(n, 96, (4, 4), (hrg//4, wrg//4), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d0')
        n = ConcatLayer([n, f3], 3, name='pre/s0')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d0_2')
        n = DeConv2d(n, 48, (4, 4), (hrg//2, wrg//2), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1')
        n = ConcatLayer([n, f2], 3, name='pre/s1')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1_2')
        n = DeConv2d(n, 48, (4, 4), (hrg, wrg), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2')
        n = ConcatLayer([n, f1], 3, name='pre/s2')
        n = Conv2d(n, 3, (5, 5), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2_2')
        # Global residual: add the input back onto the predicted residual.
        n = ElementwiseLayer([n, n_ref], tf.add, name='post/s3')
    return n
def Defocus_Deblur_Net6_ds_dual(t_image_c, t_image_l, t_image_r, ks=5, bs=2, is_train=False, reuse=False, hrg=128, wrg=128, name="deblur_net"):
    """Build the dual-pixel variant of the KPAC deblurring network.

    Identical architecture to `Defocus_Deblur_Net6_ds`, but the encoder input
    is the channel-concatenation of the left/right dual-pixel views, and the
    final residual is added onto the center image.

    Parameters
    ----------
    t_image_c : tf.Tensor
        Center image, NHWC, 3 channels; target of the residual connection.
    t_image_l, t_image_r : tf.Tensor
        Left / right dual-pixel views, same shape as `t_image_c`.
    ks : int
        Kernel size of the weight-shared atrous convolutions.
    bs : int
        Number of kernel-attention (KPAC) blocks.
    is_train : bool
        Unused; kept for signature compatibility.
    reuse : bool
        Reuse existing variables under `name`.
    hrg, wrg : int
        Input height and width; must be divisible by 8.
    name : str
        Variable scope that holds all weights.

    Returns
    -------
    tensorlayer Layer whose ``.outputs`` is the deblurred center image.
    """
    w_init = tf.random_normal_initializer(stddev=0.04)
    b_init = tf.constant_initializer(value=0.0)

    # ---- encoder over the concatenated L/R views ----------------------------
    with tf.variable_scope(name, reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        n_ref = InputLayer(t_image_c, name='in_c')
        n_l = InputLayer(t_image_l, name='in_l')
        n_r = InputLayer(t_image_r, name='in_r')
        n = ConcatLayer([n_l, n_r], 3, name='concat_input')
        n = Conv2d(n, 48, (5, 5), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c0_2')
        f1 = n  # skip connection, full resolution
        n = Conv2d(n, 48, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c2_2')
        f2 = n  # skip connection, 1/2 resolution
        n = Conv2d(n, 96, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c3_2')
        f3 = n  # skip connection, 1/4 resolution
        n = Conv2d(n, 96, (3, 3), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c4')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/c4_2')
        stack = n  # dense concatenation of every block output

    ## kernel-attention (KPAC) blocks at 1/8 resolution
    for i in range(bs):
        # Shared atrous kernel at rates 1..5 (reuse=True shares the rate-1
        # variables with the other four rates).
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn1 = AtrousConv2dLayer(n, 48, (ks, ks), rate=1, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=True):
            tl.layers.set_name_reuse(True)
            nn2 = AtrousConv2dLayer(n, 48, (ks, ks), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn3 = AtrousConv2dLayer(n, 48, (ks, ks), rate=3, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn4 = AtrousConv2dLayer(n, 48, (ks, ks), rate=4, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
            nn5 = AtrousConv2dLayer(n, 48, (ks, ks), rate=5, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=None, name='dc1/%s' % (i))
        with tf.variable_scope(name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)
            nn = ConcatLayer([nn1, nn2, nn3, nn4, nn5], 3, 'concat/%s' % (i))

            ## scale attention (spatially varying): one sigmoid weight map per rate
            n_sc = AtrousConv2dLayer(n, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc1/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 32, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc2/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc3/%s' % (i))
            n_sc = AtrousConv2dLayer(n_sc, 16, (5, 5), rate=2, act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sca_dc4/%s' % (i))
            n_sc = Conv2d(n_sc, 5, (5, 5), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sca_c2/%s' % (i))
            fa = n_sc
            # Broadcast each of the 5 per-rate weights over 48 channels via
            # transpose -> nearest-upsample by 48 -> transpose back.
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans/%s' % (i))
            n_sc = UpSampling2dLayer(n_sc, [1, 48], method=1, name='sca_up/%s' % (i))
            n_sc = TransposeLayer(n_sc, [0, 1, 3, 2], 'sca_trans_inv/%s' % (i))
            nn = ElementwiseLayer([nn, n_sc], tf.multiply, 'sca_attention_mul/%s' % (i))

            ## shape attention (global, shared across rates)
            # NOTE: // keeps the pool/upsample sizes integral under Python 3;
            # plain / produced floats and broke these layers.
            n_sh = MeanPool2d(n, filter_size=(hrg//8, wrg//8), strides=(hrg//4, wrg//4), padding='SAME', name='sha_pool/%s' % (i))
            n_sh = Conv2d(n_sh, 12, (1, 1), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c1/%s' % (i))
            n_sh = Conv2d(n_sh, 48, (1, 1), (1, 1), act=tf.nn.sigmoid, padding='SAME', W_init=w_init, b_init=b_init, name='sha_c2/%s' % (i))
            n_sh = TileLayer(n_sh, [1, 1, 1, 5], name='sha_tile/%s' % (i))
            n_sh = UpSampling2dLayer(n_sh, [hrg//8, wrg//8], is_scale=False, method=1, align_corners=False, name='sha_attention/%s' % (i))
            nn = ElementwiseLayer([nn, n_sh], tf.multiply, 'sha_attention_mul/%s' % (i))

            # Fuse the 5*48 attended features back down to 96.
            nn = Conv2d(nn, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='cf/%s' % (i))
            n = nn
            stack = ConcatLayer([stack, n], 3, name='dense_concat/%s' % (i))

    # ---- decoder with skip connections --------------------------------------
    with tf.variable_scope(name, reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        n = Conv2d(stack, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_1')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/cm_2')
        n = DeConv2d(n, 96, (4, 4), (hrg//4, wrg//4), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d0')
        n = ConcatLayer([n, f3], 3, name='pre/s0')
        n = Conv2d(n, 96, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d0_2')
        n = DeConv2d(n, 48, (4, 4), (hrg//2, wrg//2), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1')
        n = ConcatLayer([n, f2], 3, name='pre/s1')
        n = Conv2d(n, 48, (3, 3), (1, 1), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d1_2')
        n = DeConv2d(n, 48, (4, 4), (hrg, wrg), (2, 2), act=tf.nn.leaky_relu, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2')
        n = ConcatLayer([n, f1], 3, name='pre/s2')
        n = Conv2d(n, 3, (5, 5), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=b_init, name='deblur/d2_2')
        # Global residual onto the CENTER image (not the L/R inputs).
        n = ElementwiseLayer([n, n_ref], tf.add, name='post/s3')
    return n
def Vgg19_simple_api2(rgb, reuse):
    """
    Build the VGG 19 Model

    Parameters
    -----------
    rgb : rgb image placeholder [batch, height, width, 3] values scaled [0, 1]

    Returns the final pooled feature map and the conv4_4 activations
    (pre-pool4), as used for a perceptual loss.
    """
    # ImageNet per-channel means in BGR order (B, G, R).
    VGG_MEAN = [103.939, 116.779, 123.68]
    with tf.variable_scope("VGG19", reuse=reuse) as vs:
        tl.layers.set_name_reuse(reuse)
        # NOTE(review): requires `import time` at module level — it was
        # missing from the original import block.
        start_time = time.time()
        print("build model started")
        # Clamp to [0, 1] then rescale to [0, 255] to match VGG preprocessing.
        rgb = tf.maximum(0.0,tf.minimum(rgb,1.0))
        rgb_scaled = rgb * 255.0
        # Convert RGB to BGR
        # NOTE(review): lexicographic string comparison of tf.__version__ is a
        # legacy TF-0.x check; only the else-branch runs on TF 1.x.
        if tf.__version__ <= '0.11':
            red, green, blue = tf.split(3, 3, rgb_scaled)
        else: # TF 1.0
            # print(rgb_scaled)
            red, green, blue = tf.split(rgb_scaled, 3, 3)
        # assert red.get_shape().as_list()[1:] == [224, 224, 1]
        # assert green.get_shape().as_list()[1:] == [224, 224, 1]
        # assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        if tf.__version__ <= '0.11':
            bgr = tf.concat(3, [
                blue - VGG_MEAN[0],
                green - VGG_MEAN[1],
                red - VGG_MEAN[2],
            ])
        else:
            # Subtract the per-channel mean and reorder channels to BGR.
            bgr = tf.concat([
                blue - VGG_MEAN[0],
                green - VGG_MEAN[1],
                red - VGG_MEAN[2],
            ], axis=3)
        # assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
        """ input layer """
        net_in = InputLayer(bgr, name='input')
        """ conv1 """
        network = Conv2d(net_in, n_filter=64, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv1_1')
        network = Conv2d(network, n_filter=64, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv1_2')
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                padding='SAME', name='pool1')
        """ conv2 """
        network = Conv2d(network, n_filter=128, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv2_1')
        network = Conv2d(network, n_filter=128, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv2_2')
        conv2 = network  # captured but not returned
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                padding='SAME', name='pool2')
        """ conv3 """
        network = Conv2d(network, n_filter=256, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv3_1')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv3_2')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv3_3')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv3_4')
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                padding='SAME', name='pool3')
        conv3 = network  # captured but not returned
        """ conv4 """
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv4_1')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv4_2')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv4_3')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv4_4')
        conv4 = network  # conv4_4 activations before pool4; second return value
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                padding='SAME', name='pool4') # (batch_size, 14, 14, 512)
        """ conv5 """
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv5_1')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv5_2')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv5_3')
        network = Conv2d(network, n_filter=512, filter_size=(3, 3),
                strides=(1, 1), act=tf.nn.relu,padding='SAME', name='conv5_4')
        conv5 = network  # captured but not returned
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                padding='SAME', name='pool5') # (batch_size, 7, 7, 512)
        """ fc 6~8 """
        # Classifier head intentionally omitted — only features are needed.
        # network = FlattenLayer(network, name='flatten')
        # network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc6')
        # network = DenseLayer(network, n_units=4096, act=tf.nn.relu, name='fc7')
        # network = DenseLayer(network, n_units=1000, act=tf.identity, name='fc8')
        print("build model finished: %fs" % (time.time() - start_time))
        return network, conv4