import tensorflow as tf


def _variable(name, shape, initializer):
    """Helper to create a Variable.

    Args:
        name: name of the Variable
        shape: list of ints
        initializer: initializer for the Variable

    Returns:
        Variable Tensor
    """
    var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
    return var


def _variable_with_weight_decay(name, shape, stddev, wd=0.001):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay term is added to the 'losses' collection only if `wd` is
    specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of the truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    var = _variable(name, shape,
                    tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
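

# Usage sketch (illustrative, not from the original file): the weight-decay
# terms accumulated in the 'losses' collection above are typically summed into
# the training objective alongside the data loss, e.g. (assuming `labels` and
# `logits` tensors):
#
#   data_loss = tf.reduce_mean(
#       tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
#   total_loss = data_loss + tf.add_n(tf.get_collection('losses'))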


def conv2d(scope, input, kernel_size, stride=1, dilation=1, relu=True, wd=0.001):
    """Convolutional layer.

    Args:
        scope: string, variable_scope name
        input: 4-D tensor [batch_size, height, width, in_channel]
        kernel_size: list of 4 ints [k_height, k_width, in_channel, out_channel]
        stride: int32, the stride of the convolution (ignored when dilation > 1)
        dilation: int32, if > 1, an atrous (dilated) convolution is used instead
        relu: boolean, whether ReLU is applied to the output of this layer
        wd: float32, weight decay/regularization coefficient

    Return:
        output: 4-D tensor [batch_size, height / stride, width / stride, out_channel]
    """
    with tf.variable_scope(scope):
        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_size,
                                             stddev=5e-2,
                                             wd=wd)
        if dilation == 1:
            conv = tf.nn.conv2d(input, kernel, [1, stride, stride, 1], padding='SAME')
        else:
            # tf.nn.atrous_conv2d takes no stride argument, so `stride` is ignored here.
            conv = tf.nn.atrous_conv2d(input, kernel, dilation, padding='SAME')
        biases = _variable('biases', kernel_size[3:], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        if relu:
            output = tf.nn.relu(bias)
        else:
            output = bias
        return output
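

# Illustrative usage (a sketch, assuming an NHWC tensor `images` with 3 input
# channels):
#
#   conv1 = conv2d('conv1', images, kernel_size=[3, 3, 3, 64], stride=1)
#
# yields a [batch, height, width, 64] tensor with ReLU applied; pass relu=False
# to get the pre-activation output instead.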


def deconv2d(scope, input, kernel_size, stride=1, wd=0.001):
    """Deconvolutional (transposed convolution) layer for upsampling.

    Args:
        scope: string, variable_scope name
        input: 4-D tensor [batch_size, height, width, in_channel]
        kernel_size: list of 4 ints [k_height, k_width, in_channel, out_channel]
        stride: int32, the stride of the transposed convolution
        wd: float32, weight decay/regularization coefficient

    Return:
        output: 4-D tensor [batch_size, height * stride, width * stride, out_channel]
    """
    batch_size, height, width, in_channel = [int(i) for i in input.get_shape()]
    out_channel = kernel_size[3]
    # tf.nn.conv2d_transpose expects the kernel in
    # [k_height, k_width, out_channel, in_channel] order, so swap the last two entries.
    kernel_size = [kernel_size[0], kernel_size[1], kernel_size[3], kernel_size[2]]
    output_shape = [batch_size, height * stride, width * stride, out_channel]
    with tf.variable_scope(scope):
        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_size,
                                             stddev=5e-2,
                                             wd=wd)
        deconv = tf.nn.conv2d_transpose(input, kernel, output_shape,
                                        [1, stride, stride, 1], padding='SAME')
        biases = _variable('biases', [out_channel], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(deconv, biases)
        output = tf.nn.relu(bias)
        return output
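

# Illustrative usage (a sketch): a stride-2 transposed convolution doubles the
# spatial resolution, e.g. for a [batch, 16, 16, 64] feature map `feat`:
#
#   up = deconv2d('up1', feat, kernel_size=[3, 3, 64, 32], stride=2)
#
# yields a [batch, 32, 32, 32] tensor. The kernel is given in the same
# [k_h, k_w, in_channel, out_channel] order as conv2d; the helper transposes it
# internally for tf.nn.conv2d_transpose.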


def batch_norm(scope, input, train=True, reuse=False):
    """Batch normalization layer.

    Args:
        scope: string, variable_scope name
        input: 4-D tensor [batch_size, height, width, depth]
        train: boolean, whether the layer is in training mode
        reuse: boolean, whether to reuse the layer's variables

    Return:
        output: 4-D tensor with the same shape as `input`
    """
    return tf.contrib.layers.batch_norm(input, center=True, scale=True,
                                        updates_collections=None,
                                        is_training=train, trainable=True,
                                        reuse=reuse, scope=scope)
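

if __name__ == '__main__':
    # Minimal smoke test (an illustrative sketch, not part of the original file):
    # builds conv2d -> batch_norm -> deconv2d on a dummy NHWC input to check that
    # a stride-2 convolution followed by a stride-2 deconvolution restores the
    # spatial resolution. Requires a TF 1.x runtime (tf.placeholder, tf.contrib).
    x = tf.placeholder(tf.float32, [4, 32, 32, 3])
    h = conv2d('conv1', x, kernel_size=[3, 3, 3, 16], stride=2)   # -> [4, 16, 16, 16]
    h = batch_norm('bn1', h, train=True)                          # shape preserved
    y = deconv2d('up1', h, kernel_size=[3, 3, 16, 8], stride=2)   # -> [4, 32, 32, 8]
    print(y.get_shape())  # expected: (4, 32, 32, 8)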