NN.py
# TensorFlow 1.x (graph-mode) layer helpers.
import math

import tensorflow as tf


def weight(name, shape, init='he'):
    # He initialization for a 2-D weight matrix: stddev = sqrt(2 / fan_in).
    assert init == 'he' and len(shape) == 2
    var = tf.get_variable(name, shape,
                          initializer=tf.random_normal_initializer(stddev=math.sqrt(2.0 / shape[0])))
    # Register the weight's L2 term; sum the 'l2' collection into the loss to regularize.
    tf.add_to_collection('l2', tf.nn.l2_loss(var))
    return var


def bias(name, dim, initial_value=1e-2):
    # A small positive bias keeps ReLU/ELU units active at initialization.
    return tf.get_variable(name, dim, initializer=tf.constant_initializer(initial_value))


def embedding(name, shape):
    # Uniform initialization scaled by the embedding dimension (shape[1]).
    var = tf.get_variable(name, shape,
                          initializer=tf.random_uniform_initializer(minval=-1.0 / shape[1],
                                                                    maxval=1.0 / shape[1]))
    tf.add_to_collection('l2', tf.nn.l2_loss(var))
    return var


def fully_connected(inputs, num_neurons, name, activation='elu'):
    func = {'linear': tf.identity, 'sigmoid': tf.nn.sigmoid, 'tanh': tf.nn.tanh,
            'relu': tf.nn.relu, 'elu': tf.nn.elu}
    W = weight(name + '_W', [inputs.get_shape().as_list()[1], num_neurons], init='he')
    l = tf.matmul(inputs, W) + bias(name + '_b', num_neurons)
    return func[activation](l)


def dropout(x, keep_prob, training):
    # Apply dropout only when the boolean `training` tensor is True.
    return tf.cond(training, lambda: tf.nn.dropout(x, keep_prob), lambda: tf.identity(x))


# Assumes 2-D input whose first dimension is the batch size. Statistics come
# from the current batch only (no moving averages), so inference behaves like
# training-mode batch norm.
def batch_normalization(x, name):
    with tf.variable_scope('BN'):
        shape = x.get_shape()
        beta = tf.get_variable(name + '_beta', shape[-1], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable(name + '_gamma', shape[-1], initializer=tf.constant_initializer(1.))
        mean, variance = tf.nn.moments(x, [0])
        return tf.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-3)
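
A minimal usage sketch of how these helpers compose, assuming TensorFlow 1.x graph mode; the placeholder shapes, layer sizes, keep probability, and L2 coefficient below are illustrative assumptions, not values from this file:

x = tf.placeholder(tf.float32, [None, 784], name='x')  # hypothetical input width
labels = tf.placeholder(tf.int64, [None], name='labels')
is_training = tf.placeholder(tf.bool, [], name='is_training')

h = fully_connected(x, 256, 'fc1', activation='elu')
h = batch_normalization(h, 'fc1')
h = dropout(h, keep_prob=0.5, training=is_training)
logits = fully_connected(h, 10, 'fc2', activation='linear')

# Cross-entropy plus the L2 terms accumulated by weight() and embedding().
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
loss += 1e-4 * tf.add_n(tf.get_collection('l2'))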