neuralnet_tf_eager.py — 93 lines (66 loc) · 2.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
from filereader import Filereader
# Enable TF 1.x eager mode so ops execute immediately instead of building a graph.
tf.enable_eager_execution()
tf.executing_eagerly()  # NOTE(review): return value discarded — presumably meant as a sanity check; confirm intent.
# Load the dataset via the project-local Filereader.
# NOTE(review): r, c look like image row/column counts — confirm against Filereader.getData.
f = Filereader('./data/')
x_train, y_train, r,c = f.getData(dataset="training", sample=60000)
x_test, y_test, r,c = f.getData(dataset="testing", sample=10000)
# Hyperparameters.
learning_rate = 0.001
training_epochs = 100  # NOTE(review): unused — the training loop below uses `epochs` instead.
elu_alpha = 1.2  # NOTE(review): unused — tf.nn.elu is used below without an alpha argument.
epochs = 100
batch_size = 250
l2_reg = 1e-4  # L2 strength. NOTE(review): verify the kernel regularizers below actually receive this value.
layer_1_nodes = 200
layer_2_nodes = 200
output_layer_nodes = y_train.shape[1]  # number of classes (labels appear one-hot encoded)
# Eager Batching: shuffle buffer equals one batch, so shuffling is only
# batch-local; repeat() loops the data forever, so epochs are bounded manually
# in the training loop below.
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).\
shuffle(buffer_size=batch_size).\
batch(batch_size=batch_size).\
repeat()
# model
class MNISTModel(tf.keras.Model):
    """Fully-connected classifier: two ELU hidden layers and a linear output.

    The final layer returns raw logits; callers apply softmax (see `predict`)
    or a softmax cross-entropy loss (see `loss`).
    """
    def __init__(self):
        super(MNISTModel, self).__init__()
        # He-normal init suits ELU/ReLU-family activations; seed fixed for
        # reproducibility.
        # BUG FIX: the original called tf.keras.regularizers.l2() with no
        # argument, so the module-level l2_reg constant (1e-4) was silently
        # ignored and the Keras default strength (0.01) used instead — pass
        # l2_reg explicitly.
        self.dense1 = tf.layers.Dense(
            units=layer_1_nodes,
            kernel_initializer=tf.keras.initializers.he_normal(seed=1),
            activation=tf.nn.elu,
            kernel_regularizer=tf.keras.regularizers.l2(l2_reg))
        self.dense2 = tf.layers.Dense(
            units=layer_2_nodes,
            kernel_initializer=tf.keras.initializers.he_normal(seed=1),
            activation=tf.nn.elu,
            kernel_regularizer=tf.keras.regularizers.l2(l2_reg))
        # Output layer has no activation: it emits logits.
        self.dense3 = tf.layers.Dense(
            units=output_layer_nodes,
            kernel_initializer=tf.keras.initializers.he_normal(seed=1),
            kernel_regularizer=tf.keras.regularizers.l2(l2_reg))
    def call(self, inputs):
        """Forward pass: (batch, features) -> (batch, num_classes) logits."""
        # Renamed `input` -> `inputs` to stop shadowing the builtin; Keras
        # invokes call() positionally via __call__, so callers are unaffected.
        hidden = self.dense1(inputs)
        hidden = self.dense2(hidden)
        return self.dense3(hidden)
# Single module-level model instance; the helpers below receive it explicitly.
model = MNISTModel()
def predict(model, inputs):
    """Return class probabilities: softmax over the model's output logits."""
    logits = model(inputs)
    return tf.nn.softmax(logits)
def loss(model, inputs, targets):
    """Mean softmax cross-entropy between the model's logits and one-hot targets."""
    logits = model(inputs)
    per_example = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                             labels=targets)
    return tf.reduce_mean(per_example)
def accuracy(target, prediction):
    """Fraction of rows whose predicted argmax class matches the target argmax.

    Both arguments are (batch, num_classes) one-hot/probability tensors;
    returns a scalar float32 tensor.
    """
    true_class = tf.argmax(target, axis=1)
    predicted_class = tf.argmax(prediction, axis=1)
    hits = tf.cast(tf.equal(true_class, predicted_class), dtype=tf.float32)
    return tf.reduce_mean(hits)
# Adam optimizer; global_step lets minimize() track iteration count.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
num_batches_per_epoch = x_train.shape[0] // batch_size
for epoch in range(epochs):
    # The dataset repeats forever, so each epoch must be bounded manually.
    for batch, (x_batch, y_batch) in enumerate(tfe.Iterator(dataset)):
        # BUG FIX: the original tested `batch >= num_batches_per_epoch`
        # AFTER calling minimize, so every epoch trained on one extra batch
        # (e.g. 241 steps instead of 240 at 60000/250). Stop before stepping.
        if batch >= num_batches_per_epoch:
            break
        # The lambda is evaluated immediately inside minimize(), so closing
        # over the loop variables here is safe (no late-binding hazard).
        optimizer.minimize(lambda: loss(model, x_batch, y_batch),
                           global_step=tf.train.get_or_create_global_step())
    # NOTE(review): formatting an EagerTensor with ':.5f' may require an
    # explicit float(...) on some TF versions — confirm on the target version.
    print("Training accuracy after step {:03d}: {:.5f}".format(epoch+1, accuracy(y_train, predict(model, x_train))))
print("Testing accuracy {:.5f}".format(accuracy(y_test, predict(model, x_test))))