forked from RyanZotti/Self-Driving-Car
-
Notifications
You must be signed in to change notification settings - Fork 0
/
train_mlp.py
51 lines (40 loc) · 1.69 KB
/
train_mlp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
"""Train a small MLP to predict one of 3 driving commands from camera frames.

Flat TF1-style training script: builds a 240x320x3 -> 32 -> 32 -> 3 network
(two tanh hidden layers), then hands the graph to Trainer, which owns the
training loop, checkpointing, and optional S3 sync.
"""
import tensorflow as tf
from Trainer import Trainer, parse_args
import os
from model import *

# Command-line configuration; see Trainer.parse_args for the full option set.
args = parse_args()
data_path = args['datapath']
epochs = args['epochs']
s3_bucket = args['s3_bucket']
show_speed = args['show_speed']
s3_sync = args['s3_sync']

sess = tf.InteractiveSession(config=tf.ConfigProto())

# Inputs: RGB frames and 3-class targets (presumably one-hot; softmax
# cross-entropy below expects a probability distribution per row).
x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 3], name='y_')

# Flatten each frame into a single 230,400-feature vector for dense layers.
x_shaped = tf.reshape(x, [-1, 240 * 320 * 3])

# Hidden layer 1: 230400 -> 32, tanh. weight_variable / bias_variable come
# from the star import of model.
W1 = weight_variable('layer1', [240 * 320 * 3, 32])
b1 = bias_variable('layer1', [32])
h1 = tf.tanh(tf.matmul(x_shaped, W1) + b1)

# Hidden layer 2: 32 -> 32, tanh.
W2 = weight_variable('layer2', [32, 32])
b2 = bias_variable('layer2', [32])
h2 = tf.tanh(tf.matmul(h1, W2) + b2)

# Output layer: 32 -> 3 raw logits; softmax is applied inside the loss op.
W3 = weight_variable('layer3', [32, 3])
b3 = bias_variable('layer3', [3])
logits = tf.add(tf.matmul(h2, W3), b3, name='logits')

# NOTE(review): softmax_cross_entropy_with_logits is deprecated in later TF1
# releases in favor of the _v2 variant. Behavior is identical here because the
# labels come from a placeholder (no gradient can flow into them anyway), but
# confirm before upgrading TF.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
train_step = tf.train.AdamOptimizer(1e-5, name='train_step').minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

# Absolute path of this script, passed to Trainer (presumably so the model
# definition can be recorded alongside checkpoints — verify in Trainer).
# Was: dirname(realpath(__file__)) + '/' + basename(__file__), which is
# equivalent except when the script is invoked through a symlink.
model_file = os.path.realpath(__file__)

trainer = Trainer(data_path=data_path,
                  model_file=model_file,
                  s3_bucket=s3_bucket,
                  epochs=epochs,
                  max_sample_records=1000,
                  show_speed=show_speed,
                  s3_sync=s3_sync)

# No extra feed values beyond x / y_ are needed (e.g. no dropout keep-prob),
# so both feed dicts are empty.
trainer.train(sess=sess, x=x, y_=y_,
              accuracy=accuracy,
              train_step=train_step,
              train_feed_dict={},
              test_feed_dict={})