# tests.py
from three_layer_network import build_model, relu, relu_derivative, feed_forward, \
    calculate_loss, backprop, train
# from three_layer_network import *
import matplotlib.pyplot as plt
from sklearn import datasets
import numpy as np

"""
To reproduce these tests, modify three_layer_network.py by commenting out
'while done == True', uncommenting 'while i < 150', and then changing
'if i % 1000 == 0' to 'if i % 150 == 0'.
"""
def num_observations():
    """Compare training loss curves for datasets of increasing size."""
    obs_values = [10, 100, 1000]
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01  # regularization strength
    losses_store = []
    for n_obs in obs_values:
        X, y = datasets.make_moons(n_obs, noise=0.1)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda, learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'n_observations = ' + str(obs_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()

def noise():
    """Compare training loss curves for increasing levels of dataset noise."""
    noise_values = [0.01, 0.1, 0.2, 0.3, 0.4]
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01  # regularization strength
    losses_store = []
    for noise_val in noise_values:
        X, y = datasets.make_moons(200, noise=noise_val)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda, learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'noise_value = ' + str(noise_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()

def reg():
    """Compare training loss curves for different regularization strengths."""
    reg_values = [0.00, 0.01, 0.1, 0.2, 0.3]
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    losses_store = []
    for reg_val in reg_values:
        reg_lambda = reg_val  # regularization strength
        X, y = datasets.make_moons(200, noise=0.2)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda, learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'regularization_value = ' + str(reg_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()

def lr():
    """Compare training loss curves for different learning rates."""
    lr_values = [0.001, 0.01, 0.05]
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    reg_lambda = 0.01  # regularization strength
    losses_store = []
    for lr_val in lr_values:
        learning_rate = lr_val  # learning rate for gradient descent
        X, y = datasets.make_moons(200, noise=0.2)
        num_examples = len(X)  # training set size
        model = build_model(X, 32, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda, learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'learning rate = ' + str(lr_values[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()

def test_num_nodes():
    """Compare training loss curves for different hidden layer sizes."""
    X, y = datasets.make_moons(400, noise=0.2)
    num_examples = len(X)  # training set size
    nn_input_dim = 2  # input layer dimensionality
    nn_output_dim = 2  # output layer dimensionality
    learning_rate = 0.01  # learning rate for gradient descent
    reg_lambda = 0.01  # regularization strength
    node_vals = [4, 8, 16, 32, 64, 128]
    losses_store = []
    for val in node_vals:
        model = build_model(X, val, 2)
        model, losses = train(model, X, y, reg_lambda=reg_lambda, learning_rate=learning_rate)
        losses_store.append(losses)
        print(losses)
    x = np.linspace(0, 145, 30)
    for i in range(len(losses_store)):
        lab = 'n_nodes = ' + str(node_vals[i])
        plt.plot(x, losses_store[i], label=lab)
    plt.legend()
    plt.show()

print "number of observations:"
num_observations()
print 'noise:'
noise()
print 'regularization:'
reg()
print 'learning rate:'
lr()
print 'hidden nodes:'
test_num_nodes()