# convolutional_neural_network.py (forked from eriklindernoren/ML-From-Scratch)
from __future__ import print_function
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np

# Import helper functions
from mlfromscratch.deep_learning import NeuralNetwork
from mlfromscratch.utils import train_test_split, to_categorical, Plot
from mlfromscratch.deep_learning.optimizers import Adam
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
from mlfromscratch.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization
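
# Note: this example assumes the `mlfromscratch` package from the upstream
# repository is importable. Under the assumed upstream layout it can be set up
# and run roughly like this (install step and example path are assumptions
# about that repo, not verified here):
#
#   git clone https://github.com/eriklindernoren/ML-From-Scratch
#   cd ML-From-Scratch
#   python setup.py install
#   python mlfromscratch/examples/convolutional_neural_network.py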

def main():

    #----------
    # Conv Net
    #----------
    optimizer = Adam()

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))
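
    # The network below stacks two 'same'-padded 3x3 convolution blocks
    # (Conv2D -> ReLU -> Dropout -> BatchNormalization) on the 1x8x8 digit
    # images, then flattens and classifies with a 256-unit ReLU layer and a
    # 10-way softmax output.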
    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Conv2D(n_filters=16, filter_shape=(3, 3), stride=1, input_shape=(1, 8, 8), padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1, padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.4))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))
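
    # With stride 1 and 'same' padding the spatial size stays 8x8, so the
    # tensor reaching Flatten() is (batch, 32, 8, 8), i.e. 2048 features,
    # before the 256-unit hidden layer and the 10-class softmax.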
    print()
    clf.summary(name="ConvNet")

    # Train for 50 epochs; fit() returns the per-epoch training and validation errors
    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Epochs')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    # Predict class labels and flatten the images back to 64 features for plotting
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8 * 8)

    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Convolutional Neural Network", accuracy=accuracy, legend_labels=range(10))

if __name__ == "__main__":
    main()
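
# The accuracy printed by test_on_batch can be cross-checked directly from the
# predictions (a minimal sketch using only NumPy, with y_test and y_pred as
# defined inside main() above):
#
#   y_true = np.argmax(y_test, axis=1)
#   print("Accuracy:", np.mean(y_true == y_pred))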