Commit 159e89d: Add files via upload
vinayakumarr authored Mar 30, 2018
1 parent 24a1318
Showing 14 changed files with 3,380 additions and 0 deletions.
126 changes: 126 additions & 0 deletions Bidirectioanl GRU.ipynb
@@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Bidirectional GRU"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"from sklearn.cross_validation import train_test_split\n",
"import pandas as pd\n",
"import numpy as np\n",
"np.random.seed(1337) # for reproducibility\n",
"from keras.preprocessing import sequence\n",
"from keras.utils import np_utils\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Activation, Embedding\n",
"from keras.layers import Dense, Dropout, Embedding, LSTM, Input, Bidirectional\n",
"from keras.datasets import imdb\n",
"from keras.utils.np_utils import to_categorical\n",
"from sklearn.metrics import (precision_score, recall_score,\n",
" f1_score, accuracy_score,mean_squared_error,mean_absolute_error)\n",
"from sklearn import metrics\n",
"from sklearn.preprocessing import Normalizer\n",
"import h5py\n",
"from keras import callbacks"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset = np.loadtxt(\"pima-indians-diabetes.csv\", delimiter=\",\")\n",
"# split into input (X) and output (Y) variables\n",
"X = dataset[:,0:8]\n",
"Y = dataset[:,8]\n",
"\n",
"#normalize the data\n",
"scaler = Normalizer().fit(X)\n",
"X = scaler.transform(X)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# reshape input to be [samples, time steps, features]\n",
"X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n",
"X_test = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 1. define the network\n",
"model = Sequential()\n",
"model.add(Bidirectional(GRU(4),input_shape=(1, 8)))\n",
"model.add(Dropout(0.1))\n",
"model.add(Dense(1))\n",
"model.add(Activation('sigmoid'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# try using different optimizers and different optimizer configs\n",
"model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
"checkpointer = callbacks.ModelCheckpoint(filepath=\"logs/bidirectioanl-gru/checkpoint-{epoch:02d}.hdf5\", verbose=1, save_best_only=True, monitor='val_acc',mode='max')\n",
"model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, validation_data=(X_test, y_test),callbacks=[checkpointer])\n",
"model.save(\"logs/bidirectioanl-gru/lstm1layer_model.hdf5\")\n",
"\n",
"loss, accuracy = model.evaluate(X_test, y_test)\n",
"print(\"\\nLoss: %.2f, Accuracy: %.2f%%\" % (loss, accuracy*100))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "securetensor",
"language": "python",
"name": "securetensor"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
126 changes: 126 additions & 0 deletions Bidirectional-LSTM.ipynb
@@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Bidirectional LSTM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"from sklearn.cross_validation import train_test_split\n",
"import pandas as pd\n",
"import numpy as np\n",
"np.random.seed(1337) # for reproducibility\n",
"from keras.preprocessing import sequence\n",
"from keras.utils import np_utils\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Activation, Embedding\n",
"from keras.layers import Dense, Dropout, Embedding, LSTM, Input, Bidirectional\n",
"from keras.datasets import imdb\n",
"from keras.utils.np_utils import to_categorical\n",
"from sklearn.metrics import (precision_score, recall_score,\n",
" f1_score, accuracy_score,mean_squared_error,mean_absolute_error)\n",
"from sklearn import metrics\n",
"from sklearn.preprocessing import Normalizer\n",
"import h5py\n",
"from keras import callbacks"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset = np.loadtxt(\"pima-indians-diabetes.csv\", delimiter=\",\")\n",
"# split into input (X) and output (Y) variables\n",
"X = dataset[:,0:8]\n",
"Y = dataset[:,8]\n",
"\n",
"#normalize the data\n",
"scaler = Normalizer().fit(X)\n",
"X = scaler.transform(X)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# reshape input to be [samples, time steps, features]\n",
"X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n",
"X_test = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 1. define the network\n",
"model = Sequential()\n",
"model.add(Bidirectional(LSTM(4),input_shape=(1, 8)))\n",
"model.add(Dropout(0.1))\n",
"model.add(Dense(1))\n",
"model.add(Activation('sigmoid'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# try using different optimizers and different optimizer configs\n",
"model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
"checkpointer = callbacks.ModelCheckpoint(filepath=\"logs/bidirectioanl-lstm/checkpoint-{epoch:02d}.hdf5\", verbose=1, save_best_only=True, monitor='val_acc',mode='max')\n",
"model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, validation_data=(X_test, y_test),callbacks=[checkpointer])\n",
"model.save(\"logs/bidirectioanl-lstm/lstm1layer_model.hdf5\")\n",
"\n",
"loss, accuracy = model.evaluate(X_test, y_test)\n",
"print(\"\\nLoss: %.2f, Accuracy: %.2f%%\" % (loss, accuracy*100))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "securetensor",
"language": "python",
"name": "securetensor"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
126 changes: 126 additions & 0 deletions Bidirectional-RNN.ipynb
@@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Bidirectioanl RNN"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"from sklearn.cross_validation import train_test_split\n",
"import pandas as pd\n",
"import numpy as np\n",
"np.random.seed(1337) # for reproducibility\n",
"from keras.preprocessing import sequence\n",
"from keras.utils import np_utils\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Activation, Embedding\n",
"from keras.layers import Dense, Dropout, Embedding, LSTM, Input, Bidirectional\n",
"from keras.datasets import imdb\n",
"from keras.utils.np_utils import to_categorical\n",
"from sklearn.metrics import (precision_score, recall_score,\n",
" f1_score, accuracy_score,mean_squared_error,mean_absolute_error)\n",
"from sklearn import metrics\n",
"from sklearn.preprocessing import Normalizer\n",
"import h5py\n",
"from keras import callbacks"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dataset = np.loadtxt(\"pima-indians-diabetes.csv\", delimiter=\",\")\n",
"# split into input (X) and output (Y) variables\n",
"X = dataset[:,0:8]\n",
"Y = dataset[:,8]\n",
"\n",
"#normalize the data\n",
"scaler = Normalizer().fit(X)\n",
"X = scaler.transform(X)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# reshape input to be [samples, time steps, features]\n",
"X_train = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n",
"X_test = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 1. define the network\n",
"model = Sequential()\n",
"model.add(Bidirectional(SimpleRNN(4),input_shape=(1, 8)))\n",
"model.add(Dropout(0.1))\n",
"model.add(Dense(1))\n",
"model.add(Activation('sigmoid'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# try using different optimizers and different optimizer configs\n",
"model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
"checkpointer = callbacks.ModelCheckpoint(filepath=\"logs/bidirectioanl-rnn/checkpoint-{epoch:02d}.hdf5\", verbose=1, save_best_only=True, monitor='val_acc',mode='max')\n",
"model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, validation_data=(X_test, y_test),callbacks=[checkpointer])\n",
"model.save(\"logs/bidirectioanl-rnn/lstm1layer_model.hdf5\")\n",
"\n",
"loss, accuracy = model.evaluate(X_test, y_test)\n",
"print(\"\\nLoss: %.2f, Accuracy: %.2f%%\" % (loss, accuracy*100))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "securetensor",
"language": "python",
"name": "securetensor"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}