Main.py
# importing libraries
import os
import re
import pickle
import numpy as np
import openai
from tqdm.notebook import tqdm
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.layers import Input, Dense, LSTM, Embedding, Dropout, add
from PIL import Image
import matplotlib.pyplot as plt
# ====================================================== Loading the VGG16 model
model = VGG16()
# restructure the model so that the second-to-last layer (fc2, 4096-d) is the output, used as the image feature extractor
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
print(model.summary())
# ===================================================== Setting up directories (this project was created in Kaggle, so directories are set accordingly)
base_dir = '/kaggle/input/flickr8k'
working_dir = '/kaggle/working'
features = {}
directory = os.path.join(base_dir, 'Images')
# ===================================================== Extracting features from the images
for img_name in tqdm(os.listdir(directory)):
    img_path = directory + '/' + img_name
    image = load_img(img_path, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    feature = model.predict(image, verbose=0)
    image_id = img_name.split('.')[0]
    features[image_id] = feature
# ====================================================== Saving features
with open(os.path.join(working_dir, 'features.pkl'), 'wb') as f:
    pickle.dump(features, f)
# ====================================================== Loading saved features (precomputed features from a separate Kaggle input dataset)
with open('/kaggle/input/featuresarmaanaura-pkl/features.pkl', 'rb') as f:
    features = pickle.load(f)
# ======================================================= Loading captions data
with open(os.path.join(base_dir, 'captions.txt'), 'r') as f:
    next(f)  # skip the header line
    captions_doc = f.read()
mapping = {}
for line in tqdm(captions_doc.split('\n')):
    tokens = line.split(',')
    if len(tokens) < 2:
        continue
    image_id, caption = tokens[0], tokens[1:]
    image_id = image_id.split('.')[0]
    caption = " ".join(caption)
    if image_id not in mapping:
        mapping[image_id] = []
    mapping[image_id].append(caption)
# ====================================================== Cleaning the captions data
def clean(mapping):
    for key, captions in mapping.items():
        for i in range(len(captions)):
            # take one caption at a time
            caption = captions[i]
            # preprocessing steps
            # convert to lowercase
            caption = caption.lower()
            # delete digits, special chars, etc. (str.replace does not take a regex, so use re.sub)
            caption = re.sub(r'[^a-z ]', '', caption)
            # collapse additional spaces
            caption = re.sub(r'\s+', ' ', caption)
            # add start and end tags to the caption
            caption = 'startseq ' + " ".join([word for word in caption.split() if len(word) > 1]) + ' endseq'
            captions[i] = caption

# apply the cleaning (the rest of the pipeline relies on the startseq/endseq tags added here)
clean(mapping)
all_captions = []
for key in mapping:
    for caption in mapping[key]:
        all_captions.append(caption)
#====================================================== Tokenizing the text
tokenizer = Tokenizer()
tokenizer.fit_on_texts(all_captions)
vocab_size = len(tokenizer.word_index) + 1
max_length = max(len(caption.split()) for caption in all_captions)
#====================================================== Train Test Split
image_ids = list(mapping.keys())
split = int(len(image_ids) * 0.90)
train = image_ids[:split]
test = image_ids[split:]
# ===================================================== Creating a data generator to get data in batches (avoids session crash)
def data_generator(data_keys, mapping, features, tokenizer, max_length, vocab_size, batch_size):
    # loop over images
    X1, X2, y = list(), list(), list()
    n = 0
    while True:
        for key in data_keys:
            n += 1
            captions = mapping[key]
            # process each caption
            for caption in captions:
                # encode the sequence
                seq = tokenizer.texts_to_sequences([caption])[0]
                # split the sequence into X, y pairs
                for i in range(1, len(seq)):
                    # split into input and output pairs
                    in_seq, out_seq = seq[:i], seq[i]
                    # pad input sequence
                    in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
                    # encode output sequence
                    out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
                    # store the sequences
                    X1.append(features[key][0])
                    X2.append(in_seq)
                    y.append(out_seq)
            if n == batch_size:
                X1, X2, y = np.array(X1), np.array(X2), np.array(y)
                yield [X1, X2], y
                X1, X2, y = list(), list(), list()
                n = 0
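# ====================================================== (Optional) Sanity-checking the generator -- a minimal sketch, not part of the original pipeline.
# Uncomment to peek at one yielded batch: each batch covers `batch_size` images, with one training pair per word position per caption,
# so the first dimension varies; the remaining dimensions should be 4096, max_length and vocab_size respectively.
# gen = data_generator(train, mapping, features, tokenizer, max_length, vocab_size, batch_size=32)
# (X1_batch, X2_batch), y_batch = next(gen)
# print(X1_batch.shape, X2_batch.shape, y_batch.shape)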
#===================================================== Model creation
# encoder model
# image feature layers
inputs1 = Input(shape=(4096,))
fe1 = Dropout(0.4)(inputs1)
fe2 = Dense(256, activation='relu')(fe1)
# sequence feature layers
inputs2 = Input(shape=(max_length,))
se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
se2 = Dropout(0.4)(se1)
se3 = LSTM(256)(se2)
# decoder model
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
outputs = Dense(vocab_size, activation='softmax')(decoder2)
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')
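# Optionally visualize the encoder-decoder architecture; plot_model was imported above but is otherwise unused.
# A hedged sketch, commented out because it needs pydot and graphviz installed in the environment.
# plot_model(model, show_shapes=True)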
#====================================================== Training the model
epochs = 20
batch_size = 32
steps = len(train) // batch_size
for i in range(epochs):
    # create data generator
    generator = data_generator(train, mapping, features, tokenizer, max_length, vocab_size, batch_size)
    # fit for one epoch
    model.fit(generator, epochs=1, steps_per_epoch=steps, verbose=1)
#====================================================== Saving the model
model.save(working_dir+'/final_model.h5')
#====================================================== Predicting Captions
def idx_to_word(integer, tokenizer):
    for word, index in tokenizer.word_index.items():
        if index == integer:
            return word
    return None
# generate caption for an image
def predict_caption(model, image, tokenizer, max_length):
    # add start tag for the generation process
    in_text = 'startseq'
    # iterate over the max length of the sequence
    for i in range(max_length):
        # encode input sequence
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        # pad the sequence
        sequence = pad_sequences([sequence], maxlen=max_length)
        # predict the next word
        yhat = model.predict([image, sequence], verbose=0)
        # get the index with the highest probability
        yhat = np.argmax(yhat)
        # convert index to word
        word = idx_to_word(yhat, tokenizer)
        # stop if the word is not found
        if word is None:
            break
        # append word as input for generating the next word
        in_text += " " + word
        # stop if we reach the end tag
        if word == 'endseq':
            break
    return in_text
from nltk.translate.bleu_score import corpus_bleu
# validate with test data
actual, predicted = list(), list()
for key in tqdm(test):
    # get the actual captions
    captions = mapping[key]
    # predict the caption for the image
    y_pred = predict_caption(model, features[key], tokenizer, max_length)
    # split into words
    actual_captions = [caption.split() for caption in captions]
    y_pred = y_pred.split()
    # append to the lists
    actual.append(actual_captions)
    predicted.append(y_pred)
# Calculate BLEU scores
print("BLEU-1: %f" % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
print("BLEU-2: %f" % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
# ====================================================== Improving output with the help of the OpenAI API
def modifiedByOpenAI(string):
    openai.api_key = "Enter Your API KEY HERE"
    prompt = "Without changing the narrative, make the sentence grammatically correct: " + string
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        temperature=1,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    output = response["choices"][0]["text"]
    # drop the leading characters (the completion typically starts with a newline pair)
    return output[2:]
# ====================================================== Predicting the caption of the given image in raw form
def predictRawCaption(image_name):
    image_id = image_name.split('.')[0]
    img_path = os.path.join(base_dir, "Images", image_name)
    image = Image.open(img_path)
    y_pred = predict_caption(model, features[image_id], tokenizer, max_length)
    print(y_pred)
    plt.imshow(image)
    return y_pred
# ====================================================== Combining all functions in one
def showCaption(image_name):
    rawString = predictRawCaption(image_name)
    # trimming the 'startseq ' and ' endseq' tags from the raw caption returned by predictRawCaption
    prompt = rawString[9:len(rawString) - 7]
    caption = modifiedByOpenAI(prompt)
    print("Caption for the given image is: " + caption)
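# ====================================================== Example usage -- a minimal sketch, assuming a valid OpenAI API key has been set in
# modifiedByOpenAI and that the image name below exists in the Flickr8k Images directory (an illustrative file name, not verified).
# showCaption('1000268201_693b08cb0e.jpg')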