# extended-morphological-profiles.py
# Extended Morphological Profiles (EMP) for spectral-spatial classification
# of the Indian Pines hyperspectral scene with an RBF-kernel SVM.
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as io

# Importing the dataset from MATLAB format
dataset = io.loadmat('indianpines_dataset.mat')
number_of_bands = int(dataset['number_of_bands'])
number_of_rows = int(dataset['number_of_rows'])
number_of_columns = int(dataset['number_of_columns'])
pixels = np.transpose(dataset['pixels'])  # shape: (rows * columns, bands)

# Importing the ground truth (one class label per pixel)
groundtruth = io.loadmat('indianpines_gt.mat')
gt = np.transpose(groundtruth['pixels'])  # shape: (rows * columns, 1)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
pixels = sc.fit_transform(pixels)
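# Optional sanity check (a small sketch, not required by the pipeline): the
# corrected Indian Pines scene is commonly distributed as 145 x 145 pixels
# with 200 bands, so printing the loaded shapes makes a mismatched .mat file
# obvious early.
print(f"Scene: {number_of_rows} rows x {number_of_columns} columns, {number_of_bands} bands")
print(f"pixels: {pixels.shape}, ground truth: {gt.shape}")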
# Visualizing the Indian Pines hyperspectral cube in RGB
indianpines_colors = np.array([[255, 255, 255],
                               [255, 254, 137], [3, 28, 241], [255, 89, 1], [5, 255, 133],
                               [255, 2, 251], [89, 1, 255], [3, 171, 255], [12, 255, 7],
                               [172, 175, 84], [160, 78, 158], [101, 173, 255], [60, 91, 112],
                               [104, 192, 63], [139, 69, 46], [119, 255, 172], [254, 255, 3]])
import sklearn.preprocessing
indianpines_colors = sklearn.preprocessing.minmax_scale(indianpines_colors, feature_range=(0, 1))
pixels_normalized = sklearn.preprocessing.minmax_scale(pixels, feature_range=(0, 1))
gt_thematic_map = np.zeros(shape=(number_of_rows, number_of_columns, 3))
rgb_hyperspectral_image = np.zeros(shape=(number_of_rows, number_of_columns, 3))
cont = 0
for i in range(number_of_rows):
    for j in range(number_of_columns):
        # Three spectral bands picked as the R, G and B channels
        rgb_hyperspectral_image[i, j, 0] = pixels_normalized[cont, 29]
        rgb_hyperspectral_image[i, j, 1] = pixels_normalized[cont, 42]
        rgb_hyperspectral_image[i, j, 2] = pixels_normalized[cont, 89]
        gt_thematic_map[i, j, :] = indianpines_colors[gt[cont, 0]]
        cont += 1
indianpines_class_names = ['background',
                           'alfalfa', 'corn-notill', 'corn-min', 'corn',
                           'grass/pasture', 'grass/trees', 'grass/pasture-mowed', 'hay-windrowed',
                           'oats', 'soybeans-notill', 'soybeans-min', 'soybean-clean',
                           'wheat', 'woods', 'bldg-grass-tree-drives', 'stone-steel towers']
fig = plt.figure(figsize=(15, 15))
columns = 2
rows = 1
fig.add_subplot(rows, columns, 1)
plt.imshow(gt_thematic_map)
fig.add_subplot(rows, columns, 2)
plt.imshow(rgb_hyperspectral_image)
import matplotlib.patches as mpatches
patches = [mpatches.Patch(color=indianpines_colors[i], label=indianpines_class_names[i]) for i in range(len(indianpines_colors))]
plt.legend(handles=patches, loc='lower right', borderaxespad=0.)
plt.show()
# Preprocessing
# Applying Principal Components Analysis (PCA)
from sklearn.decomposition import PCA
number_of_pc = 4
pca = PCA(n_components=number_of_pc)
pc = pca.fit_transform(pixels)
# Visualizing PCs
print(f"The accumulated explained variance for the {number_of_pc} principal components "
      f"is {np.sum(pca.explained_variance_ratio_)}")
print(f"Individual Explained Variance: {pca.explained_variance_ratio_}")
fig = plt.figure(figsize=(15, 15))
columns = number_of_pc
rows = 1
pc_images = np.zeros(shape=(number_of_rows, number_of_columns, number_of_pc))
for i in range(number_of_pc):
    pc_images[:, :, i] = np.reshape(pc[:, i], (number_of_rows, number_of_columns))
    fig.add_subplot(rows, columns, i + 1)
    plt.imshow(pc_images[:, :, i], cmap='gray', interpolation='bicubic')
plt.show()
# Building the Extended Morphological Profiles (EMP)
from skimage.morphology import reconstruction
from skimage.morphology import erosion
from skimage.morphology import disk
from skimage import util


def opening_by_reconstruction(image, se):
    """
    Performs an opening by reconstruction: an erosion followed by a geodesic
    reconstruction by dilation, using the original image as the mask.

    Parameters:
        image: 2D matrix.
        se: structuring element.
    Returns:
        2D matrix of the reconstructed image.
    """
    eroded = erosion(image, se)
    reconstructed = reconstruction(eroded, image)
    return reconstructed
def closing_by_reconstruction(image, se):
    """
    Performs a closing by reconstruction, implemented as an opening by
    reconstruction of the inverted image, inverted back. Note that it operates
    on the opening-by-reconstruction of the input rather than on the raw input.

    Parameters:
        image: 2D matrix.
        se: structuring element.
    Returns:
        2D matrix of the reconstructed image.
    """
    obr = opening_by_reconstruction(image, se)
    obr_inverted = util.invert(obr)
    obr_inverted_eroded = erosion(obr_inverted, se)
    obr_inverted_eroded_rec = reconstruction(
        obr_inverted_eroded, obr_inverted)
    obr_inverted_eroded_rec_inverted = util.invert(obr_inverted_eroded_rec)
    return obr_inverted_eroded_rec_inverted
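# Usage sketch for the two operators above (not executed here): applied to a
# single principal component with a disk of radius 4, which is what
# build_morphological_profiles below does for every structuring-element size.
# opened = opening_by_reconstruction(pc_images[:, :, 0], disk(4))
# closed = closing_by_reconstruction(pc_images[:, :, 0], disk(4))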
def build_morphological_profiles(image, se_size=4, se_size_increment=2, num_openings_closings=4):
    """
    Build the morphological profile for a given image.

    Parameters:
        image: 2D matrix, the spectral information part of the MP.
        se_size: int, initial radius of the structuring element (a disk is used).
        se_size_increment: int, structuring element radius increment step.
        num_openings_closings: int, number of openings and closings by reconstruction to perform.
    Returns:
        mp: 3D matrix with both spectral (from the image) and spatial information.
    """
    x, y = image.shape
    cbr = np.zeros(shape=(x, y, num_openings_closings))
    obr = np.zeros(shape=(x, y, num_openings_closings))
    current_size = se_size
    for it in range(num_openings_closings):
        se = disk(current_size)
        cbr[:, :, it] = closing_by_reconstruction(image, se)
        obr[:, :, it] = opening_by_reconstruction(image, se)
        current_size += se_size_increment

    # Profile layout: closings from the largest to the smallest structuring
    # element, the original image in the middle, then openings from the
    # smallest to the largest structuring element.
    mp = np.zeros(shape=(x, y, (num_openings_closings * 2) + 1))
    cont = num_openings_closings - 1
    for i in range(num_openings_closings):
        mp[:, :, i] = cbr[:, :, cont]
        cont -= 1
    mp[:, :, num_openings_closings] = image[:, :]
    cont = 0
    for i in range(num_openings_closings + 1, num_openings_closings * 2 + 1):
        mp[:, :, i] = obr[:, :, cont]
        cont += 1
    return mp
def build_emp(base_image, se_size=4, se_size_increment=2, num_openings_closings=4):
    """
    Build the extended morphological profile for a given set of images.

    Parameters:
        base_image: 3D matrix; the morphological profile is computed for each
            'channel'. It is the spectral information part of the EMP.
        se_size: int, initial radius of the structuring element (a disk is used).
        se_size_increment: int, structuring element radius increment step.
        num_openings_closings: int, number of openings and closings by reconstruction to perform.
    Returns:
        emp: 3D matrix with both spectral (from the base_image) and spatial information.
    """
    base_image_rows, base_image_columns, base_image_channels = base_image.shape
    morphological_profile_size = (num_openings_closings * 2) + 1
    emp_size = morphological_profile_size * base_image_channels
    emp = np.zeros(
        shape=(base_image_rows, base_image_columns, emp_size))

    # Stack the morphological profile of each channel along the third axis
    for i in range(base_image_channels):
        mp_temp = build_morphological_profiles(
            base_image[:, :, i], se_size, se_size_increment, num_openings_closings)
        start = morphological_profile_size * i
        emp[:, :, start:start + morphological_profile_size] = mp_temp
    return emp
print(f"pc_images shape: {pc_images.shape}")
num_openings_closings = 4
morphological_profile_size = (num_openings_closings * 2) + 1
emp_image = build_emp(base_image=pc_images, num_openings_closings=num_openings_closings)
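# Quick shape check: each of the 4 base images contributes a profile of
# (2 * 4) + 1 = 9 channels, so the EMP should hold 36 channels in total.
assert emp_image.shape == (number_of_rows, number_of_columns,
                           morphological_profile_size * number_of_pc)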
# Visualizing the EMP
fig = plt.figure(figsize=(15, 15))
columns = morphological_profile_size
rows = number_of_pc
print("Number of Base Images: "+str(rows))
print("Morphological Profiles size: "+str(columns))
print("EMP = "+str(emp_image.shape))
emp_size = morphological_profile_size * number_of_pc
for i in range(1, emp_size + 1):
    fig.add_subplot(rows, columns, i)
    plt.imshow(emp_image[:, :, i - 1], cmap='gray', interpolation='bicubic')
plt.show()
# Building the dataset for classification: one sample per pixel, one feature
# per EMP channel
dim_x, dim_y, dim_z = emp_image.shape
dim = dim_x * dim_y
x = np.zeros(shape=(dim, dim_z))
y = gt.ravel()  # flatten the (n, 1) labels to 1D, as expected by scikit-learn
cont = 0
for i in range(dim_x):
    for j in range(dim_y):
        x[cont, :] = emp_image[i, j, :]
        cont += 1
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.75, random_state=0)  # only 25% of the pixels are used for training
# Fitting Kernel SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(x_train, y_train)
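# Optional (sketch, commented out since it multiplies training time): C and
# gamma strongly affect the RBF-SVM, and scikit-learn's GridSearchCV can tune
# them; the grid values below are illustrative, not tuned for this dataset.
# from sklearn.model_selection import GridSearchCV
# param_grid = {'C': [1, 10, 100], 'gamma': ['scale', 0.01, 0.1]}
# search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=3).fit(x_train, y_train)
# print(search.best_params_)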
# Predicting the Test set results
y_pred = classifier.predict(x_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
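# A numeric summary to complement the confusion matrix (a small sketch using
# standard scikit-learn metrics):
from sklearn.metrics import accuracy_score, classification_report
print(f"Overall accuracy: {accuracy_score(y_test, y_pred):.4f}")
print(classification_report(y_test, y_pred, zero_division=0))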
# Visualizing the results
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cm, classes=indianpines_class_names, normalize=True, title='Normalized confusion matrix')
plt.show()
# Visualizing the thematic maps: ground truth vs. prediction over all pixels
predicted_thematic_map = np.zeros(shape=(number_of_rows, number_of_columns, 3))
predicted_dataset = classifier.predict(x)
cont = 0
for i in range(number_of_rows):
    for j in range(number_of_columns):
        gt_thematic_map[i, j, :] = indianpines_colors[gt[cont, 0]]
        predicted_thematic_map[i, j, :] = indianpines_colors[predicted_dataset[cont]]
        cont += 1
fig = plt.figure(figsize=(15, 15))
columns = 2
rows = 1
fig.add_subplot(rows, columns, 1)
plt.imshow(gt_thematic_map)
fig.add_subplot(rows, columns, 2)
plt.imshow(predicted_thematic_map)
plt.show()
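# Optional (sketch): persist both maps for later inspection; the file names
# are illustrative.
# plt.imsave('gt_thematic_map.png', gt_thematic_map)
# plt.imsave('predicted_thematic_map.png', predicted_thematic_map)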