metricChen.m
function res=metricChen(im1,im2,fim)
% function res=metricChen(im1,im2,fim)
%
% This function implements Chen's algorithm for fusion metric.
% im1, im2 -- input images;
% fim -- fused image;
% res -- metric value.
%
% IMPORTANT: the size of the images needs to be 2X.
% See also: evalu_fusion.m
%
% Z. Liu [July 2009]
%
% Ref: A human perception inspired quality metric for image fusion based on
% regional information, Information Fusion, Vol. 8, pp. 193-207, 2007,
% by Hao Chen et al.
%
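%
% A minimal usage sketch (the file names below are illustrative placeholders,
% not part of the original interface):
%   im1 = imread('sourceA.png');   % first source image
%   im2 = imread('sourceB.png');   % second source image
%   fim = imread('fused.png');     % fused image, same size as the sources
%   res = metricChen(im1, im2, fim);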
%% set up the constant values
alpha_c=1;
alpha_s=0.685;
f_c=97.3227;
f_s=12.1653;
% local window size 16 x 16
windowSize=16;
% alpha = 1, 2, 3, 4, 5, 10, or 15. This value is adjustable. ;-)
alpha=5;
%% pre-processing
im1=double(im1);
im2=double(im2);
fim=double(fim);
im1=normalize1(im1);
im2=normalize1(im2);
fim=normalize1(fim);
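% normalize1 is an external helper shipped with this metrics package; it is
% assumed here to rescale intensities to a common range (e.g. [0,1])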
%% Step 1: extract edge information
flt1=[-1 0 1 ; -2 0 2 ; -1 0 1];   % Sobel kernel, horizontal gradient
flt2=[-1 -2 -1; 0 0 0; 1 2 1];     % Sobel kernel, vertical gradient
% 1) get the gradient magnitude and orientation maps
fuseX=filter2(flt1,fim,'same');
fuseY=filter2(flt2,fim,'same');
fuseG=sqrt(fuseX.*fuseX+fuseY.*fuseY);
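% add a tiny offset where the x-gradient is exactly zero so the angle
% computation never divides by zero (the same trick is repeated for im1 and im2)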
buffer=(fuseX==0);
buffer=buffer*0.00001;
fuseX=fuseX+buffer;
fuseA=atan(fuseY./fuseX);
img1X=filter2(flt1,im1,'same');
img1Y=filter2(flt2,im1,'same');
im1G=sqrt(img1X.*img1X+img1Y.*img1Y);
buffer=(img1X==0);
buffer=buffer*0.00001;
img1X=img1X+buffer;
im1A=atan(img1Y./img1X);
img2X=filter2(flt1,im2,'same');
img2Y=filter2(flt2,im2,'same');
im2G=sqrt(img2X.*img2X+img2Y.*img2Y);
buffer=(img2X==0);
buffer=buffer*0.00001;
img2X=img2X+buffer;
im2A=atan(img2Y./img2X);
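% im1G/im2G/fuseG above are the Sobel gradient magnitudes sqrt(Gx.^2+Gy.^2),
% and im1A/im2A/fuseA the orientations atan(Gy./Gx); note that the orientation
% maps are computed but not used further in this implementation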
%% calculate the local region saliency
% separate the image into local regions (16 x 16 blocks)
[hang,lie]=size(im1);
H=floor(hang/windowSize);   % number of whole blocks vertically (not used below)
L=floor(lie/windowSize);    % number of whole blocks horizontally (not used below)
% blkproc was removed from recent MATLAB releases; blockproc is its replacement
fun=@(block_struct) sum(sum(block_struct.data.^alpha));
ramda1=blockproc(im1G,[windowSize windowSize],fun);
ramda2=blockproc(im2G,[windowSize windowSize],fun);
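% ramda1/ramda2 above hold the regional saliency of each source image:
% lambda(w) = sum over (x,y) in window w of G(x,y)^alpha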
%% similarity measurement
f1=im1-fim;
f2=im2-fim;
% filtering with CSF filters
% first build the three filters in frequency domain:
[u,v]=freqspace([hang,lie],'meshgrid');
%---------------
% length 1
%u=lie/2*u; v=hang/2*v;
% length 2
%u=lie/4*u; v=hang/4*v;
% length 3
u=lie/8*u; v=hang/8*v;
r=sqrt(u.^2+v.^2);
% Mannos-Sakrison's filter
theta_m=2.6*(0.0192+0.144*r).*exp(-(0.144*r).^1.1);
% Daly's filter
% avoid being divided by zero
index=find(r==0);
r(index)=1;
buff=0.008./(r.^3)+1;
buff=buff.^(-0.2);   % (0.008/r^3 + 1)^(-0.2)
buff1=-0.3*r.*sqrt(1+0.06*exp(0.3*r));
theta_d=buff.*(1.42*r.*exp(buff1));   % the original applied .^(-0.2) a second time here; once is correct
theta_d(index)=0;
clear buff;
clear buff1;
% Ahumada filter
theta_a=alpha_c*exp(-(r/f_c).^2)-alpha_s*exp(-(r/f_s).^2);
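% three CSF models are built above: Mannos-Sakrison (theta_m), Daly (theta_d)
% and Ahumada (theta_a); only theta_m is applied below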
% second, filter the image in frequency domain
ff1=fft2(f1);
ff2=fft2(f2);
% here the first filter (Mannos-Sakrison, theta_m) is used.
% the filtered differences are real up to numerical roundoff, so take real()
Df1=real(ifft2(ifftshift(fftshift(ff1).*theta_m)));
Df2=real(ifft2(ifftshift(fftshift(ff2).*theta_m)));
fun2=@(block_struct) mean2(block_struct.data.^2);
D1=blockproc(Df1,[windowSize windowSize],fun2);
D2=blockproc(Df2,[windowSize windowSize],fun2);
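% D1/D2 above are the per-block distortions: the mean squared CSF-filtered
% difference between each source image and the fused image over each window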
%% global quality
% NOTE: the commented-out line below is a literal transcription of the paper's
% equation, which appears to be wrong; the normalized form underneath is used.
%Q=sum(sum(ramda1.*D1+ramda2.*D2/(ramda1+ramda2)));
Q=sum(sum(ramda1.*D1+ramda2.*D2))/sum(sum(ramda1+ramda2));
res=Q;