"""
Multivariate Regression and Classification Using an Adaptive Neuro-Fuzzy
Inference System (Takagi-Sugeno) and Particle Swarm Optimization.
Copyright (c) 2020 Gabriele Gilardi
"""
import numpy as np


def PSO(func, LB, UB, nPop=40, epochs=500, K=0, phi=2.05, vel_fact=0.5,
        conf_type='RB', IntVar=None, normalize=False, rad=0.1, args=None):
    """
    func         Function to minimize
    LB           Lower boundaries of the search space
    UB           Upper boundaries of the search space
    nPop         Number of agents (population)
    epochs       Number of iterations
    K            Average size of each agent's group of informants
    phi          Coefficient to calculate the two confidence coefficients
    vel_fact     Velocity factor to calculate the maximum and the minimum
                 allowed velocities
    conf_type    Confinement type (on the velocities)
    IntVar       List of indexes specifying which variables should be treated
                 as integers
    normalize    Specifies if the search space should be normalized (to
                 improve convergence)
    rad          Normalized radius of the hypersphere centered on the best
                 particle
    args         Tuple containing any parameter that needs to be passed to
                 the function

    Dimensions:
    (nVar, )        LB, UB, LB_orig, UB_orig, vel_max, vel_min, swarm_best_pos
    (nPop, nVar)    agent_pos, agent_vel, agent_best_pos, Gr, group_best_pos,
                    agent_pos_orig, agent_pos_tmp, vel_conf, out, x_sphere, u
    (nPop, nPop)    informants, informants_cost
    (nPop)          agent_best_cost, agent_cost, p_equal_g, better, r_max, r,
                    norm
    (0-nVar, )      IntVar
    """
    # Dimension of the search space and max. allowed velocities
    nVar = len(LB)
    vel_max = vel_fact * (UB - LB)
    vel_min = - vel_max

    # Confidence coefficients
    w = 1.0 / (phi - 1.0 + np.sqrt(phi**2 - 2.0 * phi))
    cmax = w * phi
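    # With the default phi = 2.05 these evaluate to w ~ 0.73 and cmax ~ 1.50,
    # i.e. the usual constriction-type coefficients (Clerc-Kennedy).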

    # Probability an agent is an informant
    p_informant = 1.0 - (1.0 - 1.0 / float(nPop)) ** K
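    # (This reproduces the scheme where each agent informs K others chosen at
    # random with replacement: 1 - (1 - 1/nPop)^K is the probability that a
    # given agent ends up informed by another, so K is an average group size.)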

    # Normalize search space if requested
    if (normalize):
        LB_orig = LB.copy()
        UB_orig = UB.copy()
        LB = np.zeros(nVar)
        UB = np.ones(nVar)

    # Define (if any) which variables are treated as integers (indexes are in
    # the range 1 to nVar)
    if (IntVar is None):
        nIntVar = 0
    elif (IntVar == 'all'):
        IntVar = np.arange(nVar, dtype=int)
        nIntVar = nVar
    else:
        IntVar = np.asarray(IntVar, dtype=int) - 1
        nIntVar = len(IntVar)

    # Initial position of each agent
    agent_pos = LB + np.random.rand(nPop, nVar) * (UB - LB)
    if (nIntVar > 0):
        agent_pos[:, IntVar] = np.round(agent_pos[:, IntVar])

    # Initial velocity of each agent (with velocity limits)
    agent_vel = (LB - agent_pos) + np.random.rand(nPop, nVar) * (UB - LB)
    agent_vel = np.fmin(np.fmax(agent_vel, vel_min), vel_max)

    # Initial cost of each agent
    if (normalize):
        agent_pos_orig = LB_orig + agent_pos * (UB_orig - LB_orig)
        agent_cost = func(agent_pos_orig, args)
    else:
        agent_cost = func(agent_pos, args)

    # Initial best position/cost of each agent
    agent_best_pos = agent_pos.copy()
    agent_best_cost = agent_cost.copy()

    # Initial best position/cost of the swarm
    idx = np.argmin(agent_best_cost)
    swarm_best_pos = agent_best_pos[idx, :]
    swarm_best_cost = agent_best_cost[idx]
    swarm_best_idx = idx

    # Initial best position of each agent using the swarm
    if (K == 0):
        group_best_pos = np.tile(swarm_best_pos, (nPop, 1))
        p_equal_g = \
            (np.where(np.arange(nPop) == idx, 0.75, 1.0)).reshape(nPop, 1)

    # Initial best position of each agent using informants
    else:
        informants = np.where(np.random.rand(nPop, nPop) < p_informant, 1, 0)
        np.fill_diagonal(informants, 1)
        group_best_pos, p_equal_g = group_best(informants, agent_best_pos,
                                               agent_best_cost)

    # Main loop
    for epoch in range(epochs):

        # Determine the updated velocity for each agent
        Gr = agent_pos + (1.0 / 3.0) * cmax * \
             (agent_best_pos + group_best_pos - 2.0 * agent_pos) * p_equal_g
        x_sphere = hypersphere_point(Gr, agent_pos)
        agent_vel = w * agent_vel + Gr + x_sphere - agent_pos
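        # (Gr + x_sphere is a random point inside the hypersphere centered on
        # Gr, so the update amounts to v <- w*v + (random point - position).)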

        # Impose velocity limits
        agent_vel = np.fmin(np.fmax(agent_vel, vel_min), vel_max)

        # Temporarily update the position of each agent to check if it is
        # outside the search space
        agent_pos_tmp = agent_pos + agent_vel
        if (nIntVar > 0):
            agent_pos_tmp[:, IntVar] = np.round(agent_pos_tmp[:, IntVar])
        out = np.logical_not((agent_pos_tmp > LB) * (agent_pos_tmp < UB))

        # Apply velocity confinement rules
        if (conf_type == 'RB'):
            vel_conf = random_back_conf(agent_vel)
        elif (conf_type == 'HY'):
            vel_conf = hyperbolic_conf(agent_pos, agent_vel, UB, LB)
        elif (conf_type == 'MX'):
            vel_conf = mixed_conf(agent_pos, agent_vel, UB, LB)

        # Update velocity and position of each agent (all <vel_conf> velocities
        # are smaller than the max. allowed velocity)
        agent_vel = np.where(out, vel_conf, agent_vel)
        agent_pos += agent_vel
        if (nIntVar > 0):
            agent_pos[:, IntVar] = np.round(agent_pos[:, IntVar])

        # Apply position confinement rules to agents outside the search space
        agent_pos = np.fmin(np.fmax(agent_pos, LB), UB)
        if (nIntVar > 0):
            agent_pos[:, IntVar] = np.fmax(agent_pos[:, IntVar],
                                           np.ceil(LB[IntVar]))
            agent_pos[:, IntVar] = np.fmin(agent_pos[:, IntVar],
                                           np.floor(UB[IntVar]))

        # Calculate new cost of each agent
        if (normalize):
            agent_pos_orig = LB_orig + agent_pos * (UB_orig - LB_orig)
            agent_cost = func(agent_pos_orig, args)
        else:
            agent_cost = func(agent_pos, args)

        # Update best position/cost of each agent
        better = (agent_cost < agent_best_cost)
        agent_best_pos[better, :] = agent_pos[better, :]
        agent_best_cost[better] = agent_cost[better]

        # Update best position/cost of the swarm
        idx = np.argmin(agent_best_cost)
        if (agent_best_cost[idx] < swarm_best_cost):
            swarm_best_pos = agent_best_pos[idx, :]
            swarm_best_cost = agent_best_cost[idx]
            swarm_best_idx = idx

        # If the best cost of the swarm did not improve ....
        else:
            # .... when using the swarm -> do nothing
            if (K == 0):
                pass
            # .... when using informants -> change the informant groups
            else:
                informants = \
                    np.where(np.random.rand(nPop, nPop) < p_informant, 1, 0)
                np.fill_diagonal(informants, 1)

        # Update best position of each agent using the swarm
        if (K == 0):
            group_best_pos = np.tile(swarm_best_pos, (nPop, 1))

        # Update best position of each agent using informants
        else:
            group_best_pos, p_equal_g = group_best(informants, agent_best_pos,
                                                   agent_best_cost)

    # If necessary de-normalize and determine the (normalized) distance between
    # the best particle and all the others
    if (normalize):
        delta = agent_best_pos - swarm_best_pos        # (UB-LB = 1)
        swarm_best_pos = LB_orig + swarm_best_pos * (UB_orig - LB_orig)
    else:
        deltaB = np.fmax(UB - LB, 1.e-10)       # To avoid /0 when LB = UB
        delta = (agent_best_pos - swarm_best_pos) / deltaB

    # Number of particles in the hypersphere of radius <rad> around the best
    # particle
    dist = np.linalg.norm(delta / np.sqrt(nPop), axis=1)
    in_rad = (dist < rad).sum()

    # Return info about the solution
    info = (swarm_best_cost, swarm_best_idx, in_rad)

    return swarm_best_pos, info


def group_best(informants, agent_best_pos, agent_best_cost):
    """
    Determine the group best position of each agent based on the agent
    informants.
    """
    nPop, nVar = agent_best_pos.shape

    # Determine the cost of each agent in each group (set to infinity the value
    # for agents that are not informants of the group)
    informants_cost = np.where(informants == 1, agent_best_cost, np.inf)

    # For each agent determine the agent with the best cost in the group and
    # assign its position to it
    idx = np.argmin(informants_cost, axis=1)
    group_best_pos = agent_best_pos[idx, :]

    # Build the vector to correct the velocity update for the corner case where
    # the agent is also the group best
    p_equal_g = (np.where(np.arange(nPop) == idx, 0.75, 1.0)).reshape(nPop, 1)

    return group_best_pos, p_equal_g


def hypersphere_point(Gr, agent_pos):
    """
    For each agent determine a random point inside the hypersphere (Gr, |Gr-X|),
    where Gr is its center, |Gr-X| is its radius, and X is the agent position.
    """
    nPop, nVar = agent_pos.shape

    # Hypersphere radius of each agent
    r_max = np.linalg.norm(Gr - agent_pos, axis=1)

    # Randomly pick a direction using a normal distribution and a radius
    # (inside the hypersphere)
    u = np.random.normal(0.0, 1.0, (nPop, nVar))
    norm = np.linalg.norm(u, axis=1)
    r = np.random.uniform(0.0, r_max, nPop)

    # Coordinates of the point with direction <u> and at distance <r> from the
    # hypersphere center
    x_sphere = u * (r / norm).reshape(nPop, 1)

    return x_sphere


def hyperbolic_conf(agent_pos, agent_vel, UB, LB):
    """
    Apply hyperbolic confinement to agent velocities (the calculation is done
    on all agents to avoid loops, but the change is applied only to the agents
    actually outside the search space).
    """
    # If the updated velocity is > 0
    if_pos_vel = agent_vel / (1.0 + np.abs(agent_vel / (UB - agent_pos)))

    # If the updated velocity is <= 0
    if_neg_vel = agent_vel / (1.0 + np.abs(agent_vel / (agent_pos - LB)))

    # Confinement velocity
    vel_conf = np.where(agent_vel > 0, if_pos_vel, if_neg_vel)

    return vel_conf


def random_back_conf(agent_vel):
    """
    Apply random-back confinement to agent velocities (the calculation is done
    on all agents to avoid loops, but the change is applied only to the agents
    actually outside the search space).
    """
    nPop, nVar = agent_vel.shape

    # Confinement velocity
    vel_conf = - np.random.rand(nPop, nVar) * agent_vel

    return vel_conf


def mixed_conf(agent_pos, agent_vel, UB, LB):
    """
    Apply a mixed-type confinement to agent velocities (the calculation is done
    on all agents to avoid loops, but the change is applied only to the agents
    actually outside the search space).

    For each agent the confinement type (hyperbolic or random-back) is chosen
    randomly.
    """
    nPop, nVar = agent_pos.shape

    # Hyperbolic confinement
    vel_conf_HY = hyperbolic_conf(agent_pos, agent_vel, UB, LB)

    # Random-back confinement
    vel_conf_RB = random_back_conf(agent_vel)

    # Confinement velocity
    gamma = np.random.rand(nPop, nVar)
    vel_conf = np.where(gamma >= 0.5, vel_conf_HY, vel_conf_RB)

    return vel_conf
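

if __name__ == '__main__':

    # Minimal usage sketch: minimize the 5-dimensional sphere function. As in
    # the calls above, the cost function receives the whole population matrix
    # of shape (nPop, nVar) together with the <args> tuple, and must return
    # one cost value per agent.
    def sphere(pos, args):
        return (pos ** 2).sum(axis=1)

    nVar = 5
    LB = -5.0 * np.ones(nVar)
    UB = +5.0 * np.ones(nVar)

    best_pos, (best_cost, best_idx, in_rad) = PSO(sphere, LB, UB, nPop=40,
                                                  epochs=200, K=3)
    print("Best position:", best_pos)
    print("Best cost:", best_cost)
    print("Best agent index:", best_idx)
    print("Agents within <rad> of the best:", in_rad)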