-
Notifications
You must be signed in to change notification settings - Fork 3
/
optimization.py
223 lines (178 loc) · 6.66 KB
/
optimization.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
import random
import math
def randomoptimize(domain, costf, iterations=100000):
    """Random search: try `iterations` random solutions and keep the best.

    domain     -- list of (min, max) inclusive bounds, one pair per element
    costf      -- callable scoring a candidate list; lower is better
    iterations -- number of random candidates to sample (default keeps the
                  original hard-coded 100000)

    Returns the lowest-cost candidate seen, or None if iterations == 0.
    """
    best = float('inf')  # replaces the magic 999999999 sentinel
    bestr = None
    for _ in range(iterations):
        #Create a random solution
        r = [random.randint(domain[j][0], domain[j][1])
             for j in range(len(domain))]
        #Get the cost
        cost = costf(r)
        #Compare it to the best one so far
        if cost < best:
            best = cost
            bestr = r
    #BUG FIX: the original returned `r` (the LAST random candidate tried),
    #throwing away the tracked best; return the best solution instead.
    return bestr
def hillclimb(domain, costf):
    """Greedy hill climbing (descent): repeatedly move to a cheaper
    neighboring solution until no neighbor improves the cost.

    domain -- list of (min, max) inclusive bounds, one pair per element
    costf  -- callable scoring a candidate list; lower is better

    Returns a locally optimal solution (not necessarily the global one).

    FIX: the original used Python 2 print statements, which are a
    SyntaxError under Python 3; the single-argument print(...) form used
    here prints identical output under both Python 2 and 3.
    """
    #Create a random starting solution
    sol = [random.randint(domain[j][0], domain[j][1])
           for j in range(len(domain))]
    count = 0
    #Main loop
    while True:
        count += 1
        print("----------")
        print("Try #%s" % count)
        print("----------")
        print("%s, cost = %s" % (sol, costf(sol)))
        print("----------")
        #Create the list of neighboring solutions. A "neighboring
        #solution" is the current list with exactly one of its items
        #either greater or less by 1, staying inside the domain bounds.
        neighbors = []
        for j in range(len(domain)):
            #One away in each direction
            if sol[j] > domain[j][0]:
                neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j+1:])
            if sol[j] < domain[j][1]:
                neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j+1:])
            print("%s len neighbors = %s" % (j + 1, len(neighbors)))
        ######################################
        #Try to find a better cost among the neighbors of the current
        #solution. NOTE: the neighbor list is not rebuilt as `sol`
        #changes mid-scan, so each pass adopts at most the best neighbor
        #of the solution it STARTED with (original behavior, preserved).
        current = costf(sol)
        best = current
        print("Current best cost = %s" % current)
        for j in range(len(neighbors)):
            cost = costf(neighbors[j])
            if cost < best:
                best = cost
                oldsol = sol
                sol = neighbors[j]
                print("We found a better configuration at %s of %s:" % (j + 1, len(neighbors)))
                print("new: %s, cost = %s" % (sol, best))
                print("old: %s" % oldsol)
        #If there's no improvement, then we've reached the bottom of the "hill"
        if best == current:
            print("There was no improvement")
            break
        ######################################
    return sol
#The annealing method seems to be a way of determining the riskiness
#of choosing a worse solution in the hopes that it'll yield a better
#solution ultimately. You start out very open to risk - considering
#that you've begun in an arbitrary place anyway so what's to
#lose, right? - but as you go on you become more risk-averse. Time
#running out/temperature decreasing might have something to do
#with it too ...
#This config produces consistently low costs
#def annealingoptimize(domain, costf, T=1000000.0, cool=0.99, step=3):
#Discovery: bigger steps result in more consistently lower costs
def annealingoptimize(domain, costf, T=10000.0, cool=0.95, step=8):
    """Simulated annealing: random single-element moves, always accepting
    improvements and sometimes accepting WORSE moves, with the willingness
    to take that risk shrinking as the temperature T cools toward zero.

    domain -- list of (min, max) inclusive bounds, one pair per element
    costf  -- callable scoring a candidate list; lower is better
    T      -- starting temperature (higher = more initial risk-taking)
    cool   -- multiplicative cooling factor applied each iteration
    step   -- maximum magnitude of a single random move

    Returns the final solution vector.

    FIXES vs. the original: (1) the acceptance probability used
    (-eb - ea) / T -- the negated SUM of both costs -- instead of the
    Metropolis cost DIFFERENCE -(eb - ea) / T, which made accepting worse
    moves vanishingly unlikely for positive costs; (2) Python 2 print
    statements converted to the print(...) form valid under Python 3.
    """
    #Initialize the values randomly
    vec = [random.randint(domain[i][0], domain[i][1])
           for i in range(len(domain))]
    while T > 0.1:
        #Choose one of the indices
        i = random.randint(0, len(domain) - 1)
        #Choose a direction (and magnitude) to change it
        dir = random.randint(-step, step)
        #Create a new list with one of the values changed,
        #clamped back inside the domain
        vecb = vec[:]
        vecb[i] += dir
        if vecb[i] < domain[i][0]: vecb[i] = domain[i][0]
        elif vecb[i] > domain[i][1]: vecb[i] = domain[i][1]
        #Calculate the current cost and the new cost
        ea = costf(vec)
        eb = costf(vecb)
        print("Temperature: %s" % T)
        print("Vector A cost = %s, Vector B cost = %s" % (ea, eb))
        #Metropolis acceptance probability for a worse move:
        #exp(-(eb - ea) / T)  (BUG FIX -- see docstring)
        p = pow(math.e, -(eb - ea) / T)
        print("Probability cutoff = pow(math.e, -(eb - ea) / T)")
        print("= pow(%s, (%s) / %s) = %s" % (math.e, -(eb - ea), T, p))
        print("Old vector: %s, cost %s" % (vec, ea))
        print("New vector: %s, cost %s" % (vecb, eb))
        #Is it better, or does it make the probability cutoff?
        if eb < ea:
            print("New vector is BETTER")
            print("--------------------")
            vec = vecb
        else:
            rand = random.random()
            #p is the probability of risking the worse move; drawing a
            #uniform random number and comparing it to p is how that
            #probability is actually acted upon. "Acceptable" here means
            #"I'm willing to risk it".
            if rand < p:
                print("New vector is WORSE, but ................... ACCEPTABLE")
                vec = vecb
                print("Here's why: Random number %s is < probability cutoff %s" % (rand, p))
            else:
                print("New vector is WORSE, and NOT acceptable")
                print("Here's why: Random number %s is >= probability cutoff %s" % (rand, p))
            print("--------------------")
        #Decrease the temperature
        T = T * cool
    return vec
def geneticoptimize(domain, costf, popsize=50, step=1,
                    mutprob=0.2, elite=0.2, maxiter=100):
    """Genetic algorithm: evolve a population of candidate solutions,
    keeping an elite of the fittest each generation and refilling the
    population with mutations and crossovers of that elite.

    domain  -- list of (min, max) inclusive bounds, one pair per element
              (crossover requires len(domain) >= 3)
    costf   -- callable scoring a candidate list; lower is better
    popsize -- population size per generation
    step    -- mutation step size
    mutprob -- probability a new member is a mutation (vs. a crossover)
    elite   -- fraction of each generation that survives unchanged
    maxiter -- number of generations to run

    Returns the fittest solution found.

    FIXES vs. the original: (1) breeding indices used randint(0, topelite),
    whose inclusive upper bound selects one individual PAST the elite;
    (2) mutate could step outside the domain when step > 1; (3) the inner
    comprehension shadowed the outer loop variable `i`; (4) the final
    generation is now evaluated before returning (and maxiter=0 no longer
    raises NameError); (5) Python 2 prints converted to print(...) form.
    """
    #Mutation operation: nudge one randomly chosen element by +/- step,
    #clamped so it cannot leave the domain
    def mutate(vec):
        i = random.randint(0, len(domain) - 1)
        if random.random() < 0.5:
            newval = vec[i] - step
        else:
            newval = vec[i] + step
        newval = max(domain[i][0], min(domain[i][1], newval))
        return vec[0:i] + [newval] + vec[i+1:]
    #Crossover operation: splice r1's head onto r2's tail at a random
    #interior cut point
    def crossover(r1, r2):
        i = random.randint(1, len(domain) - 2)
        return r1[0:i] + r2[i:]
    #Build the initial population randomly
    pop = []
    for _ in range(popsize):
        vec = [random.randint(domain[j][0], domain[j][1])
               for j in range(len(domain))]
        pop.append(vec)
    #How many winners survive each generation (at least one, so the
    #breeding indices below are always valid)
    topelite = max(1, int(elite * popsize))
    #Rank the initial population; re-ranked at the end of every
    #generation so the return below always reflects evaluated members
    scores = sorted([(costf(v), v) for v in pop])
    #Main loop - this is where new populations (or "generations" in the
    #genetic metaphor) are derived from the fittest of previous ones
    for gen in range(maxiter):
        ranked = [v for (s, v) in scores]
        #########
        #The new generation starts with only the "fittest" elite
        pop = ranked[0:topelite]
        ###########################
        #Then add mutated and bred forms of that elite until the
        #population is back to full size
        mutations = 0
        breedings = 0
        while len(pop) < popsize:
            if random.random() < mutprob:
                # Mutation
                mutations += 1
                c = random.randint(0, topelite - 1)
                pop.append(mutate(ranked[c]))
            else:
                # Crossover
                breedings += 1
                c1 = random.randint(0, topelite - 1)
                c2 = random.randint(0, topelite - 1)
                pop.append(crossover(ranked[c1], ranked[c2]))
        ##########################
        #Print current best score
        print("The fittest: %s %s - mutations and breedings in this gen: m %s b %s" % (scores[0][0], scores[0][1], mutations, breedings))
        #Rank the newly built generation
        scores = sorted([(costf(v), v) for v in pop])
    return scores[0][1]