Skip to content

Commit

Permalink
refactor algorithms to not deal with cumulative fit
Browse files Browse the repository at this point in the history
  • Loading branch information
schoonhovenrichard committed Aug 9, 2021
1 parent fd64c6e commit ba4c951
Show file tree
Hide file tree
Showing 12 changed files with 24 additions and 278 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ git clone https://github.com/schoonhovenrichard/BlooPy.git
- bitarray
- pyswarms
- networkx
- pytest
- hypothesis

## Implemented algorithms
### Discrete local search algorithms
Expand Down
2 changes: 0 additions & 2 deletions bloopy/algorithms/discrete_diffevo.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,6 @@ def create_offspring2(self, parents):
else:
bprime.fitness = self.ffunc(bprime.bitstring)
self.visited_cache[bsstr] = bprime.fitness
self.cumulative_fit += bprime.fitness
self.func_evals += 1

# If better, replace candidate
Expand Down Expand Up @@ -227,7 +226,6 @@ def create_offspring(self, parents):
else:
bprime.fitness = self.ffunc(bprime.bitstring)
self.visited_cache[bsstr] = bprime.fitness
self.cumulative_fit += bprime.fitness
self.func_evals += 1

# If better, replace candidate
Expand Down
7 changes: 1 addition & 6 deletions bloopy/algorithms/genetic_algorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ class and overriding the 'point_mutate' method.
self.boundary_list = utils.generate_boundary_list(searchspace)
self.visited_cache = dict()
self.func_evals = 0
self.cumulative_fit = 0
if mutation is not None:
self.mutation = max(1, int(self.bs_size * mutation))
else:
Expand All @@ -90,7 +89,6 @@ def set_fit_pop(self, pop):
else:
pop[i].fitness = self.ffunc(pop[i].bitstring)
self.visited_cache[bsstr] = pop[i].fitness
self.cumulative_fit += pop[i].fitness
self.func_evals += 1

def generate_random_pop(self):
Expand All @@ -105,7 +103,6 @@ def generate_random_pop(self):
new_specimen.fitness = self.visited_cache[bsstr]
else:
new_specimen.fitness = self.ffunc(new_specimen.bitstring)
self.cumulative_fit += new_specimen.fitness
self.func_evals += 1
self.visited_cache[bsstr] = new_specimen.fitness
self.current_pop.append(new_specimen)
Expand Down Expand Up @@ -221,8 +218,6 @@ def solve(self,
best_gen_so_far (int): Generation when best was found.
variance (float): Termination variance of population.
self.func_evals (int): Total number of Fevals performed.
self.cumulative_fit (float): Cumulative fitness of all
fitness function evaluations.
"""
generation = 0
best_fit = self.current_best().fitness
Expand Down Expand Up @@ -277,4 +272,4 @@ def solve(self,

if verbose:
print("Terminated after {0} generations with best fitness: {1:.3f} | # of fitness evals: {2}".format(generation, best_fit, self.func_evals))
return (best_fit, self.current_best(), best_gen_so_far, variance, self.func_evals, self.cumulative_fit)
return (best_fit, self.current_best(), best_gen_so_far, variance, self.func_evals)
4 changes: 1 addition & 3 deletions bloopy/algorithms/genetic_local_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,10 @@ def one_generation(self):
else:
children[i].fitness = self.ffunc(children[i].bitstring)
self.visited_cache[bsstr] = children[i].fitness
self.cumulative_fit += children[i].fitness
self.func_evals += 1

children[i], feval, tot_fit, self.visited_cache = self.hillclimber(children[i], self.ffunc, self.minmax, self.func_evals, self.maxfeval, self.visited_cache, self.nbour_method)
children[i], feval, self.visited_cache = self.hillclimber(children[i], self.ffunc, self.minmax, self.func_evals, self.maxfeval, self.visited_cache, self.nbour_method)
self.func_evals += feval
self.cumulative_fit += tot_fit

# Selection step
self.current_pop = self.selector(parents, children, self.minmax)
20 changes: 4 additions & 16 deletions bloopy/algorithms/hillclimbers.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ def RandomGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited
splits = candidate.boundary_list
foundimprove = True
func_evals = 0
total_fit = 0
bs_size = candidate.size
while foundimprove:
child = individual(candidate.size, bitstring=copy.deepcopy(candidate.bitstring), boundary_list=splits)
Expand All @@ -34,7 +33,6 @@ def RandomGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness
if minmax * child.fitness > minmax * candidate.fitness:
candidate = copy.deepcopy(child)
foundimprove = True
Expand Down Expand Up @@ -71,7 +69,6 @@ def RandomGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness
if minmax * child.fitness > minmax * candidate.fitness:
candidate = copy.deepcopy(child)
indices[k] = i
Expand All @@ -84,15 +81,14 @@ def RandomGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited
child.fitness = candidate.fitness
if foundimprove and restart:
break
return candidate, func_evals, total_fit, visited_cache
return candidate, func_evals, visited_cache

def OrderedGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache, nbour_method, order, restart=True):
if nbour_method not in ["Hamming", "adjacent"]:
raise Exception("Unknown neighbour method.")
splits = candidate.boundary_list
foundimprove = True
func_evals = 0
total_fit = 0
bs_size = candidate.size
while foundimprove:
child = individual(candidate.size, bitstring=copy.deepcopy(candidate.bitstring), boundary_list=splits)
Expand All @@ -115,7 +111,6 @@ def OrderedGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visite
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness
if minmax * child.fitness > minmax * candidate.fitness:
candidate = copy.deepcopy(child)
foundimprove = True
Expand Down Expand Up @@ -154,7 +149,6 @@ def OrderedGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visite
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness
if minmax * child.fitness > minmax * candidate.fitness:
candidate = copy.deepcopy(child)
indices[k] = i
Expand All @@ -167,15 +161,14 @@ def OrderedGreedyHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visite
child.fitness = candidate.fitness
if foundimprove and restart:
break
return candidate, func_evals, total_fit, visited_cache
return candidate, func_evals, visited_cache

def BestHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache, nbour_method):
if nbour_method not in ["Hamming", "adjacent"]:
raise Exception("Unknown neighbour method.")
splits = candidate.boundary_list
foundimprove = True
func_evals = 0
total_fit = 0
bs_size = candidate.size
while foundimprove:
child = individual(candidate.size, bitstring=copy.deepcopy(candidate.bitstring), boundary_list=splits)
Expand All @@ -196,7 +189,6 @@ def BestHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache,
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness

                #If a neighbour is better than the best neighbour found so far, save it
if minmax * child.fitness > minmax * best_child.fitness:
Expand Down Expand Up @@ -236,7 +228,6 @@ def BestHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache,
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness

                    #If a neighbour is better than the best neighbour found so far, save it
if minmax * child.fitness > minmax * best_child.fitness:
Expand All @@ -250,15 +241,14 @@ def BestHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache,
if minmax * best_child.fitness > minmax * candidate.fitness:
foundimprove = True
candidate = copy.deepcopy(best_child)
return candidate, func_evals, total_fit, visited_cache
return candidate, func_evals, visited_cache

def StochasticHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_cache, nbour_method):
if nbour_method not in ["Hamming", "adjacent"]:
raise Exception("Unknown neighbour method.")
splits = candidate.boundary_list
foundimprove = True
func_evals = 0
total_fit = 0
bs_size = candidate.size
while foundimprove:
if maxfeval is not None and totfevals >= maxfeval:
Expand All @@ -280,7 +270,6 @@ def StochasticHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_c
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness

if minmax * child.fitness > minmax * candidate.fitness:
improved_candidate = copy.deepcopy(child)
Expand Down Expand Up @@ -314,7 +303,6 @@ def StochasticHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_c
child.fitness = ffunc(child.bitstring)
visited_cache[bsstr] = child.fitness
func_evals += 1
total_fit += child.fitness
if minmax * child.fitness > minmax * candidate.fitness:
improved_candidate = copy.deepcopy(child)
uphill_moves.append((improved_candidate, abs(child.fitness-candidate.fitness)))
Expand All @@ -328,4 +316,4 @@ def StochasticHillclimb(candidate, ffunc, minmax, totfevals, maxfeval, visited_c
# Choose a random uphill move proportionate to fitness increase
index = np.random.choice(range(len(uphill_moves)),size=(1,),p=(np.array(probs)/float(sum(probs))).tolist())[0]
candidate = copy.deepcopy(uphill_moves[index][0])
return candidate, func_evals, total_fit, visited_cache
return candidate, func_evals, visited_cache
14 changes: 4 additions & 10 deletions bloopy/algorithms/iterative_local_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ def randomwalk(self, maxfeval):
self.current_candidate.fitness = self.ffunc(self.current_candidate.bitstring)
self.visited_cache[bsstr] = self.current_candidate.fitness
self.func_evals += 1
self.cumulative_fit += self.current_candidate.fitness
else:
self.current_candidate = individual(self.best_candidate.size, bitstring=copy.deepcopy(self.best_candidate.bitstring), boundary_list=self.boundary_list)
self.current_candidate.fitness = self.best_candidate.fitness
Expand All @@ -73,7 +72,6 @@ def randomwalk(self, maxfeval):
self.current_candidate.fitness = self.ffunc(self.current_candidate.bitstring)
self.visited_cache[bsstr] = self.current_candidate.fitness
self.func_evals += 1
self.cumulative_fit += self.current_candidate.fitness

class BestILS(iterative_local_search_base):
def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk, noimprove=100, searchspace=None, neighbour='Hamming'):
Expand All @@ -97,9 +95,8 @@ def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk
super().__init__(fitness_function, bitstring_size, minmax_problem, random_walk, noimprove, searchspace=searchspace, neighbour=neighbour)

def hillclimb_candidate(self, maxfeval):
self.current_candidate, extra_fevals, totfit, self.visited_cache = hillclimb.BestHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method)
self.current_candidate, extra_fevals, self.visited_cache = hillclimb.BestHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method)
self.func_evals += extra_fevals
self.cumulative_fit += totfit

class RandomGreedyILS(iterative_local_search_base):
def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk, noimprove=100, searchspace=None, neighbour='Hamming', restart_search=True):
Expand Down Expand Up @@ -127,9 +124,8 @@ def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk
self.restart = restart_search

def hillclimb_candidate(self, maxfeval):
self.current_candidate, extra_fevals, totfit, self.visited_cache = hillclimb.RandomGreedyHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method, restart=self.restart)
self.current_candidate, extra_fevals, self.visited_cache = hillclimb.RandomGreedyHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method, restart=self.restart)
self.func_evals += extra_fevals
self.cumulative_fit += totfit

class OrderedGreedyILS(iterative_local_search_base):
def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk, noimprove=100, searchspace=None, neighbour='Hamming', restart_search=True, order=None):
Expand Down Expand Up @@ -160,9 +156,8 @@ def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk
self.order = order

def hillclimb_candidate(self, maxfeval):
self.current_candidate, extra_fevals, totfit, self.visited_cache = hillclimb.OrderedGreedyHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method, self.order, restart=self.restart)
self.current_candidate, extra_fevals, self.visited_cache = hillclimb.OrderedGreedyHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method, self.order, restart=self.restart)
self.func_evals += extra_fevals
self.cumulative_fit += totfit

class StochasticILS(iterative_local_search_base):
def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk, noimprove=100, searchspace=None, neighbour='Hamming'):
Expand All @@ -188,6 +183,5 @@ def __init__(self, fitness_function, bitstring_size, minmax_problem, random_walk
super().__init__(fitness_function, bitstring_size, minmax_problem, random_walk, noimprove, searchspace=searchspace, neighbour=neighbour)

def hillclimb_candidate(self, maxfeval):
self.current_candidate, extra_fevals, totfit, self.visited_cache = hillclimb.StochasticHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method)
self.current_candidate, extra_fevals, self.visited_cache = hillclimb.StochasticHillclimb(self.current_candidate, self.ffunc, self.minmax, self.func_evals, maxfeval, self.visited_cache, self.nbour_method)
self.func_evals += extra_fevals
self.cumulative_fit += totfit
Loading

0 comments on commit ba4c951

Please sign in to comment.