# examples/dynamic_programming.py — exact solvers (bruteforce and DP) for MK functions.
import numpy as np  # NOTE(review): numpy appears unused in this module
import fitness_functions as ff

def bruteforce_MK_solve(mk_func):
    r"""
    Bruteforce computation of MK function optimum.

    Enumerates all 2**m bitstrings, so this is only feasible for small m
    (roughly m < 20).

    Args:
        mk_func: MK function object exposing ``m`` (bitstring length) and
            ``get_fitness(bitstring)``.

    Returns:
        (best_fit, best_bitset): maximal fitness found and the bitstring
        (bitarray) attaining it.
    """
    # Subfunction values are positive, so 0.0 is a safe initial lower bound.
    best_fit = 0.0
    best_bitset = ff.create_bitstring(0, mk_func.m)
    for i in range(2**mk_func.m):
        bs = ff.create_bitstring(i, mk_func.m)
        fitness = mk_func.get_fitness(bs)
        if fitness > best_fit:
            best_fit = fitness
            best_bitset = bs
    return best_fit, best_bitset

def dp_solve_MK(mk_func):
    r"""
    Function to find optimal solution to adjacent MK function using
    dynamic programming.

    Folds each group of k-1 consecutive subfunctions into a "tilde"
    lookup table over its 2k-2 input bits, then merges the tilde tables
    recursively via dp_solve_adjacentMK_tilde.

    Args:
        mk_func: adjacent MK function with attributes ``m``, ``k`` and
            ``fitness_map`` (per-subfunction lookup tables).

    Returns:
        Maximal total fitness value (the optimal bitstring itself is not
        reconstructed).
    """
    m = mk_func.m
    k = mk_func.k
    if (m % (k-1) != 0):
        raise Exception("We have not decided how to implement DP if there are bits left over")
    tilde_functions = []
    q = int(m / (k-1))  # number of grouped ("tilde") functions
    for i in range(q):
        tilde_function = dict()
        start = (k-1) * i  # index of the first subfunction in this group
        # Each tilde function depends on 2k-2 consecutive bits.
        for a0 in range(2**(2*k-2)):
            tilde_val = 0
            temp_bs = ff.create_bitstring(a0, 2*k-2)
            for j in range(k-1):
                current_mk_func = mk_func.fitness_map[start + j]
                # Slice out the k input bits used by subfunction start+j.
                temp_bs1 = ff.create_bitstring(0, k)
                for p in range(k):
                    temp_bs1[p] = temp_bs[j+p]
                tilde_val += current_mk_func[tuple(temp_bs1)]
            tilde_function[tuple(temp_bs)] = tilde_val
        tilde_functions.append(tilde_function)
    return dp_solve_adjacentMK_tilde(tilde_functions, q-1, k)

def dp_solve_adjacentMK_tilde(tilde_funcs, pos, k):
    r"""
    Helper function to DP solve adjacent MK functions.

    Recursively merges the last two tilde tables into one by maximizing
    over their k-1 shared bits, until only two tables remain (pos == 1).

    Args:
        tilde_funcs (list[dict]): tilde lookup tables over 2k-2 bits each.
        pos (int): index of the last tilde table still to be merged.
        k (int): subfunction length of the underlying MK function.

    Returns:
        Maximal total fitness value (int).
    """
    if pos == 1:
        # Base case: two tables left; brute-force their combined 2k-2 bits.
        best_val = 0
        for a1 in range(2**(k-1)):
            for a2 in range(2**(k-1)):
                bsa1 = ff.create_bitstring(a1, k-1)
                bsa2 = ff.create_bitstring(a2, k-1)
                tilde0_bs = ff.create_bitstring(0, 2*k-2)
                tilde1_bs = ff.create_bitstring(0, 2*k-2)
                # The two halves appear in swapped order in the two tables.
                for i in range(k-1):
                    tilde0_bs[i] = bsa1[i]
                    tilde1_bs[k-1+i] = bsa1[i]
                for i in range(k-1):
                    tilde0_bs[k-1+i] = bsa2[i]
                    tilde1_bs[i] = bsa2[i]
                val = tilde_funcs[0][tuple(tilde0_bs)] + tilde_funcs[1][tuple(tilde1_bs)]
                if val > best_val:
                    best_val = val
        return best_val

    # Keep all tables that precede the pair being merged.
    new_tilde_funcs = []
    for i in range(pos-1):
        new_tilde_funcs.append(tilde_funcs[i])
    tilde1 = tilde_funcs[pos-1]
    tilde2 = tilde_funcs[pos]
    new_tilde = dict()
    # For every assignment of the outer bits (a1 of tilde1, a2 of tilde2)...
    for a1 in range(2**(k-1)):
        for a2 in range(2**(k-1)):
            bsa1 = ff.create_bitstring(a1, k-1)
            bsa2 = ff.create_bitstring(a2, k-1)
            new_tilde_inp = ff.create_bitstring(0, 2*k-2)
            for i in range(k-1):
                new_tilde_inp[i] = bsa1[i]
            for i in range(k-1):
                new_tilde_inp[k-1+i] = bsa2[i]
            bs_tilde1 = ff.create_bitstring(0, 2*k-2)
            bs_tilde2 = ff.create_bitstring(0, 2*k-2)
            for i in range(k-1):
                bs_tilde1[i] = bsa1[i]
            for i in range(k-1):
                bs_tilde2[k-1+i] = bsa2[i]
            # ...maximize over the k-1 bits shared by the two tables.
            best_bval = 0
            for b in range(2**(k-1)):
                bsb = ff.create_bitstring(b, k-1)
                for j in range(k-1):
                    bs_tilde1[k-1+j] = bsb[j]
                for j in range(k-1):
                    bs_tilde2[j] = bsb[j]
                bval = tilde1[tuple(bs_tilde1)] + tilde2[tuple(bs_tilde2)]
                if bval > best_bval:
                    best_bval = bval
            new_tilde[tuple(new_tilde_inp)] = best_bval
    new_tilde_funcs.append(new_tilde)
    return dp_solve_adjacentMK_tilde(new_tilde_funcs, pos-1, k)
# examples/example_basin_hopping.py — run basin hopping (and optionally local
# minimization) on a randomized MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.basin_hopping as bashop
import bloopy.algorithms.local_minimize as minim
import bloopy.utils as utils

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)
#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

ffunc = mk_func.get_fitness
bitstring_size = m

# Continuous algorithms require a search space to operate
# NOTE: Continuous algorithms can be applied to low dimensional discrete
# problems with many values per parameter. Bitstring based problems, i.e.
# only 2 values per dimension are poorly suited.
searchspace = utils.create_bitstring_searchspace(m)
converter = utils.bitstring_as_discrete(searchspace, mk_func.get_fitness)
fitness_func = converter.get_fitness

# Count the number of points in the search space (product of per-variable sizes).
count = 1
for vals in searchspace.values():
    count *= len(vals)
print("Points in searchspace:", count)

BASH = True
MINIM = False

if BASH:
    ## Run basin hopping
    # supported_methods = ["Nelder-Mead", "Powell", "CG", "L-BFGS-B", "COBYLA", "SLSQP", "BFGS"]
    method = "SLSQP"
    temperature = 1.0
    iterations = 10000
    test_bash = bashop.basin_hopping(fitness_func,
            1,
            searchspace,
            T=temperature,
            method=method)

    x = test_bash.solve(max_iter=iterations,
            max_time=20,#seconds
            stopping_fitness=0.98*best_dp_fit,
            max_funcevals=10000)

    print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
    print("Function evaluations:", x[2])

if MINIM:
    ## Run local minimization
    # supported_methods = ["Nelder-Mead", "Powell", "CG", "L-BFGS-B", "COBYLA", "SLSQP", "BFGS"]
    method = "CG"
    iterations = 10000
    test_minim = minim.local_minimizer(fitness_func,
            1,
            searchspace,
            method=method)

    x = test_minim.solve(max_iter=iterations,
            max_time=10,#seconds
            stopping_fitness=0.98*best_dp_fit,
            max_funcevals=10000)

    print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
    print("Function evaluations:", x[2])
# examples/example_differential_evolution.py — run differential evolution on a
# randomized MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.differential_evolution as de
import bloopy.utils as utils

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)
#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

ffunc = mk_func.get_fitness
bitstring_size = m

# Continuous algorithms require a search space to operate
# NOTE: Continuous algorithms can be applied to low dimensional discrete
# problems with many values per parameter. Bitstring based problems, i.e.
# only 2 values per dimension are poorly suited.
searchspace = utils.create_bitstring_searchspace(m)
converter = utils.bitstring_as_discrete(searchspace, mk_func.get_fitness)
fitness_func = converter.get_fitness

# Count the number of points in the search space.

count = 1
for vals in searchspace.values():
    count *= len(vals)
print("Points in searchspace:", count)

## Run differential evolution
method = "best1bin"
popsize = 150
recomb = 0.7
mutate = (0.2, 0.7)

minvar = 0.1
maxf = 10000
# NOTE(review): with maxf=10000, popsize=150, m=132 this evaluates to 0
# (int(10000/19800 - 1)) — confirm the intended formula.
iterations = int(maxf/(popsize * m) - 1)

test_diffevo = de.differential_evolution(fitness_func,
        1,
        searchspace,
        method=method,
        mutation=mutate,
        recombination=recomb,
        hillclimb=False,#For accurate feval measurements
        pop_size=popsize)

x = test_diffevo.solve(min_variance=minvar,
        max_iter=iterations,
        max_time=30,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=maxf)

print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
print("Function evaluations:", x[2])
# examples/example_dual_annealing.py — run dual annealing on a randomized MK
# function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.dual_annealing as dsa
import bloopy.utils as utils

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)
#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

ffunc = mk_func.get_fitness
bitstring_size = m

# Continuous algorithms require a search space to operate
# NOTE: Continuous algorithms can be applied to low dimensional discrete
# problems with many values per parameter. Bitstring based problems, i.e.
# only 2 values per dimension are poorly suited.
searchspace = utils.create_bitstring_searchspace(m)
converter = utils.bitstring_as_discrete(searchspace, mk_func.get_fitness)
fitness_func = converter.get_fitness

# Count the number of points in the search space.
count = 1
for vals in searchspace.values():
    count *= len(vals)
print("Points in searchspace:", count)

## Run dual annealing
# supported_methods = ['COBYLA','L-BFGS-B','SLSQP','CG','Powell','Nelder-Mead', 'BFGS', 'trust-constr']
method = "trust-constr"
iterations = 10000
test_dsa = dsa.dual_annealing(fitness_func,
        1,
        searchspace,
        method=method)

x = test_dsa.solve(max_iter=iterations,
        max_time=10,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=10000)

print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
print("Function evaluations:", x[2])
# examples/example_ga.py — run a genetic algorithm on a randomized MK function;
# compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.mutation_functions as mut  # NOTE(review): imported but unused here
import bloopy.reproductive_functions as rep
import bloopy.selection_functions as sel
import bloopy.algorithms.genetic_algorithm as ga

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
population_size = 1000
reproductor = rep.twopoint_crossover
selector = sel.tournament2_selection
bitstring_size = m

## We can optionally provide an input population
test_ga = ga.genetic_algorithm(fitness_func,
        reproductor,
        selector,
        population_size,
        bitstring_size,
        min_max_problem=1,
        input_pop=None)

x = test_ga.solve(min_variance=0.1,
        max_iter=1000,
        no_improve=300,
        max_time=15,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
# examples/example_gls.py — run genetic local search (GA + hillclimber) on a
# randomized MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.mutation_functions as mut  # NOTE(review): imported but unused here
import bloopy.reproductive_functions as rep
import bloopy.selection_functions as sel
import bloopy.algorithms.genetic_local_search as gls
import bloopy.algorithms.hillclimbers as hill

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
population_size = 30
reproductor = rep.uniform_crossover
selector = sel.select_best_half
bitstring_size = m
hillclimber = hill.RandomGreedyHillclimb

test_gls = gls.genetic_local_search(fitness_func,
        reproductor,
        selector,
        population_size,
        bitstring_size,
        hillclimber,
        min_max_problem=1,
        input_pop=None)

x = test_gls.solve(min_variance=0.1,
        max_iter=1000,
        no_improve=300,
        max_time=60,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
# examples/example_local_searchers.py — run multi-start local search (MLS) or
# iterative local search (ILS) variants on a randomized MK function.
import numpy as np
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.local_search as mls
import bloopy.algorithms.iterative_local_search as ils

k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)


fitness_func = mk_func.get_fitness
iterations = 1000
bitstring_size = m

MLS = False
ILS = True

if MLS:
    MLS_TYPE = "RandomGreedy"
    #MLS_TYPE = "Best"
    #MLS_TYPE = "OrderedGreedy"
    #MLS_TYPE = "Stochastic"

    restart = False
    if MLS_TYPE == "RandomGreedy":
        test_mls = mls.RandomGreedyMLS(fitness_func,
                bitstring_size,
                1,
                restart_search=restart)
    elif MLS_TYPE == "OrderedGreedy":
        test_mls = mls.OrderedGreedyMLS(fitness_func,
                bitstring_size,
                1,
                restart_search=restart,
                order=None)
    elif MLS_TYPE == "Best":
        test_mls = mls.BestMLS(fitness_func,
                bitstring_size,
                1)
    elif MLS_TYPE == "Stochastic":
        test_mls = mls.StochasticMLS(fitness_func,
                bitstring_size,
                1)

    x = test_mls.solve(iterations,
            max_time=60,#seconds
            stopping_fitness=0.98*best_dp_fit,
            max_funcevals=200000,
            verbose=True)
    print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))

if ILS:
    ILS_TYPE = "RandomGreedy"
    #ILS_TYPE = "Best"
    #ILS_TYPE = "OrderedGreedy"
    #ILS_TYPE = "Stochastic"

    restart = True
    walksize = 50
    no_improve = 100
    if ILS_TYPE == "RandomGreedy":
        test_ils = ils.RandomGreedyILS(fitness_func,
                bitstring_size,
                1,
                walksize,
                noimprove=no_improve,
                restart_search=restart)
    elif ILS_TYPE == "OrderedGreedy":
        test_ils = ils.OrderedGreedyILS(fitness_func,
                bitstring_size,
                1,
                walksize,
                noimprove=no_improve,
                restart_search=restart,
                order=None)
    elif ILS_TYPE == "Best":
        test_ils = ils.BestILS(fitness_func,
                bitstring_size,
                1,
                walksize,
                noimprove=no_improve)
    elif ILS_TYPE == "Stochastic":
        test_ils = ils.StochasticILS(fitness_func,
                bitstring_size,
                1,
                walksize,
                noimprove=no_improve)

    # NOTE(review): ILS stops only at the exact DP optimum (no 0.98 factor),
    # unlike the MLS branch above — confirm this asymmetry is intended.
    x = test_ils.solve(iterations,
            max_time=60,#seconds
            stopping_fitness=best_dp_fit,
            max_funcevals=200000,
            verbose=True)
    print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
# examples/example_ltga.py — run the Linkage Tree Genetic Algorithm on a
# randomized MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.ltga as ltga
import bloopy.mutation_functions as mut  # NOTE(review): imported but unused here
import bloopy.reproductive_functions as rep
import bloopy.selection_functions as sel

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
population_size = 200
bitstring_size = m
test_ltga = ltga.ltga(fitness_func,
        population_size,
        bitstring_size,
        min_max_problem=1,
        input_pop=None,
        maxdepth=8)

x = test_ltga.solve(min_variance=0.1,
        max_iter=1000,
        no_improve=300,
        max_time=30,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
# examples/example_ltga_greedy.py — run the greedy LTGA variant on a randomized
# MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.mutation_functions as mut  # NOTE(review): imported but unused here
import bloopy.reproductive_functions as rep
import bloopy.selection_functions as sel
import bloopy.algorithms.ltga_greedy as lg

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
population_size = 200
bitstring_size = m
test_ltga = lg.ltga_greedy(fitness_func,
        population_size,
        bitstring_size,
        min_max_problem=1,
        input_pop=None,
        maxdepth=8)

x = test_ltga.solve(min_variance=0.1,
        max_iter=1000,
        no_improve=300,
        max_time=30,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000)
print("\nBest fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))

# examples/example_mk_functions.py — sanity check: bruteforce and DP optima
# must agree on a small MK function (m = 5*(k-1) is small enough to enumerate).
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp

random.seed(123)

k = 4;
m = 5*(k-1);

randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

start1 = timer()
best_fit, sol = dp.bruteforce_MK_solve(mk_func)
end1 = timer()
print("Bruteforce evaluation time (ms):", 1000*(end1-start1))
print("Max fitness bruteforce:", best_fit)

start2 = timer()
best_dp_fit = dp.dp_solve_MK(mk_func)
end2 = timer()
print("DP evaluation time (ms):", 1000*(end2-start2))
print("Max fitness DP:", best_dp_fit)
# examples/example_pso.py — run particle swarm optimization (pyswarms) on a
# randomized MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.pso_pyswarms as pso
import bloopy.utils as utils

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)
#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

ffunc = mk_func.get_fitness
bitstring_size = m

# Continuous algorithms require a search space to operate
# NOTE: Continuous algorithms can be applied to low dimensional discrete
# problems with many values per parameter. Bitstring based problems, i.e.
# only 2 values per dimension are poorly suited.
searchspace = utils.create_bitstring_searchspace(m)
converter = utils.bitstring_as_discrete(searchspace, mk_func.get_fitness)
fitness_func = converter.get_fitness

# Count the number of points in the search space.

count = 1
for vals in searchspace.values():
    count *= len(vals)
print("Points in searchspace:", count)

## Run Particle Swarm Optimization (100,50)
nr_parts = 100
# NOTE(review): this rebinds k (previously the MK subfunction length) to the
# PSO neighbourhood size; m is no longer recomputed, so it is harmless here.
k = 50 # Should be less than nr_parts
# Scaled parameters equidistant to [0,scaling],
# Strangely, [0,1] is not necessarily the same as e.g. [0,100]
# for the pyswarms library
# NOTE: Users should try None, 1, 10, 100, 1000
scaling = 100

test_pso = pso.pyswarms_pso(fitness_func,
        1,
        searchspace,
        n_particles=nr_parts,
        k=k,
        scaling=scaling)

nprocs = None
iterations = 10000

x = test_pso.solve(max_iter=iterations,
        max_funcevals=8000,
        n_procs=nprocs)

print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
print("Function evaluations:", x[2])
# examples/example_randomsampling.py — random-sampling baseline on a randomized
# MK function; compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.random_sampling as randsl

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
bitstring_size = m

test_rand = randsl.random_sampling(fitness_func,
        bitstring_size,
        1)

x = test_rand.solve(max_time=15,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=10000,
        verbose=True)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
# examples/example_sa.py — run simulated annealing on a randomized MK function;
# compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.simulated_annealing as sa
import bloopy.algorithms.hillclimbers as hill

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
iterations = 1000
bitstring_size = m
explore_param = 6
hillclimber = hill.StochasticHillclimb
#hillclimber = hill.RandomGreedyHillclimb
#hillclimber = hill.BestHillclimb
test_sa = sa.simulated_annealing(fitness_func,
        bitstring_size,
        1,
        explore_param,
        hillclimb=hillclimber)

x = test_sa.solve(iterations,
        max_time=15,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000,
        verbose=True)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)))
print("Function evals:", x[2])
# examples/example_tabu.py — run tabu search on a randomized MK function;
# compare against the DP optimum.
import numpy as np
import sys
from timeit import default_timer as timer
import random

import fitness_functions as ff
import dynamic_programming as dp
import bloopy.algorithms.tabu_search as tabu

## Generate a (randomized) MK fitness function
k = 4;
m = 33*(k-1);
randomMK = True
if randomMK:
    mk_func = ff.random_MK_function(m, k)
    mk_func.generate()
else:
    mk_func = ff.adjacent_MK_function(m, k)
    mk_func.generate()

## Find optimal solution using dynamic programming for comparison
best_dp_fit = dp.dp_solve_MK(mk_func)
print("Max fitness DP:", best_dp_fit)

# (We also have bruteforce solves but it is exponentially slow.
# Only use it for bitstrings of sizes < 20 to check.)

#best_fit, sol = dp.bruteforce_MK_solve(mk_func)
#print("Max fitness bruteforce:", best_fit)

fitness_func = mk_func.get_fitness
iterations = 100000
bitstring_size = m
tabu_size = 1024

TABU_TYPE = "RandomGreedy"
#TABU_TYPE = "Best"

if TABU_TYPE == "RandomGreedy":
    test_tabu = tabu.RandomGreedyTabu(fitness_func,
            bitstring_size,
            1,
            tabu_size)
elif TABU_TYPE == "Best":
    test_tabu = tabu.BestTabu(fitness_func,
            bitstring_size,
            1,
            tabu_size)

x = test_tabu.solve(iterations,
        max_time=30,#seconds
        stopping_fitness=0.98*best_dp_fit,
        max_funcevals=200000,
        verbose=True)
print("Best fitness:",x[0],", fraction of optimal {0:.4f}".format(x[0]/float(best_dp_fit)), "Function evals:",x[2])

# examples/fitness_functions.py — bitstring helpers and MK fitness functions.
import numpy as np
import random
import copy
from operator import itemgetter
import multiprocessing as mp
from bitarray import bitarray

r"""
Fitness functions must take a bitstring (bitarray()) as input,
 and return the fitness (float)
"""

def create_bitstring(val, length):
    r"""
    Helper function for MK functions: encode an integer as a bitarray.

    Bit i of the result holds bit i of ``val`` (least significant first).

    Args:
        val (int): integer to be encoded as bitstring.
        length (int): specify length of bitstring.

    Returns:
        bitarray of the given length.

    Raises:
        Exception: if inputs are negative or ``val`` does not fit in
            ``length`` bits.
    """
    if length < 0 or val < 0:
        raise Exception("Invalid input for create bitstring")
    if 2**length -1 < val:
        raise Exception("Bitstring too short to encode value")
    bitstring = bitarray(length)
    bitstring.setall(False)
    bs = bin(val)
    # Walk bin(val) from its least-significant digit; stop at the 'b' of the
    # '0b' prefix once all significant digits have been copied.
    for k in range(length):
        if bs[-k-1] == 'b':
            break
        bitstring[k] = bool(int(bs[-k-1]))
    return bitstring

class MK_function:
    def __init__(self, m, k, bit_mask=None, fitness_map=None):
        r"""
        Base class for MK functions.

        Args:
            m (int): Bitstring length.
            k (int): Length of subfunctions present in MK function
                (m must be divisible by k-1).
            bit_mask (list, optional): per-subfunction input bit indices.
            fitness_map (list, optional): per-subfunction lookup tables.
        """
        if m < 2 or k < 2:
            raise Exception("Invalid values for MK-function")
        self.m = m
        self.k = k
        # Default mutable arguments are created fresh here to avoid sharing
        # state between instances.
        if bit_mask is None:
            self.bit_mask = []
        else:
            self.bit_mask = bit_mask
        if fitness_map is None:
            self.fitness_map = []
        else:
            self.fitness_map = fitness_map

    # NOTE(review): these stubs omit `self`; calling them on a base-class
    # instance would raise TypeError. Subclasses override them.
    def generate():
        pass

    def generate_subfunction():
        pass

    def apply_fitness():
        pass

class adjacent_MK_function(MK_function):
    def __init__(self, m, k):
        r"""
        Class for adjacent MK functions, i.e. the subfunctions are located
        adjacently in the bitstring.

        Args:
            m (int): Bitstring length.
            k (int): Length of subfunctions present in MK function
                (m must be divisible by k-1).
        """
        super().__init__(m, k)

    def generate(self):
        r"""
        Generates an adjacent MK function: one random subfunction per bit,
        reading k consecutive bits (wrapping around modulo m).
        """
        for j in range(self.m):
            mk_func = self.generate_subfunction()
            self.fitness_map.append(mk_func)
            mask = []
            for i in range(self.k):
                mask.append((j + i) % self.m)
            self.bit_mask.append(mask)

    def generate_subfunction(self):
        r"""
        Helper function to generate a random subfunction: a lookup table
        mapping each k-bit tuple to a unique value in 1..2**k (shuffled).
        """
        mk_func = dict()
        size = 2**self.k
        v = list(range(1,size+1))
        random.shuffle(v)
        for i in range(size):
            bitset = create_bitstring(i, self.k)
            mk_func[tuple(bitset)] = v[i]
        return mk_func

    def get_fitness(self, input_bs):
        r"""
        Returns fitness value of input bitstring

        Args:
            input_bs (bitarray()): Bitstring to determine fitness for.

        Returns:
            fit (float): Fitness value of input bitstring
        """
        # NOTE(review): only overlong inputs are rejected; a too-short input
        # would fail later with an IndexError/KeyError — consider `!=`.
        if len(input_bs) > self.m:
            raise Exception("Input string should be of size {}".format(self.m))
        fit = 0.0
        for j in range(len(self.fitness_map)):
            mk_func = self.fitness_map[j]
            current_mask = self.bit_mask[j]
            # Gather this subfunction's input bits and look up their value.
            getter = itemgetter(*current_mask)
            bitset = getter(input_bs)
            fit += mk_func[tuple(bitset)]
        return fit

class random_MK_function(adjacent_MK_function):
    def __init__(self, m, k):
        r"""
        Class for randomized MK functions, i.e. the input bits for each
        subfunction are located at random positions throughout the bitstring.

        Args:
            m (int): Bitstring length.
            k (int): Length of subfunctions present in MK function
                (m must be divisible by k-1).
        """
        super().__init__(m, k)
        # Original adjacent mask is preserved here once randomized.
        self.adj_bit_mask = None

    def set_random_bitmask(self):
        r"""
        Set the random positions for inputs for each subfunction by applying
        one global permutation of bit indices to the adjacent mask.
        """
        if self.adj_bit_mask is None:
            self.adj_bit_mask = copy.deepcopy(self.bit_mask) # Save original adjacent mask

        permutation = list(range(self.m))
        random.shuffle(permutation)
        for i in range(len(self.bit_mask)):
            for j in range(len(self.bit_mask[i])):
                pos = self.bit_mask[i][j]
                self.bit_mask[i][j] = permutation[pos]

    def generate(self):
        r"""
        Generates a randomized MK function: adjacent structure first, then
        the bit positions are permuted.
        """
        for j in range(self.m):
            mk_func = self.generate_subfunction()
            self.fitness_map.append(mk_func)
            mask = []
            for i in range(self.k):
                mask.append((j + i) % self.m)
            self.bit_mask.append(mask)
        self.set_random_bitmask()
optimal value so we can stop + max_funcevals=maxfeval, + verbose=True) +print("Best fitness found:", best_fit, "in", fevals, "evaluations | optimal fitness:", 1) diff --git a/examples/simple_continuous_example.py b/examples/simple_continuous_example.py new file mode 100644 index 0000000..8841e6f --- /dev/null +++ b/examples/simple_continuous_example.py @@ -0,0 +1,39 @@ +import numpy as np +import itertools as it + +from simple_discrete_example import categorical_fitness +import bloopy.algorithms.dual_annealing as dsa +import bloopy.utils as utils + +### Construct some categorical discrete space +searchspace = {"x1": [1,2,3,4,5,6], + "x2": ["foo", "bar"], + "x3": [16, 32, 64, 128], + "x4": ["a", "b", "c", "d", "e"]} + +# Continuous algorithms require a search space to operate +categorical_fit = categorical_fitness(searchspace) +disc_space = utils.discrete_space(categorical_fit.fitness, searchspace) + + +## Run dual annealing +# supported_methods = ['COBYLA','L-BFGS-B','SLSQP','CG','Powell','Nelder-Mead', 'BFGS', 'trust-constr'] +method = "trust-constr" +iterations = 10000 +minmax = -1 # -1 for minimization problem, +1 for maximization problem +if minmax == 1: + optfit = len(categorical_fit.possible_xs) +elif minmax == -1: + optfit = 1 +maxfeval = 100000 # Number of unique fitness queries MLS is allowed + +test_dsa = dsa.dual_annealing(disc_space.fitness, + minmax, + searchspace, + method=method) + +best_fit, _, fevals = test_dsa.solve(max_iter=iterations, + max_time=10,#seconds + stopping_fitness=optfit, + max_funcevals=maxfeval) +print("Best fitness found:", best_fit, "in", fevals, "evaluations | optimal fitness:", optfit) diff --git a/examples/simple_discrete_example.py b/examples/simple_discrete_example.py new file mode 100644 index 0000000..54f71b2 --- /dev/null +++ b/examples/simple_discrete_example.py @@ -0,0 +1,84 @@ +import numpy as np +import itertools as it + +import bloopy.algorithms.local_search as mls +import bloopy.utils as utils + +class 
categorical_fitness: + def __init__(self, sspace): + self.sspace = sspace + self.ssvalues = list(self.sspace.values())#Shorthand + + ### Give all possible (x1,x2,x3,x4) a random fitness value + var_names = sorted(self.sspace) + self.possible_xs = list(it.product(*(sspace[key] for key in var_names))) + print("Size of search space:", len(self.possible_xs)) + + # Define fitness function + self.fitness_values = np.arange(1, 1 + len(self.possible_xs)) + np.random.shuffle(self.fitness_values) + + # Calculate bitstring size + self.bsize = utils.calculate_bitstring_length(self.sspace) + print("Size of bitstring:", self.bsize) + + def map_listvariable_to_index(self, vec): + r"""For discrete categorical problems, bitstrings are implemented + as segments where one bit is active in each segment, and this bit + designates the parameter value for that variable.""" + # This function looks complicated, but it merely uniquely maps each + # possible vector to an index to get a random fitness value. + indices = [] + it = 0 + for j, var in enumerate(vec): + vals = self.ssvalues[j] + for k, x in enumerate(vals): + if x == var: + indices.append(k+it) + break + multip = len(self.possible_xs) + index = 0 + for i, key in enumerate(self.sspace.keys()): + add = indices[i] + multip /= len(self.sspace[key]) + add *= multip + index += add + return int(index) + + def fitness(self, vec): + # Map each entry to a unique index, which points to a random fitness value + return self.fitness_values[self.map_listvariable_to_index(vec)] + + +### Construct some categorical discrete space +searchspace = {"x1": [1,2,3,4,5,6], + "x2": ["foo", "bar"], + "x3": [16, 32, 64, 128], + "x4": ["a", "b", "c", "d", "e"]} + +categorical_fit = categorical_fitness(searchspace) + +# Create discrete space class +disc_space = utils.discrete_space(categorical_fit.fitness, searchspace) + + +### Configure Local Search algorithm (RandomGreedy MLS in this case) +iterations = 10000 # Max number of random restarts +minmax = -1 # -1 
for minimization problem, +1 for maximization problem +if minmax == 1: + optfit = len(categorical_fit.possible_xs) +elif minmax == -1: + optfit = 1 +maxfeval = 100000 # Number of unique fitness queries MLS is allowed + +test_mls = mls.RandomGreedyMLS(disc_space.fitness, + categorical_fit.bsize, + minmax, + searchspace=searchspace) + +best_fit, _, fevals = test_mls.solve(iterations, + max_time=10,#seconds + stopping_fitness=optfit,#1 is optimal value so we can stop + max_funcevals=maxfeval, + verbose=True) +print("Best fitness found:", best_fit, "in", fevals, "evaluations | optimal fitness:", optfit)