diff --git a/docs/api/_pyswarms.utils.rst b/docs/api/_pyswarms.utils.rst index bf8dd659..131289e2 100644 --- a/docs/api/_pyswarms.utils.rst +++ b/docs/api/_pyswarms.utils.rst @@ -10,3 +10,4 @@ functionalities. pyswarms.utils.functions pyswarms.utils.search pyswarms.utils.plotters + pyswarms.utils.reporter diff --git a/docs/api/pyswarms.utils.reporter.rst b/docs/api/pyswarms.utils.reporter.rst new file mode 100644 index 00000000..ba18e082 --- /dev/null +++ b/docs/api/pyswarms.utils.reporter.rst @@ -0,0 +1,10 @@ +pyswarms.utils.reporter package +================================ + +.. automodule:: pyswarms.utils.reporter.reporter + :members: + :undoc-members: + :show-inheritance: + :private-members: + :special-members: __init__ + diff --git a/pyswarms/backend/generators.py b/pyswarms/backend/generators.py index d475fbab..91f7dbf6 100644 --- a/pyswarms/backend/generators.py +++ b/pyswarms/backend/generators.py @@ -9,12 +9,15 @@ """ -# Import modules +import logging + import numpy as np -# Import from package +from ..utils.reporter import Reporter from .swarms import Swarm +rep = Reporter(logger=logging.getLogger(__name__)) + def generate_swarm( n_particles, dimensions, bounds=None, center=1.00, init_pos=None @@ -67,7 +70,12 @@ def generate_swarm( low=min_bounds, high=max_bounds, size=(n_particles, dimensions) ) except ValueError: + rep.logger.exception( + "Please check the size and value of bounds and dimensions" + ) raise + except TypeError: + rep.logger.exception("Invalid input type!") else: return pos @@ -103,7 +111,11 @@ def generate_discrete_swarm( size=(n_particles, dimensions) ).argsort(axis=1) except ValueError: - raise + rep.logger.exception( + "Please check the size and value of bounds and dimensions" + ) + except TypeError: + rep.logger.exception("Invalid input type!") else: return pos @@ -132,8 +144,12 @@ def generate_velocity(n_particles, dimensions, clamp=None): velocity = (max_velocity - min_velocity) * np.random.random_sample( size=(n_particles, dimensions) ) + min_velocity - except (ValueError, TypeError): - raise + except ValueError: + rep.logger.exception( + "Please check the size and value of clamp and dimensions" + ) + except TypeError: + rep.logger.exception("Invalid input type!") else: return velocity diff --git a/pyswarms/backend/operators.py b/pyswarms/backend/operators.py index fb42ecd2..7714f241 100644 --- a/pyswarms/backend/operators.py +++ b/pyswarms/backend/operators.py @@ -8,14 +8,13 @@ to specify how the swarm will behave. """ -# Import from stdlib import logging -# Import modules import numpy as np -# Create a logger -logger = logging.getLogger(__name__) +from ..utils.reporter import Reporter + +rep = Reporter(logger=logging.getLogger(__name__)) def compute_pbest(swarm): @@ -67,9 +66,9 @@ def compute_pbest(swarm): ~mask_cost, swarm.pbest_cost, swarm.current_cost ) except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format(type(swarm)) - logger.error(msg) - raise + rep.logger.exception( + "Please pass a Swarm class. You passed {}".format(type(swarm)) + ) else: return (new_pbest_pos, new_pbest_cost) @@ -137,13 +136,11 @@ def compute_velocity(swarm, clamp): ) updated_velocity = np.where(~mask, swarm.velocity, temp_velocity) except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format(type(swarm)) - logger.error(msg) - raise + rep.logger.exception( + "Please pass a Swarm class. 
You passed {}".format(type(swarm)) + ) except KeyError: - msg = "Missing keyword in swarm.options" - logger.error(msg) - raise + rep.logger.exception("Missing keyword in swarm.options") else: return updated_velocity @@ -187,8 +184,8 @@ def compute_position(swarm, bounds): temp_position = np.where(~mask, swarm.position, temp_position) position = temp_position except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format(type(swarm)) - logger.error(msg) - raise + rep.logger.exception( + "Please pass a Swarm class. You passed {}".format(type(swarm)) + ) else: return position diff --git a/pyswarms/backend/swarms.py b/pyswarms/backend/swarms.py index a6d4db06..71a6d730 100644 --- a/pyswarms/backend/swarms.py +++ b/pyswarms/backend/swarms.py @@ -8,9 +8,8 @@ as input to most backend cases. """ -# Import modules import numpy as np -from attr import attrs, attrib +from attr import attrib, attrs from attr.validators import instance_of diff --git a/pyswarms/backend/topology/base.py b/pyswarms/backend/topology/base.py index 38a670a7..63129ad5 100644 --- a/pyswarms/backend/topology/base.py +++ b/pyswarms/backend/topology/base.py @@ -12,12 +12,10 @@ :mod:`pyswarms.backend.swarms.Swarm` module. """ -# Import from stdlib import abc import logging -# Import from package -from ...utils.console_utils import cli_print +from ...utils.reporter import Reporter class Topology(abc.ABC): @@ -25,19 +23,17 @@ def __init__(self, static, **kwargs): """Initializes the class""" # Initialize logger - self.logger = logging.getLogger(__name__) + self.rep = Reporter(logger=logging.getLogger(__name__)) # Initialize attributes self.static = static self.neighbor_idx = None if self.static: - cli_print( - "Running on `dynamic` topology, neighbors are updated regularly." - "Set `static=True` for fixed neighbors.", - 1, - 0, - self.logger, + self.rep.log( + "Running on `dynamic` topology," + "set `static=True` for fixed neighbors.", + lvl=10, ) @abc.abstractmethod diff --git a/pyswarms/backend/topology/pyramid.py b/pyswarms/backend/topology/pyramid.py index f1af7713..53b0fea4 100644 --- a/pyswarms/backend/topology/pyramid.py +++ b/pyswarms/backend/topology/pyramid.py @@ -6,20 +6,15 @@ This class implements a pyramid topology. In this topology, the particles are connected by N-dimensional simplices. """ -# Import from stdlib import logging -# Import modules import numpy as np from scipy.spatial import Delaunay -# Import from package from .. import operators as ops +from ...utils.reporter import Reporter from .base import Topology -# Create a logger -logger = logging.getLogger(__name__) - class Pyramid(Topology): def __init__(self, static=False): @@ -32,6 +27,7 @@ def __init__(self, static=False): is static or dynamic """ super(Pyramid, self).__init__(static) + self.rep = Reporter(logger=logging.getLogger(__name__)) def compute_gbest(self, swarm): """Update the global best using a pyramid neighborhood approach @@ -102,10 +98,9 @@ def compute_gbest(self, swarm): best_neighbor[np.argmin(swarm.pbest_cost[best_neighbor])] ] except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format( - type(swarm) + self.rep.logger.exception( + "Please pass a Swarm class. 
You passed {}".format(type(swarm)) ) - logger.error(msg) raise else: return (best_pos, best_cost) diff --git a/pyswarms/backend/topology/random.py b/pyswarms/backend/topology/random.py index 91464ee6..6794af22 100644 --- a/pyswarms/backend/topology/random.py +++ b/pyswarms/backend/topology/random.py @@ -6,21 +6,16 @@ This class implements a random topology. All particles are connected in a random fashion. """ -# Import from stdlib -import logging import itertools +import logging -# Import modules import numpy as np from scipy.sparse.csgraph import connected_components, dijkstra -# Import from package from .. import operators as ops +from ...utils.reporter import Reporter from .base import Topology -# Create a logger -logger = logging.getLogger(__name__) - class Random(Topology): def __init__(self, static=False): @@ -32,6 +27,7 @@ def __init__(self, static=False): a boolean that decides whether the topology is static or dynamic""" super(Random, self).__init__(static) + self.rep = Reporter(logger=logging.getLogger(__name__)) def compute_gbest(self, swarm, k): """Update the global best using a random neighborhood approach @@ -91,10 +87,9 @@ def compute_gbest(self, swarm, k): ] except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format( - type(swarm) + self.rep.logger.exception( + "Please pass a Swarm class. You passed {}".format(type(swarm)) ) - logger.error(msg) raise else: return (best_pos, best_cost) diff --git a/pyswarms/backend/topology/ring.py b/pyswarms/backend/topology/ring.py index 660899c4..54425e90 100644 --- a/pyswarms/backend/topology/ring.py +++ b/pyswarms/backend/topology/ring.py @@ -9,20 +9,15 @@ optimizers. """ -# Import from stdlib import logging -# Import modules import numpy as np from scipy.spatial import cKDTree -# Import from package from .. import operators as ops +from ...utils.reporter import Reporter from .base import Topology -# Create a logger -logger = logging.getLogger(__name__) - class Ring(Topology): def __init__(self, static=False): @@ -34,6 +29,7 @@ def __init__(self, static=False): a boolean that decides whether the topology is static or dynamic""" super(Ring, self).__init__(static) + self.rep = Reporter(logger=logging.getLogger(__name__)) def compute_gbest(self, swarm, p, k): """Update the global best using a ring-like neighborhood approach @@ -86,10 +82,9 @@ def compute_gbest(self, swarm, p, k): best_neighbor[np.argmin(swarm.pbest_cost[best_neighbor])] ] except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format( - type(swarm) + self.rep.logger.exception( + "Please pass a Swarm class. You passed {}".format(type(swarm)) ) - logger.error(msg) raise else: return (best_pos, best_cost) diff --git a/pyswarms/backend/topology/star.py b/pyswarms/backend/topology/star.py index 8a8f3750..b341f7d9 100644 --- a/pyswarms/backend/topology/star.py +++ b/pyswarms/backend/topology/star.py @@ -9,23 +9,19 @@ optimizers. """ -# Import from stdlib import logging -# Import modules import numpy as np -# Import from package from .. 
import operators as ops +from ...utils.reporter import Reporter from .base import Topology -# Create a logger -logger = logging.getLogger(__name__) - class Star(Topology): def __init__(self): super(Star, self).__init__(static=True) + self.rep = Reporter(logger=logging.getLogger(__name__)) def compute_gbest(self, swarm): """Update the global best using a star topology @@ -68,10 +64,9 @@ def compute_gbest(self, swarm): best_pos = swarm.pbest_pos[np.argmin(swarm.pbest_cost)] best_cost = np.min(swarm.pbest_cost) except AttributeError: - msg = "Please pass a Swarm class. You passed {}".format( - type(swarm) + self.rep.logger.exception( + "Please pass a Swarm class. You passed {}".format(type(swarm)) ) - logger.error(msg) raise else: return (best_pos, best_cost) diff --git a/pyswarms/backend/topology/von_neumann.py b/pyswarms/backend/topology/von_neumann.py index 168ec8ec..bf33cabd 100644 --- a/pyswarms/backend/topology/von_neumann.py +++ b/pyswarms/backend/topology/von_neumann.py @@ -6,18 +6,16 @@ This class implements a Von Neumann topology. """ -# Import from stdlib import logging from .ring import Ring - -# Create a logger -logger = logging.getLogger(__name__) +from ...utils.reporter import Reporter class VonNeumann(Ring): def __init__(self): super(VonNeumann, self).__init__(static=True) + self.rep = Reporter(logger=logging.getLogger(__name__)) def compute_gbest(self, swarm, p, r): """Updates the global best using a neighborhood approach diff --git a/pyswarms/base/base_discrete.py b/pyswarms/base/base_discrete.py index 4f8ffd7f..9ac54099 100644 --- a/pyswarms/base/base_discrete.py +++ b/pyswarms/base/base_discrete.py @@ -28,14 +28,11 @@ """ -import os import abc -import yaml -import logging -import numpy as np -import logging.config from collections import namedtuple +import numpy as np + # Import from package from ..backend import create_swarm @@ -75,34 +72,6 @@ def assertions(self): if not all(key in self.options for key in ("c1", "c2", "w")): raise KeyError("Missing either c1, c2, or w in options") - def setup_logging( - self, - default_path="./config/logging.yaml", - default_level=logging.INFO, - env_key="LOG_CFG", - ): - """Setup logging configuration - - Parameters - ---------- - default_path : str (default is `./config/logging.yaml`) - the path where the logging configuration is stored - default_level: logging.LEVEL (default is `logging.INFO`) - the default logging level - env_key : str - the environment key for accessing the setup - """ - path = default_path - value = os.getenv(env_key, None) - if value: - path = value - if os.path.exists(path): - with open(path, "rt") as f: - config = yaml.safe_load(f.read()) - logging.config.dictConfig(config) - else: - logging.basicConfig(level=default_level) - def __init__( self, n_particles, @@ -147,7 +116,6 @@ def __init__( a dictionary containing the parameters for a specific optimization technique """ - self.setup_logging() # Initialize primary swarm attributes self.n_particles = n_particles self.dimensions = dimensions @@ -195,9 +163,7 @@ def _populate_history(self, hist): self.velocity_history.append(hist.velocity) @abc.abstractmethod - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -210,10 +176,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (the default is 1) - amount of steps 
for printing into console. - verbose : int (the default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for objective function diff --git a/pyswarms/base/base_single.py b/pyswarms/base/base_single.py index 2cee6cad..b804593b 100644 --- a/pyswarms/base/base_single.py +++ b/pyswarms/base/base_single.py @@ -30,15 +30,11 @@ :mod:`pyswarms.single.general_optimizer`: a more general PSO implementation with a custom topology """ -import os import abc -import yaml -import logging -import numpy as np -import logging.config from collections import namedtuple -# Import from package +import numpy as np + from ..backend import create_swarm @@ -108,34 +104,6 @@ def assertions(self): if not all(key in self.options for key in ("c1", "c2", "w")): raise KeyError("Missing either c1, c2, or w in options") - def setup_logging( - self, - default_path="./config/logging.yaml", - default_level=logging.INFO, - env_key="LOG_CFG", - ): - """Setup logging configuration - - Parameters - ---------- - default_path : str (default is `./config/logging.yaml`) - the path where the logging configuration is stored - default_level: logging.LEVEL (default is `logging.INFO`) - the default logging level - env_key : str - the environment key for accessing the setup - """ - path = default_path - value = os.getenv(env_key, None) - if value: - path = value - if os.path.exists(path): - with open(path, "rt") as f: - config = yaml.safe_load(f.read()) - logging.config.dictConfig(config) - else: - logging.basicConfig(level=default_level) - def __init__( self, n_particles, @@ -179,7 +147,6 @@ def __init__( ftol : float relative error in objective_func(best_pos) acceptable for convergence """ - self.setup_logging() # Initialize primary swarm attributes self.n_particles = n_particles self.dimensions = dimensions @@ -227,9 +194,7 @@ def _populate_history(self, hist): self.velocity_history.append(hist.velocity) @abc.abstractmethod - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -242,10 +207,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (the default is 1) - amount of steps for printing into console. - verbose : int (the default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for objective function diff --git a/pyswarms/discrete/binary.py b/pyswarms/discrete/binary.py index 93c481e6..be4d106d 100644 --- a/pyswarms/discrete/binary.py +++ b/pyswarms/discrete/binary.py @@ -51,17 +51,15 @@ Conference on Systems, Man, and Cybernetics, 1997. """ -# Import from stdlib import logging +from time import sleep -# Import modules import numpy as np -# Import from package from ..base import DiscreteSwarmOptimizer from ..backend.operators import compute_pbest from ..backend.topology import Ring -from ..utils.console_utils import cli_print, end_report +from ..utils.reporter import Reporter class BinaryPSO(DiscreteSwarmOptimizer): @@ -129,7 +127,7 @@ def __init__( the Euclidean (or L2) distance. 
""" # Initialize logger - self.logger = logging.getLogger(__name__) + self.rep = Reporter(logger=logging.getLogger(__name__)) # Assign k-neighbors and p-value as attributes self.k, self.p = options["k"], options["p"] # Initialize parent class @@ -148,10 +146,9 @@ def __init__( self.reset() # Initialize the topology self.top = Ring(static=False) + self.name = __name__ - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -163,10 +160,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (the default is 1) - amount of steps for printing into console. - verbose : int (the default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for objective function @@ -176,14 +171,14 @@ def optimize( the local best cost and the local best position among the swarm. """ - cli_print( - "Arguments Passed to Objective Function: {}".format(kwargs), - verbose, - 2, - logger=self.logger, + self.rep.log("Obj. func. args: {}".format(kwargs), lvl=10) + self.rep.log( + "Optimize for {} iters with {}".format(iters, self.options), lvl=20 ) - for i in range(iters): + for i in self.rep.pbar(iters, self.name): + if not fast: + sleep(0.01) # Compute cost for current position and personal best self.swarm.current_cost = objective_func( self.swarm.position, **kwargs @@ -200,15 +195,7 @@ def optimize( self.swarm, self.p, self.k ) # Print to console - if i % print_step == 0: - cli_print( - "Iteration {}/{}, cost: {}".format( - i + 1, iters, np.min(self.swarm.best_cost) - ), - verbose, - 2, - logger=self.logger, - ) + self.rep.hook(best_cost=self.swarm.best_cost) # Save to history hist = self.ToHistory( best_cost=self.swarm.best_cost, @@ -233,8 +220,11 @@ def optimize( # Obtain the final best_cost and the final best_position final_best_cost = self.swarm.best_cost.copy() final_best_pos = self.swarm.best_pos.copy() - end_report( - final_best_cost, final_best_pos, verbose, logger=self.logger + self.rep.log( + "Optimization finished | best cost: {}, best pos: {}".format( + final_best_cost, final_best_pos + ), + lvl=20, ) return (final_best_cost, final_best_pos) diff --git a/pyswarms/single/general_optimizer.py b/pyswarms/single/general_optimizer.py index 27edc268..af56bb54 100644 --- a/pyswarms/single/general_optimizer.py +++ b/pyswarms/single/general_optimizer.py @@ -55,17 +55,16 @@ Proceedings of the IEEE International Joint Conference on Neural Networks, 1995, pp. 1942-1948. 
""" -# Import from stdlib + import logging +from time import sleep -# Import modules import numpy as np -# Import from package -from ..base import SwarmOptimizer from ..backend.operators import compute_pbest -from ..backend.topology import Topology, Ring, Random, VonNeumann -from ..utils.console_utils import cli_print, end_report +from ..backend.topology import Random, Ring, Topology, VonNeumann +from ..base import SwarmOptimizer +from ..utils.reporter import Reporter class GeneralOptimizerPSO(SwarmOptimizer): @@ -158,7 +157,7 @@ def __init__( ) # Initialize logger - self.logger = logging.getLogger(__name__) + self.rep = Reporter(logger=logging.getLogger(__name__)) # Invoke assertions self.assertions() # Initialize the resettable attributes @@ -214,10 +213,9 @@ def __init__( "Delannoy number (number of neighbours) is" "between 0 and the no. of particles." ) + self.name = __name__ - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -229,10 +227,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (default is 1) - amount of steps for printing into console. - verbose : int (default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for the objective function @@ -241,15 +237,15 @@ def optimize( tuple the global best cost and the global best position. """ + if not fast: + sleep(0.01) - cli_print( - "Arguments Passed to Objective Function: {}".format(kwargs), - verbose, - 2, - logger=self.logger, + self.rep.log("Obj. func. args: {}".format(kwargs), lvl=10) + self.rep.log( + "Optimize for {} iters with {}".format(iters, self.options), lvl=20 ) - for i in range(iters): + for i in self.rep.pbar(iters, self.name): # Compute cost for current position and personal best self.swarm.current_cost = objective_func( self.swarm.position, **kwargs @@ -289,16 +285,7 @@ def optimize( self.swarm ) # Print to console - if i % print_step == 0: - cli_print( - "Iteration {}/{}, cost: {}".format( - i + 1, iters, self.swarm.best_cost - ), - verbose, - 2, - logger=self.logger, - ) - # Save to history + self.rep.hook(best_cost=self.swarm.best_cost) hist = self.ToHistory( best_cost=self.swarm.best_cost, mean_pbest_cost=np.mean(self.swarm.pbest_cost), @@ -325,7 +312,10 @@ def optimize( final_best_cost = self.swarm.best_cost.copy() final_best_pos = self.swarm.best_pos.copy() # Write report in log and return final cost and position - end_report( - final_best_cost, final_best_pos, verbose, logger=self.logger + self.rep.log( + "Optimization finished | best cost: {}, best pos: {}".format( + final_best_cost, final_best_pos + ), + lvl=20, ) return (final_best_cost, final_best_pos) diff --git a/pyswarms/single/global_best.py b/pyswarms/single/global_best.py index dbbc35ba..ebe0ffb0 100644 --- a/pyswarms/single/global_best.py +++ b/pyswarms/single/global_best.py @@ -55,17 +55,15 @@ Networks, 1995, pp. 1942-1948. 
""" -# Import from stdlib import logging +from time import sleep -# Import modules import numpy as np -# Import from package -from ..base import SwarmOptimizer from ..backend.operators import compute_pbest from ..backend.topology import Star -from ..utils.console_utils import cli_print, end_report +from ..base import SwarmOptimizer +from ..utils.reporter import Reporter class GlobalBestPSO(SwarmOptimizer): @@ -123,17 +121,16 @@ def __init__( ) # Initialize logger - self.logger = logging.getLogger(__name__) + self.rep = Reporter(logger=logging.getLogger(__name__)) # Invoke assertions self.assertions() # Initialize the resettable attributes self.reset() # Initialize the topology self.top = Star() + self.name = __name__ - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -145,10 +142,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (default is 1) - amount of steps for printing into console. - verbose : int (default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for the objective function @@ -158,14 +153,14 @@ def optimize( the global best cost and the global best position. """ - cli_print( - "Arguments Passed to Objective Function: {}".format(kwargs), - verbose, - 2, - logger=self.logger, + self.rep.log("Obj. func. args: {}".format(kwargs), lvl=10) + self.rep.log( + "Optimize for {} iters with {}".format(iters, self.options), lvl=20 ) - for i in range(iters): + for i in self.rep.pbar(iters, self.name): + if not fast: + sleep(0.01) # Compute cost for current position and personal best self.swarm.current_cost = objective_func( self.swarm.position, **kwargs @@ -182,16 +177,7 @@ def optimize( self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest( self.swarm ) - # Print to console - if i % print_step == 0: - cli_print( - "Iteration {}/{}, cost: {}".format( - i + 1, iters, self.swarm.best_cost - ), - verbose, - 2, - logger=self.logger, - ) + self.rep.hook(best_cost=self.swarm.best_cost) # Save to history hist = self.ToHistory( best_cost=self.swarm.best_cost, @@ -219,7 +205,10 @@ def optimize( final_best_cost = self.swarm.best_cost.copy() final_best_pos = self.swarm.best_pos.copy() # Write report in log and return final cost and position - end_report( - final_best_cost, final_best_pos, verbose, logger=self.logger + self.rep.log( + "Optimization finished | best cost: {}, best pos: {}".format( + final_best_cost, final_best_pos + ), + lvl=20, ) return (final_best_cost, final_best_pos) diff --git a/pyswarms/single/local_best.py b/pyswarms/single/local_best.py index 06cb8ad2..3716c3db 100644 --- a/pyswarms/single/local_best.py +++ b/pyswarms/single/local_best.py @@ -64,17 +64,15 @@ Symposium on Micromachine and Human Science, 1995, pp. 39–43. 
""" -# Import from stdlib import logging +from time import sleep -# Import modules import numpy as np -# Import from package -from ..base import SwarmOptimizer from ..backend.operators import compute_pbest from ..backend.topology import Ring -from ..utils.console_utils import cli_print, end_report +from ..base import SwarmOptimizer +from ..utils.reporter import Reporter class LocalBestPSO(SwarmOptimizer): @@ -171,16 +169,17 @@ def __init__( ftol=ftol, init_pos=init_pos, ) + # Initialize logger + self.rep = Reporter(logger=logging.getLogger(__name__)) # Invoke assertions self.assertions() # Initialize the resettable attributes self.reset() # Initialize the topology self.top = Ring(static=static) + self.name = __name__ - def optimize( - self, objective_func, iters, print_step=1, verbose=1, **kwargs - ): + def optimize(self, objective_func, iters, fast=False, **kwargs): """Optimize the swarm for a number of iterations Performs the optimization to evaluate the objective @@ -192,10 +191,8 @@ def optimize( objective function to be evaluated iters : int number of iterations - print_step : int (default is 1) - amount of steps for printing into console. - verbose : int (default is 1) - verbosity setting. + fast : bool (default is False) + if True, time.sleep is not executed kwargs : dict arguments for the objective function @@ -205,14 +202,14 @@ def optimize( the local best cost and the local best position among the swarm. """ - cli_print( - "Arguments Passed to Objective Function: {}".format(kwargs), - verbose, - 2, - logger=self.logger, + self.rep.log("Obj. func. args: {}".format(kwargs), lvl=10) + self.rep.log( + "Optimize for {} iters with {}".format(iters, self.options), lvl=20 ) - for i in range(iters): + for i in self.rep.pbar(iters, self.name): + if not fast: + sleep(0.01) # Compute cost for current position and personal best self.swarm.current_cost = objective_func( self.swarm.position, **kwargs @@ -228,16 +225,7 @@ def optimize( self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest( self.swarm, self.p, self.k ) - # Print to console - if i % print_step == 0: - cli_print( - "Iteration {}/{}, cost: {}".format( - i + 1, iters, np.min(self.swarm.best_cost) - ), - verbose, - 2, - logger=self.logger, - ) + self.rep.hook(best_cost=np.min(self.swarm.best_cost)) # Save to history hist = self.ToHistory( best_cost=self.swarm.best_cost, @@ -264,7 +252,11 @@ def optimize( # Obtain the final best_cost and the final best_position final_best_cost = self.swarm.best_cost.copy() final_best_pos = self.swarm.best_pos.copy() - end_report( - final_best_cost, final_best_pos, verbose, logger=self.logger + # Write report in log and return final cost and position + self.rep.log( + "Optimization finished | best cost: {}, best pos: {}".format( + final_best_cost, final_best_pos + ), + lvl=20, ) return (final_best_cost, final_best_pos) diff --git a/pyswarms/utils/__init__.py b/pyswarms/utils/__init__.py index feb2c0bd..ea80fdaf 100644 --- a/pyswarms/utils/__init__.py +++ b/pyswarms/utils/__init__.py @@ -1 +1,5 @@ """ Pacakge for various utilities """ + +from .reporter.reporter import Reporter + +__all__ = ["Reporter"] diff --git a/pyswarms/utils/console_utils.py b/pyswarms/utils/console_utils.py deleted file mode 100644 index 22ae118d..00000000 --- a/pyswarms/utils/console_utils.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- - -""" console_utils.py: various tools for printing into console """ - -# Import from __future__ -from __future__ import with_statement -from __future__ import 
absolute_import -from __future__ import print_function - -# Import modules - - -def cli_print(message, verbosity, threshold, logger): - """Helper function to print console output - - Parameters - ---------- - message : str - the message to be printed into the console - verbosity : int - verbosity setting of the user - threshold : int - threshold for printing - logger : logging.getLogger - logger instance - - """ - if verbosity >= threshold: - logger.info(message) - else: - pass - - -def end_report(cost, pos, verbosity, logger): - """Helper function to print a simple report at the end of the - run. This always has a threshold of 1. - - Parameters - ---------- - cost : float - final cost from the optimization procedure. - pos : numpy.ndarray or list - best position found - verbosity : int - verbosity setting of the user. - logger : logging.getLogger - logger instance - """ - - # Cuts the length of the best position if it's too long - if len(list(pos)) > 3: - out = ("[ " + 3 * "{:3f} " + "...]").format(*list(pos)) - else: - out = list(pos) - - template = ( - "================================\n" - "Optimization finished!\n" - "Final cost: {:06.4f}\n" - "Best value: {}\n" - ).format(cost, out) - if verbosity >= 1: - logger.info(template) diff --git a/pyswarms/utils/functions/single_obj.py b/pyswarms/utils/functions/single_obj.py index 63acd877..4dfc1234 100644 --- a/pyswarms/utils/functions/single_obj.py +++ b/pyswarms/utils/functions/single_obj.py @@ -36,12 +36,6 @@ - Three Hump Camel, threehump """ -# Import from __future__ -from __future__ import with_statement -from __future__ import absolute_import -from __future__ import print_function - -# Import modules import numpy as np diff --git a/pyswarms/utils/plotters/formatters.py b/pyswarms/utils/plotters/formatters.py index 027c14a5..b240ce76 100644 --- a/pyswarms/utils/plotters/formatters.py +++ b/pyswarms/utils/plotters/formatters.py @@ -6,9 +6,8 @@ This module implements helpful classes to format your plots or create meshes. """ -# Import modules import numpy as np -from attr import attrs, attrib +from attr import attrib, attrs from attr.validators import instance_of from matplotlib import cm, colors diff --git a/pyswarms/utils/plotters/plotters.py b/pyswarms/utils/plotters/plotters.py index 5d92d55c..6d87e395 100644 --- a/pyswarms/utils/plotters/plotters.py +++ b/pyswarms/utils/plotters/plotters.py @@ -65,7 +65,6 @@ speed of animation. 
""" -# Import modules import logging import matplotlib.pyplot as plt @@ -73,11 +72,10 @@ from matplotlib import animation, cm from mpl_toolkits.mplot3d import Axes3D -# Import from package from .formatters import Designer, Animator +from ..reporter import Reporter -# Initialize logger -logger = logging.getLogger(__name__) +rep = Reporter(logger=logging.getLogger(__name__)) def plot_cost_history( @@ -132,6 +130,7 @@ def plot_cost_history( ax.set_ylabel(designer.label[1], fontsize=designer.text_fontsize) ax.tick_params(labelsize=designer.text_fontsize) except TypeError: + rep.logger.exception("Please check your input type") raise else: return ax @@ -234,6 +233,7 @@ def plot_contour( repeat_delay=animator.repeat_delay, ) except TypeError: + rep.logger.exception("Please check your input type") raise else: return anim @@ -376,6 +376,7 @@ def plot_surface( repeat_delay=animator.repeat_delay, ) except TypeError: + rep.logger.exception("Please check your input type") raise else: return anim diff --git a/pyswarms/utils/reporter/__init__.py b/pyswarms/utils/reporter/__init__.py new file mode 100644 index 00000000..15390076 --- /dev/null +++ b/pyswarms/utils/reporter/__init__.py @@ -0,0 +1,5 @@ +"""Reporter module""" + +from .reporter import Reporter + +__all__ = ["Reporter"] diff --git a/pyswarms/utils/reporter/reporter.py b/pyswarms/utils/reporter/reporter.py new file mode 100644 index 00000000..01853c5d --- /dev/null +++ b/pyswarms/utils/reporter/reporter.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- +import os +import yaml +import pprint +import logging +import logging.config +from tqdm import trange + + +class Reporter(object): + """A Reporter object that abstracts various logging capabilities + + To set-up a Reporter, simply perform the following tasks: + + .. code-block:: python + + from pyswarms.utils import Reporter + + rep = Reporter() + rep.log("Here's my message", lvl=20) + + This will set-up a reporter with a default configuration that + logs to a file, `report.log`, on the current working directory. + You can change the log path by passing a string to the `log_path` + parameter: + + .. code-block:: python + + from pyswarms.utils import Reporter + + rep = Reporter(log_path="/path/to/log/file.log") + rep.log("Here's my message", lvl=20) + + If you are working on a module and you have an existing logger, + you can pass that logger instance during initialization: + + .. code-block:: python + + # mymodule.py + from pyswarms.utils import Reporter + + # An existing logger in a module + logger = logging.getLogger(__name__) + rep = Reporter(logger=logger) + + Lastly, if you have your own logger configuration (YAML file), + then simply pass that to the `config_path` parameter. This + overrides the default configuration (including `log_path`): + + .. code-block:: python + + from pyswarms.utils import Reporter + + rep = Reporter(config_path="/path/to/config/file.yml") + rep.log("Here's my message", lvl=20) + + """ + + def __init__( + self, log_path=None, config_path=None, logger=None, printer=None + ): + """Initialize the reporter + + Attributes + ---------- + log_path : str (default is :code:`None`) + Sets the default log path (overriden when :code:`path` is given to + :code:`_setup_logger()`) + config_path : str (default is :code:`None`) + Sets the configuration path for custom loggers + logger : logging.Logger (default is :code:`None`) + The logger object. By default, it creates a new :code:`Logger` + instance + printer : pprint.PrettyPrinter (default is :code:`None`) + A printer object. 
By default, it creates a :code:`PrettyPrinter` + instance with default values + """ + self.logger = logger or logging.getLogger(__name__) + self.printer = printer or pprint.PrettyPrinter() + self.log_path = log_path or (os.getcwd() + "/report.log") + self._bar_fmt = "{l_bar}{bar}|{n_fmt}/{total_fmt}{postfix}" + self._env_key = "LOG_CFG" + self._default_config = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "standard": { + "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + } + }, + "handlers": { + "default": { + "level": "INFO", + "class": "logging.StreamHandler", + "formatter": "standard", + }, + "file_default": { + "level": "INFO", + "formatter": "standard", + "class": "logging.handlers.RotatingFileHandler", + "filename": self.log_path, + "encoding": "utf8", + "maxBytes": 10485760, + "backupCount": 20, + }, + }, + "loggers": { + "": { + "handlers": ["default", "file_default"], + "level": "INFO", + "propagate": True, + } + }, + } + self._setup_logger(config_path) + + def log(self, msg, lvl=20, *args, **kwargs): + """Log a message within a set level + + This method abstracts the logging.Logger.log() method. We use this + method during major state changes, errors, or critical events during + the optimization run. + + You can check logging levels on this `link`_. In essence, DEBUG is 10, + INFO is 20, WARNING is 30, ERROR is 40, and CRITICAL is 50. + + .. link: https://docs.python.org/3/library/logging.html#logging-levels + + Parameters + ---------- + msg : str + Message to be logged + lvl : int (default is 20) + Logging level + """ + self.logger.log(lvl, msg, *args, **kwargs) + + def print(self, msg, verbosity, threshold=0): + """Print a message into console + + This method can be called during non-system calls or minor state + changes. In practice, we call this method when reporting the cost + on a given timestep. + + Parameters + ---------- + msg : str + Message to be printed + verbosity : int + Verbosity parameter, prints message when it's greater than the + threshold + threshold : int (default is 0) + Threshold parameer, prints message when it's lesser than the + verbosity + """ + if verbosity > threshold: + self.printer.pprint(msg) + else: + pass + + def _setup_logger(self, path=None): + """Set-up the logger with default values + + This method is called right after initializing the Reporter module. + If no path is supplied, then it loads a default configuration. + You can view the defaults via the Reporter._default_config attribute. + + + Parameters + ---------- + path : str + Path to a YAML configuration. If not supplied, uses + a default config. + """ + value = path or os.getenv(self._env_key, None) + try: + with open(value, "rt") as f: + config = yaml.safe_load(f.read()) + logging.config.dictConfig(config) + except (TypeError, FileNotFoundError): + self._load_defaults() + + def _load_defaults(self): + """Load default logging configuration""" + logging.config.dictConfig(self._default_config) + + def pbar(self, iters, desc=None): + """Create a tqdm iterable + + You can use this method to create progress bars. It uses a set + of abstracted methods from tqdm: + + .. 
code-block:: python + + from pyswarms.utils import Reporter + + rep = Reporter() + # Create a progress bar + for i in rep.pbar(100, name="Optimizer") + pass + + Parameters + ---------- + iters : int + Maximum range passed to the tqdm instance + desc : str + Name of the progress bar that will be displayed + + Returns + ------- + tqdm._tqdm.tqdm + A tqdm iterable + """ + self.t = trange(iters, desc=desc, bar_format=self._bar_fmt) + return self.t + + def hook(self, *args, **kwargs): + """Set a hook on the progress bar + + Method for creating a postfix in tqdm. In practice we use this + to report the best cost found during an iteration: + + .. code-block:: python + + from pyswarms.utils import Reporter + + rep = Reporter() + # Create a progress bar + for i in rep.pbar(100, name="Optimizer") + best_cost = compute() + rep.hook(best_cost=best_cost) + """ + self.t.set_postfix(*args, **kwargs) diff --git a/requirements_dev.txt b/requirements_dev.txt index a2d2ed01..cf24109b 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -12,7 +12,8 @@ PyYAML==3.13 # pyup: ignore future==0.16.0 scipy>=0.17.0 numpy>=1.13.0 +tqdm==4.24.0 matplotlib>=1.3.1 pytest==3.7.1 attrs==18.1.0 -pre-commit==1.10.5 \ No newline at end of file +pre-commit==1.10.5 diff --git a/setup.py b/setup.py index 44fc25c5..f8574a13 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ "mock==2.0.0", "pytest==3.6.4", "attrs==18.1.0", + "tqdm==4.24.0", "pre-commit", ] @@ -31,6 +32,7 @@ "numpy>=1.13.0", "matplotlib>=1.3.1", "mock==2.0.0", + "tqdm==4.24.0", "pytest==3.6.4", "attrs==18.1.0", "pre-commit", diff --git a/tests/optimizers/conftest.py b/tests/optimizers/conftest.py index cb78cf1f..58cc5e78 100644 --- a/tests/optimizers/conftest.py +++ b/tests/optimizers/conftest.py @@ -3,15 +3,12 @@ """Fixtures for tests""" -# Import modules import pytest -import numpy as np -# Import from package -from pyswarms.single import GlobalBestPSO, LocalBestPSO, GeneralOptimizerPSO +from pyswarms.backend.topology import Pyramid, Random, Ring, Star, VonNeumann from pyswarms.discrete import BinaryPSO +from pyswarms.single import GeneralOptimizerPSO, GlobalBestPSO, LocalBestPSO from pyswarms.utils.functions.single_obj import sphere -from pyswarms.backend.topology import Star, Ring, Pyramid, Random, VonNeumann @pytest.fixture(scope="module") @@ -21,7 +18,7 @@ def general_opt_history(topology): pso = GeneralOptimizerPSO( 10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology ) - pso.optimize(sphere, 1000, verbose=0) + pso.optimize(sphere, 1000) return pso @@ -42,7 +39,7 @@ def gbest_history(): """Returns a GlobalBestPSO instance run for 1000 iterations for checking history""" pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}) - pso.optimize(sphere, 1000, verbose=0) + pso.optimize(sphere, 1000) return pso @@ -51,7 +48,7 @@ def gbest_reset(): """Returns a GlobalBestPSO instance that has been run and reset to check default value""" pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}) - pso.optimize(sphere, 10, verbose=0) + pso.optimize(sphere, 10) pso.reset() return pso @@ -61,7 +58,7 @@ def lbest_history(): """Returns a LocalBestPSO instance run for 1000 iterations for checking history""" pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2}) - pso.optimize(sphere, 1000, verbose=0) + pso.optimize(sphere, 1000) return pso @@ -70,7 +67,7 @@ def lbest_reset(): """Returns a LocalBestPSO instance that has been run and reset to check default value""" pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 
2, "p": 2}) - pso.optimize(sphere, 10, verbose=0) + pso.optimize(sphere, 10) pso.reset() return pso @@ -80,7 +77,7 @@ def binary_history(): """Returns a BinaryPSO instance run for 1000 iterations for checking history""" pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2}) - pso.optimize(sphere, 1000, verbose=0) + pso.optimize(sphere, 1000) return pso @@ -89,7 +86,7 @@ def binary_reset(): """Returns a BinaryPSO instance that has been run and reset to check default value""" pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2}) - pso.optimize(sphere, 10, verbose=0) + pso.optimize(sphere, 10) pso.reset() return pso diff --git a/tests/optimizers/test_general_optimizer.py b/tests/optimizers/test_general_optimizer.py index eac1d5b2..7936a114 100644 --- a/tests/optimizers/test_general_optimizer.py +++ b/tests/optimizers/test_general_optimizer.py @@ -1,13 +1,11 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Import modules -import pytest import numpy as np +import pytest -# Import from package +from pyswarms.backend.topology import Random, Ring, VonNeumann from pyswarms.single import GeneralOptimizerPSO -from pyswarms.backend.topology import Star, Ring, Pyramid, Random, VonNeumann from pyswarms.utils.functions.single_obj import sphere @@ -236,5 +234,5 @@ def test_ftol_effect(options, topology): pso = GeneralOptimizerPSO( 10, 2, options=options, topology=topology, ftol=1e-1 ) - pso.optimize(sphere, 2000, verbose=0) + pso.optimize(sphere, 2000) assert np.array(pso.cost_history).shape != (2000,) diff --git a/tests/optimizers/test_global_best.py b/tests/optimizers/test_global_best.py index ef10f1d0..5a2020a5 100644 --- a/tests/optimizers/test_global_best.py +++ b/tests/optimizers/test_global_best.py @@ -1,11 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Import modules -import pytest import numpy as np +import pytest -# Import from package from pyswarms.single import GlobalBestPSO from pyswarms.utils.functions.single_obj import sphere @@ -109,5 +107,5 @@ def test_training_history_shape(gbest_history, history, expected_shape): def test_ftol_effect(options): """Test if setting the ftol breaks the optimization process accodingly""" pso = GlobalBestPSO(10, 2, options=options, ftol=1e-1) - pso.optimize(sphere, 2000, verbose=0) + pso.optimize(sphere, 2000) assert np.array(pso.cost_history).shape != (2000,) diff --git a/tests/optimizers/test_local_best.py b/tests/optimizers/test_local_best.py index b85db283..1a991df4 100644 --- a/tests/optimizers/test_local_best.py +++ b/tests/optimizers/test_local_best.py @@ -1,11 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Import modules -import pytest import numpy as np +import pytest -# Import from package from pyswarms.single import LocalBestPSO from pyswarms.utils.functions.single_obj import sphere @@ -130,5 +128,5 @@ def test_training_history_shape(lbest_history, history, expected_shape): def test_ftol_effect(options): """Test if setting the ftol breaks the optimization process accodingly""" pso = LocalBestPSO(10, 2, options=options, ftol=1e-1) - pso.optimize(sphere, 2000, verbose=0) + pso.optimize(sphere, 2000) assert np.array(pso.cost_history).shape != (2000,) diff --git a/tests/optimizers/test_objective_func_with_kwargs.py b/tests/optimizers/test_objective_func_with_kwargs.py index 169fd198..96228765 100644 --- a/tests/optimizers/test_objective_func_with_kwargs.py +++ b/tests/optimizers/test_objective_func_with_kwargs.py @@ -1,11 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Import modules 
import pytest import numpy as np -# Import from package from pyswarms.single import GlobalBestPSO, LocalBestPSO from pyswarms.utils.functions.single_obj import rosenbrock @@ -31,9 +29,7 @@ def test_global_kwargs(func): ) # run it - cost, pos = opt_ps.optimize( - func, 1000, print_step=10, verbose=3, a=1, b=100 - ) + cost, pos = opt_ps.optimize(func, 1000, a=1, b=100) assert np.isclose(cost, 0, rtol=1e-03) assert np.isclose(pos[0], 1.0, rtol=1e-03) @@ -56,7 +52,7 @@ def test_global_kwargs_without_named_arguments(func): ) # run it - cost, pos = opt_ps.optimize(func, 1000, verbose=3, a=1, b=100) + cost, pos = opt_ps.optimize(func, 1000, a=1, b=100) assert np.isclose(cost, 0, rtol=1e-03) assert np.isclose(pos[0], 1.0, rtol=1e-03) @@ -78,7 +74,7 @@ def test_global_no_kwargs(func): ) # run it - cost, pos = opt_ps.optimize(func, 1000, print_step=10, verbose=3) + cost, pos = opt_ps.optimize(func, 1000) assert np.isclose(cost, 0, rtol=1e-03) assert np.isclose(pos[0], 1.0, rtol=1e-03) @@ -100,9 +96,7 @@ def test_local_kwargs(func): ) # run it - cost, pos = opt_ps.optimize( - func, 1000, print_step=10, verbose=3, a=1, b=100 - ) + cost, pos = opt_ps.optimize(func, 1000, a=1, b=100) assert np.isclose(cost, 0, rtol=1e-03) assert np.isclose(pos[0], 1.0, rtol=1e-03) @@ -124,7 +118,7 @@ def test_local_no_kwargs(func): ) # run it - cost, pos = opt_ps.optimize(func, iters=1000, print_step=10, verbose=3) + cost, pos = opt_ps.optimize(func, iters=1000) assert np.isclose(cost, 0, rtol=1e-03) assert np.isclose(pos[0], 1.0, rtol=1e-03) @@ -147,7 +141,7 @@ def test_global_uneeded_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize(func, 1000, print_step=10, verbose=3, a=1) + cost, pos = opt_ps.optimize(func, 1000, a=1) assert "unexpected keyword" in str(excinfo.value) @@ -167,7 +161,7 @@ def test_global_missed_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize(func, 1000, print_step=10, verbose=3, a=1) + cost, pos = opt_ps.optimize(func, 1000, a=1) assert "missing 1 required positional argument" in str(excinfo.value) @@ -187,7 +181,7 @@ def test_local_uneeded_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize(func, 1000, print_step=10, verbose=3, a=1) + cost, pos = opt_ps.optimize(func, 1000, a=1) assert "unexpected keyword" in str(excinfo.value) @@ -207,7 +201,7 @@ def test_local_missed_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize(func, 1000, print_step=10, verbose=3, a=1) + cost, pos = opt_ps.optimize(func, 1000, a=1) assert "missing 1 required positional argument" in str(excinfo.value) @@ -227,9 +221,7 @@ def test_local_wrong_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize( - func, 1000, print_step=10, verbose=3, c=1, d=100 - ) + cost, pos = opt_ps.optimize(func, 1000, print_step=10, c=1, d=100) assert "unexpected keyword" in str(excinfo.value) @@ -249,7 +241,5 @@ def test_global_wrong_kwargs(func): # run it with pytest.raises(TypeError) as excinfo: - cost, pos = opt_ps.optimize( - func, 1000, print_step=10, verbose=3, c=1, d=100 - ) + cost, pos = opt_ps.optimize(func, 1000, c=1, d=100) assert "unexpected keyword" in str(excinfo.value)
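
The snippet below is a minimal usage sketch, not part of the patch, showing how a caller migrates from the removed ``print_step``/``verbose`` arguments to the new ``fast`` flag and the ``Reporter`` API introduced in this diff. The optimizer settings and the ``sphere`` objective mirror the test fixtures above; everything else is illustrative.

.. code-block:: python

    """Usage sketch for the post-patch API (assumptions noted in comments)."""
    import logging

    from pyswarms.single import GlobalBestPSO
    from pyswarms.utils import Reporter
    from pyswarms.utils.functions.single_obj import sphere

    # Before this patch: optimizer.optimize(sphere, 1000, print_step=10, verbose=3)
    # After this patch: per-iteration reporting goes through tqdm/Reporter, so only
    # `fast` remains; fast=True skips the per-iteration time.sleep(0.01).
    optimizer = GlobalBestPSO(
        n_particles=10, dimensions=2, options={"c1": 0.5, "c2": 0.7, "w": 0.5}
    )
    best_cost, best_pos = optimizer.optimize(sphere, iters=1000, fast=True)

    # Reporter replaces console_utils.cli_print/end_report. With the default
    # configuration it logs to the console and to ./report.log; lvl follows the
    # stdlib logging levels (DEBUG=10, INFO=20, WARNING=30, ...).
    rep = Reporter(logger=logging.getLogger(__name__))
    rep.log("Best cost after 1000 iterations: {}".format(best_cost), lvl=20)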