diff --git a/docs/api.rst b/docs/api.rst index 86e7838917..af262feaf8 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -77,25 +77,35 @@ InterruptTaskSet Exception .. autoexception:: locust.exception.InterruptTaskSet +Environment class +================= +.. autoclass:: locust.env.Environment + :members: + + .. _events: Event hooks =========== -The event hooks are instances of the **locust.events.EventHook** class: +Locust provides event hooks that can be used to extend Locust in various ways. -.. autoclass:: locust.events.EventHook +The following event hooks are available under :py:attr:`Environment.events `, +and there's also a reference to these events under ``locust.events`` that can be used at the module level +of locust scripts (since the Environment instance hasn't been created when the locustfile is imported). -.. note:: +.. autoclass:: locust.event.Events + :members: - It's highly recommended that you add a wildcard keyword argument in your event listeners - to prevent your code from breaking if new arguments are added in a future version. -Available hooks +EventHook class --------------- -The following event hooks are available under the **locust.events** module: +The event hooks are instances of the **locust.events.EventHook** class: + +.. autoclass:: locust.event.EventHook -.. automodule:: locust.events - :members: request_success, request_failure, locust_error, report_to_master, slave_report, hatch_complete, quitting +.. note:: + It's highly recommended that you add a wildcard keyword argument in your event listeners + to prevent your code from breaking if new arguments are added in a future version. diff --git a/docs/extending-locust.rst b/docs/extending-locust.rst index 1e9af96b48..352835e627 100644 --- a/docs/extending-locust.rst +++ b/docs/extending-locust.rst @@ -2,16 +2,21 @@ Extending Locust ================= -Locust comes with a number of events that provides hooks for extending locust in different ways. +Locust comes with a number of event hooks that can be used to extend Locust in different ways. -Event listeners can be registered at the module level in a locust file. Here's an example:: +Event hooks live on the Environment instance under the :py:attr:`events ` +attribute. However, since the Environment instance hasn't been created when locustfiles are imported, +the events object can also be accessed at the module level of the locustfile through the +:py:attr:`locust.events` variable. - from locust import events +Here's an example of how to set up an event listener:: + from locust import events + + @events.request_success.add_listener def my_success_handler(request_type, name, response_time, response_length, **kw): - print "Successfully fetched: %s" % (name) + print("Successfully made a request to: %s" % name) - events.request_success += my_success_handler .. note:: @@ -29,12 +34,16 @@ Adding Web Routes ================== Locust uses Flask to serve the web UI and therefore it is easy to add web end-points to the web UI.
-Just import the Flask app in your locustfile and set up a new route:: - - from locust import web +By listening to the :py:attr:`init ` event, we can retrieve a reference +to the Flask app instance and use that to set up a new route:: - @web.app.route("/added_page") - def my_added_page(): - return "Another page" + from locust import events + + @events.init.add_listener + def on_locust_init(web_ui, **kw): + @web_ui.app.route("/added_page") + def my_added_page(): + return "Another page" You should now be able to start locust and browse to http://127.0.0.1:8089/added_page + diff --git a/docs/index.rst b/docs/index.rst index f7757804d4..0e9e6e6b08 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -56,6 +56,7 @@ Other functionalities testing-other-systems extending-locust logging + use-as-lib API diff --git a/docs/installation.rst b/docs/installation.rst index a2c6d9c788..42f251138e 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -59,16 +59,10 @@ Once you've done that you should be able to just ``pip install locustio``. Installing Locust on macOS -------------------------- -The following is currently the shortest path to installing gevent on OS X using Homebrew. +Make sure you have a working installation of Python 3.6 or higher and follow the above +instructions. `Homebrew `_ can be used to install Python +on macOS. -#. Install `Homebrew `_. -#. Install libev (dependency for gevent): - -.. code-block:: console - - brew install libev - -#. Then follow the above instructions. Increasing Maximum Number of Open Files Limit --------------------------------------------- diff --git a/docs/quickstart.rst b/docs/quickstart.rst index f0cf05c944..947f46d1be 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -101,7 +101,7 @@ For example, for an exponentially distributed wait time with average of 1 second class WebsiteUser(HttpLocust): task_set = UserBehaviour - wait_time = lambda self: random.expovariate(1)*1000 + wait_time = lambda self: random.expovariate(1) Start Locust diff --git a/docs/running-locust-in-step-load-mode.rst b/docs/running-locust-in-step-load-mode.rst index c821706500..78c4485750 100644 --- a/docs/running-locust-in-step-load-mode.rst +++ b/docs/running-locust-in-step-load-mode.rst @@ -15,13 +15,13 @@ Options ======= ``--step-load`` ------------- +---------------- Enable Step Load mode to monitor how performance metrics varies when user load increases. ``--step-clients`` ------------ +------------------- Client count to increase by step in Step Load mode. Only used together with ``--step-load``. @@ -33,7 +33,7 @@ Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used to Running Locust in step load mode without the web UI ---------------------------------- +---------------------------------------------------- If you want to run Locust in step load mode without the web UI, you can do that with ``--step-clients`` and ``--step-time``: diff --git a/docs/testing-other-systems.rst b/docs/testing-other-systems.rst index 30eec9dd27..51f882d392 100644 --- a/docs/testing-other-systems.rst +++ b/docs/testing-other-systems.rst @@ -4,8 +4,8 @@ Testing other systems using custom clients Locust was built with HTTP as its main target. However, it can easily be extended to load test any request/response based system, by writing a custom client that triggers -:py:attr:`request_success ` and -:py:attr:`request_failure ` events. +:py:attr:`request_success ` and +:py:attr:`request_failure ` events. 
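Before the full XML-RPC sample below, here is a minimal sketch of the general pattern such a custom client can follow. The ``transport`` object and its ``send()`` method are hypothetical placeholders for whatever protocol library you are wrapping, and the Environment reference is assumed to be handed to the client by your Locust class (the XML-RPC example below does this via ``_locust_environment``)::

    import time

    class CustomClient:
        """
        Wraps a hypothetical transport object, times each call and reports it
        to Locust's statistics by firing request_success/request_failure.
        """
        def __init__(self, transport, environment):
            self.transport = transport
            # reference to the locust Environment whose event hooks we fire
            self.environment = environment

        def send(self, name, payload):
            start_time = time.time()
            try:
                result = self.transport.send(payload)
            except Exception as e:
                total_time = int((time.time() - start_time) * 1000)
                self.environment.events.request_failure.fire(
                    request_type="custom", name=name, response_time=total_time, exception=e,
                )
                raise
            else:
                total_time = int((time.time() - start_time) * 1000)
                self.environment.events.request_success.fire(
                    request_type="custom", name=name, response_time=total_time,
                    response_length=len(result) if result else 0,
                )
                return result

Timing each call and firing exactly one of the two events per request is what makes the calls show up in Locust's statistics.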
Sample XML-RPC Locust client ============================ @@ -19,10 +19,11 @@ If you've written Locust tests before, you'll recognize the class called *ApiUse Locust class that has a *TaskSet* class with *tasks* in its *task_set* attribute. However, the *ApiUser* inherits from *XmlRpcLocust* that you can see right above ApiUser. The *XmlRpcLocust* class provides an instance of XmlRpcClient under the *client* attribute. The *XmlRpcClient* is a wrapper around the standard -library's :py:class:`xmlrpclib.ServerProxy`. It basically just proxies the function calls, but with the -important addition of firing :py:attr:`locust.events.request_success` and :py:attr:`locust.events.request_failure` -events, which will make all calls reported in Locust's statistics. +library's :py:class:`xmlrpc.client.ServerProxy`. It basically just proxies the function calls, but with the +important addition of firing :py:attr:`locust.event.Events.request_success` and :py:attr:`locust.event.Events.request_failure` +events, which will record all calls in Locust's statistics. Here's an implementation of an XML-RPC server that would work as a server for the code above: .. literalinclude:: ../examples/custom_xmlrpc_client/server.py + diff --git a/docs/use-as-lib.rst b/docs/use-as-lib.rst new file mode 100644 index 0000000000..ad9e8811b1 --- /dev/null +++ b/docs/use-as-lib.rst @@ -0,0 +1,46 @@ +========================== +Using Locust as a library +========================== + +It's possible to use Locust as a library instead of running Locust by invoking the ``locust`` command. + +Here's an example:: + + import gevent + from locust import HttpLocust, TaskSet, task, between + from locust.runners import LocalLocustRunner + from locust.env import Environment + from locust.stats import stats_printer + from locust.log import setup_logging + from locust.web import WebUI + + setup_logging("INFO", None) + + + class User(HttpLocust): + wait_time = between(1, 3) + host = "https://docs.locust.io" + + class task_set(TaskSet): + @task + def my_task(self): + self.client.get("/") + + @task + def task_404(self): + self.client.get("/non-existing-path") + + # setup Environment and Runner + env = Environment(locust_classes=[User]) + runner = LocalLocustRunner(environment=env) + # start a WebUI instance + web_ui = WebUI(runner=runner, environment=env) + gevent.spawn(lambda: web_ui.start("127.0.0.1", 8089)) + + # start a greenlet that periodically outputs the current stats + gevent.spawn(stats_printer(env.stats)) + + # start the test + runner.start(1, hatch_rate=10) + # wait for the greenlets (indefinitely) + runner.greenlet.join() diff --git a/docs/writing-a-locustfile.rst b/docs/writing-a-locustfile.rst index d48e488b7b..1c0b02def2 100644 --- a/docs/writing-a-locustfile.rst +++ b/docs/writing-a-locustfile.rst @@ -473,61 +473,50 @@ Example: for i in range(10): self.client.get("/blog?id=%i" % i, name="/blog?id=[id]") -Common libraries -================= -Often, people wish to group multiple locustfiles that share common libraries. In that case, it is important -to define the *project root* to be the directory where you invoke locust, and it is suggested that all -locustfiles live somewhere beneath the project root. +How to structure your test code +================================ -A flat file structure works out of the box: +It's important to remember that the locustfile.py is just an ordinary Python module that is imported +by Locust. From this module you're free to import other python code just as you normally would +in any Python program. 
The current working directory is automatically added to python's ``sys.path``, +so any python file/module/packages that resides in the working directory can be imported using the +python ``import`` statement. -* project root +For small tests, keeping all of the test code in a single ``locustfile.py`` should work fine, but for +larger test suites, you'll probably want to split the code into multiple files and directories. - * ``commonlib_config.py`` +How you structure the test source code is ofcourse entirely up to you, but we recommend that you +follow Python best practices. Here's an example file structure of an imaginary Locust project: - * ``commonlib_auth.py`` - - * ``locustfile_web_app.py`` - - * ``locustfile_api.py`` - - * ``locustfile_ecommerce.py`` - -The locustfiles may import common libraries using, e.g. ``import commonlib_auth``. This approach does not -cleanly separate common libraries from locust files, however. - -Subdirectories can be a cleaner approach (see example below), but locust will only import modules relative to -the directory in which the running locustfile is placed. If you wish to import from your project root (i.e. the -location where you are running the locust command), make sure to write ``sys.path.append(os.getcwd())`` in your -locust file(s) before importing any common libraries---this will make the project root (i.e. the current -working directory) importable. - -* project root - - * ``__init__.py`` +* Project root * ``common/`` - + * ``__init__.py`` - + * ``auth.py`` * ``config.py`` + * ``locustfile.py`` + * ``requirements.txt`` (External Python dependencies is often kept in a requirements.txt) - * ``auth.py`` +A project with multiple different locustfiles could also keep them in a separate subdirectory: - * ``locustfiles/`` +* Project root + * ``common/`` + * ``__init__.py`` - - * ``web_app.py`` - + * ``auth.py`` + * ``config.py`` + * ``locustfiles/`` + * ``api.py`` + * ``website.py`` + * ``requirements.txt`` - * ``ecommerce.py`` -With the above project structure, your locust files can import common libraries using: +With any ofthe above project structure, your locustfile can import common libraries using: .. 
code-block:: python - sys.path.append(os.getcwd()) import common.auth diff --git a/examples/add_command_line_argument.py b/examples/add_command_line_argument.py new file mode 100644 index 0000000000..f684cd03ab --- /dev/null +++ b/examples/add_command_line_argument.py @@ -0,0 +1,26 @@ +from locust import HttpLocust, TaskSet, task, between +from locust import events + + +@events.init_command_line_parser.add_listener +def _(parser): + parser.add_argument( + '--custom-argument', + help="It's working" + ) + +@events.init.add_listener +def _(environment, **kw): + print("Custom argument supplied: %s" % environment.options.custom_argument) + + +class WebsiteUser(HttpLocust): + """ + Locust user class that does requests to the locust web server running on localhost + """ + host = "http://127.0.0.1:8089" + wait_time = between(2, 5) + class task_set(TaskSet): + @task + def my_task(self): + pass diff --git a/examples/custom_xmlrpc_client/server.py b/examples/custom_xmlrpc_client/server.py index 2865dc16fe..f202ed73a6 100644 --- a/examples/custom_xmlrpc_client/server.py +++ b/examples/custom_xmlrpc_client/server.py @@ -1,6 +1,6 @@ import random import time -from SimpleXMLRPCServer import SimpleXMLRPCServer +from xmlrpc.server import SimpleXMLRPCServer def get_time(): diff --git a/examples/custom_xmlrpc_client/xmlrpc_locustfile.py b/examples/custom_xmlrpc_client/xmlrpc_locustfile.py index c7dfca01b8..0b3987183c 100644 --- a/examples/custom_xmlrpc_client/xmlrpc_locustfile.py +++ b/examples/custom_xmlrpc_client/xmlrpc_locustfile.py @@ -1,27 +1,30 @@ import time -import xmlrpclib +from xmlrpc.client import ServerProxy, Fault from locust import Locust, TaskSet, events, task, between -class XmlRpcClient(xmlrpclib.ServerProxy): +class XmlRpcClient(ServerProxy): """ Simple, sample XML RPC client implementation that wraps xmlrpclib.ServerProxy and fires locust events on request_success and request_failure, so that all requests gets tracked in locust's statistics. """ + + _locust_environment = None + def __getattr__(self, name): - func = xmlrpclib.ServerProxy.__getattr__(self, name) + func = ServerProxy.__getattr__(self, name) def wrapper(*args, **kwargs): start_time = time.time() try: result = func(*args, **kwargs) - except xmlrpclib.Fault as e: + except Fault as e: total_time = int((time.time() - start_time) * 1000) - events.request_failure.fire(request_type="xmlrpc", name=name, response_time=total_time, exception=e) + self._locust_environment.events.request_failure.fire(request_type="xmlrpc", name=name, response_time=total_time, exception=e) else: total_time = int((time.time() - start_time) * 1000) - events.request_success.fire(request_type="xmlrpc", name=name, response_time=total_time, response_length=0) + self._locust_environment.events.request_success.fire(request_type="xmlrpc", name=name, response_time=total_time, response_length=0) # In this example, I've hardcoded response_length=0. 
If we would want the response length to be # reported correctly in the statistics, we would probably need to hook in at a lower level @@ -36,6 +39,7 @@ class XmlRpcLocust(Locust): def __init__(self, *args, **kwargs): super(XmlRpcLocust, self).__init__(*args, **kwargs) self.client = XmlRpcClient(self.host) + self.client._locust_environment = self.environment class ApiUser(XmlRpcLocust): diff --git a/examples/events.py b/examples/events.py index fca5c11673..b87d71da59 100644 --- a/examples/events.py +++ b/examples/events.py @@ -5,7 +5,8 @@ track the sum of the content-length header in all successful HTTP responses """ -from locust import HttpLocust, TaskSet, events, task, web, between +from locust import HttpLocust, TaskSet, task, web, between +from locust import events class MyTaskSet(TaskSet): @@ -19,25 +20,38 @@ def stats(l): class WebsiteUser(HttpLocust): host = "http://127.0.0.1:8089" - between(2, 5) + wait_time = between(2, 5) task_set = MyTaskSet -""" -We need somewhere to store the stats. - -On the master node stats will contain the aggregated sum of all content-lengths, -while on the slave nodes this will be the sum of the content-lengths since the -last stats report was sent to the master -""" stats = {"content-length":0} +@events.init.add_listener +def locust_init(environment, web_ui, **kwargs): + """ + We need somewhere to store the stats. + + On the master node stats will contain the aggregated sum of all content-lengths, + while on the slave nodes this will be the sum of the content-lengths since the + last stats report was sent to the master + """ + if web_ui: + # this code is only run on the master node (the web_ui instance doesn't exist on slaves) + @web_ui.app.route("/content-length") + def total_content_length(): + """ + Add a route to the Locust web app, where we can see the total content-length + """ + return "Total content-length recieved: %i" % stats["content-length"] + +@events.request_success.add_listener def on_request_success(request_type, name, response_time, response_length): """ Event handler that get triggered on every successful request """ stats["content-length"] += response_length +@events.report_to_master.add_listener def on_report_to_master(client_id, data): """ This event is triggered on the slave instances every time a stats report is @@ -47,6 +61,7 @@ def on_report_to_master(client_id, data): data["content-length"] = stats["content-length"] stats["content-length"] = 0 +@events.slave_report.add_listener def on_slave_report(client_id, data): """ This event is triggered on the master instance when a new stats report arrives @@ -54,15 +69,3 @@ def on_slave_report(client_id, data): stats dict. 
""" stats["content-length"] += data["content-length"] - -# Hook up the event listeners -events.request_success += on_request_success -events.report_to_master += on_report_to_master -events.slave_report += on_slave_report - -@web.app.route("/content-length") -def total_content_length(): - """ - Add a route to the Locust web app, where we can see the total content-length - """ - return "Total content-length recieved: %i" % stats["content-length"] diff --git a/examples/semaphore_wait.py b/examples/semaphore_wait.py index 24df036196..1f7275d7bb 100644 --- a/examples/semaphore_wait.py +++ b/examples/semaphore_wait.py @@ -5,10 +5,11 @@ all_locusts_spawned = Semaphore() all_locusts_spawned.acquire() -def on_hatch_complete(**kw): - all_locusts_spawned.release() - -events.hatch_complete += on_hatch_complete +@events.init.add_listener +def _(environment, **kw): + @environment.events.hatch_complete.add_listener + def on_hatch_complete(**kw): + all_locusts_spawned.release() class UserTasks(TaskSet): def on_start(self): diff --git a/locust/argument_parser.py b/locust/argument_parser.py new file mode 100644 index 0000000000..c1e644dda8 --- /dev/null +++ b/locust/argument_parser.py @@ -0,0 +1,326 @@ +import os +import sys + +import configargparse + +import locust + +version = locust.__version__ + + +DEFAULT_CONFIG_FILES = ['~/.locust.conf','locust.conf'] + + +def _is_package(path): + """ + Is the given path a Python package? + """ + return ( + os.path.isdir(path) + and os.path.exists(os.path.join(path, '__init__.py')) + ) + +def find_locustfile(locustfile): + """ + Attempt to locate a locustfile, either explicitly or by searching parent dirs. + """ + # Obtain env value + names = [locustfile] + # Create .py version if necessary + if not names[0].endswith('.py'): + names.append(names[0] + '.py') + # Does the name contain path elements? + if os.path.dirname(names[0]): + # If so, expand home-directory markers and test for existence + for name in names: + expanded = os.path.expanduser(name) + if os.path.exists(expanded): + if name.endswith('.py') or _is_package(expanded): + return os.path.abspath(expanded) + else: + # Otherwise, start in cwd and work downwards towards filesystem root + path = os.path.abspath('.') + while True: + for name in names: + joined = os.path.join(path, name) + if os.path.exists(joined): + if name.endswith('.py') or _is_package(joined): + return os.path.abspath(joined) + parent_path = os.path.dirname(path) + if parent_path == path: + # we've reached the root path which has been checked this iteration + break + path = parent_path + # Implicit 'return None' if nothing was found + + +def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES): + parser = configargparse.ArgumentParser( + default_config_files=default_config_files, + auto_env_var_prefix="LOCUST_", + add_env_var_help=False, + add_help=add_help, + ) + parser.add_argument( + '-f', '--locustfile', + default='locustfile', + help="Python module file to import, e.g. '../other.py'. 
Default: locustfile" + ) + return parser + + +def parse_locustfile_option(args=None): + """ + Construct a command line parser that is only used to parse the -f argument so that we can + import the test scripts in case any of them adds additional command line arguments to the + parser + """ + parser = get_empty_argument_parser(add_help=False) + parser.add_argument( + '-h', '--help', + action='store_true', + default=False, + ) + options, _ = parser.parse_known_args(args=args) + + locustfile = find_locustfile(options.locustfile) + + if not locustfile: + if options.help: + # if --help is specified we'll call parse_options which will print the default help message + parse_options(args=args) + sys.stderr.write("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n") + sys.exit(1) + + if locustfile == "locust.py": + sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n") + sys.exit(1) + + return locustfile + + +def setup_parser_arguments(parser): + """ + Setup command-line options + + Takes a configargparse.ArgumentParser as argument and calls it's add_argument + for each of the supported arguments + """ + parser.add_argument( + '-H', '--host', + help="Host to load test in the following format: http://10.21.32.33" + ) + parser.add_argument( + '--web-host', + default="", + help="Host to bind the web interface to. Defaults to '' (all interfaces)" + ) + parser.add_argument( + '-P', '--web-port', + type=int, + default=8089, + help="Port on which to run web host" + ) + # A file that contains the current request stats. + parser.add_argument( + '--csv', '--csv-base-name', + dest='csvfilebase', + help="Store current request stats to files in CSV format.", + ) + # Adds each stats entry at every iteration to the _stats_history.csv file. + parser.add_argument( + '--csv-full-history', + action='store_true', + default=False, + dest='stats_history_enabled', + help="Store each stats entry in CSV format to _stats_history.csv file", + ) + # if locust should be run in distributed mode as master + parser.add_argument( + '--master', + action='store_true', + help="Set locust to run in distributed mode with this process as master" + ) + # if locust should be run in distributed mode as slave + parser.add_argument( + '--slave', + action='store_true', + help="Set locust to run in distributed mode with this process as slave" + ) + # master host options + parser.add_argument( + '--master-host', + default="127.0.0.1", + help="Host or IP address of locust master for distributed load testing. Only used when running with --slave. Defaults to 127.0.0.1." + ) + parser.add_argument( + '--master-port', + type=int, + default=5557, + help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --slave. Defaults to 5557." + ) + parser.add_argument( + '--master-bind-host', + default="*", + help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)." + ) + parser.add_argument( + '--master-bind-port', + type=int, + default=5557, + help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557." + ) + parser.add_argument( + '--expect-slaves', + type=int, + default=1, + help="How many slaves master should expect to connect before starting the test (only when --no-web used)." 
+ ) + # if we should print stats in the console + parser.add_argument( + '--no-web', + action='store_true', + help="Disable the web interface, and instead start running the test immediately. Requires -c and -t to be specified." + ) + # Number of clients + parser.add_argument( + '-c', '--clients', + type=int, + dest='num_clients', + default=1, + help="Number of concurrent Locust users. Only used together with --no-web" + ) + # Client hatch rate + parser.add_argument( + '-r', '--hatch-rate', + type=float, + default=1, + help="The rate per second in which clients are spawned. Only used together with --no-web" + ) + # Time limit of the test run + parser.add_argument( + '-t', '--run-time', + help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --no-web" + ) + # skip logging setup + parser.add_argument( + '--skip-log-setup', + action='store_true', + dest='skip_log_setup', + default=False, + help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults." + ) + # Enable Step Load mode + parser.add_argument( + '--step-load', + action='store_true', + help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified." + ) + # Number of clients to incease by Step + parser.add_argument( + '--step-clients', + type=int, + default=1, + help="Client count to increase by step in Step Load mode. Only used together with --step-load" + ) + # Time limit of each step + parser.add_argument( + '--step-time', + help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load" + ) + # log level + parser.add_argument( + '--loglevel', '-L', + default='INFO', + help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.", + ) + # log file + parser.add_argument( + '--logfile', + help="Path to log file. If not set, log will go to stdout/stderr", + ) + # if we should print stats in the console + parser.add_argument( + '--print-stats', + action='store_true', + help="Print stats in the console" + ) + # only print summary stats + parser.add_argument( + '--only-summary', + action='store_true', + help='Only print the summary stats' + ) + parser.add_argument( + '--no-reset-stats', + action='store_true', + help="[DEPRECATED] Do not reset statistics once hatching has been completed. This is now the default behavior. See --reset-stats to disable", + ) + parser.add_argument( + '--reset-stats', + action='store_true', + help="Reset statistics once hatching has been completed. Should be set on both master and slaves when running in distributed mode", + ) + # List locust commands found in loaded locust files/source files + parser.add_argument( + '-l', '--list', + action='store_true', + dest='list_commands', + help="Show list of possible locust classes and exit" + ) + # Display ratio table of all tasks + parser.add_argument( + '--show-task-ratio', + action='store_true', + help="print table of the locust classes' task execution ratio" + ) + # Display ratio table of all tasks in JSON format + parser.add_argument( + '--show-task-ratio-json', + action='store_true', + help="print json data of the locust classes' task execution ratio" + ) + # Version number (optparse gives you --version but we have to do it + # ourselves to get -V too. 
sigh) + parser.add_argument( + '-V', '--version', + action='version', + version='%(prog)s {}'.format(version), + ) + # set the exit code to post on errors + parser.add_argument( + '--exit-code-on-error', + type=int, + default=1, + help="sets the exit code to post on error" + ) + parser.add_argument( + '-s', '--stop-timeout', + action='store', + type=int, + dest='stop_timeout', + default=None, + help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed." + ) + parser.add_argument( + 'locust_classes', + nargs='*', + metavar='LocustClass', + ) + + +def get_parser(default_config_files=DEFAULT_CONFIG_FILES): + # get a parser that is only able to parse the -f argument + parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files) + # add all the other supported arguments + setup_parser_arguments(parser) + # fire event to provide a hook for locustscripts and plugins to add command line arguments + locust.events.init_command_line_parser.fire(parser=parser) + return parser + + +def parse_options(args=None): + parser = get_parser() + # parse command line and return options + options = parser.parse_args(args=args) + return options diff --git a/locust/clients.py b/locust/clients.py index 865b0fac66..8103ac7b2d 100644 --- a/locust/clients.py +++ b/locust/clients.py @@ -9,7 +9,6 @@ from urllib.parse import urlparse, urlunparse -from . import events from .exception import CatchResponseError, ResponseError absolute_http_url_regexp = re.compile(r"^https?://", re.I) @@ -46,10 +45,12 @@ class HttpSession(requests.Session): response, even if the response code is ok (2xx). The opposite also works, one can use catch_response to catch a request and then mark it as successful even if the response code was not (i.e 500 or 404). 
""" - def __init__(self, base_url, *args, **kwargs): + def __init__(self, base_url, request_success, request_failure, *args, **kwargs): super(HttpSession, self).__init__(*args, **kwargs) - + self.base_url = base_url + self.request_success = request_success + self.request_failure = request_failure # Check for basic authentication parsed_url = urlparse(self.base_url) @@ -127,7 +128,7 @@ def request(self, method, url, name=None, catch_response=False, **kwargs): if catch_response: response.locust_request_meta = request_meta - return ResponseContextManager(response) + return ResponseContextManager(response, request_success=self.request_success, request_failure=self.request_failure) else: if name: # Since we use the Exception message when grouping failures, in order to not get @@ -138,7 +139,7 @@ def request(self, method, url, name=None, catch_response=False, **kwargs): try: response.raise_for_status() except RequestException as e: - events.request_failure.fire( + self.request_failure.fire( request_type=request_meta["method"], name=request_meta["name"], response_time=request_meta["response_time"], @@ -146,7 +147,7 @@ def request(self, method, url, name=None, catch_response=False, **kwargs): exception=e, ) else: - events.request_success.fire( + self.request_success.fire( request_type=request_meta["method"], name=request_meta["name"], response_time=request_meta["response_time"], @@ -186,9 +187,11 @@ class ResponseContextManager(LocustResponse): _is_reported = False - def __init__(self, response): + def __init__(self, response, request_success, request_failure): # copy data from response to this object self.__dict__ = response.__dict__ + self._request_success = request_success + self._request_failure = request_failure def __enter__(self): return self @@ -223,7 +226,7 @@ def success(self): if response.status_code == 404: response.success() """ - events.request_success.fire( + self._request_success.fire( request_type=self.locust_request_meta["method"], name=self.locust_request_meta["name"], response_time=self.locust_request_meta["response_time"], @@ -247,7 +250,7 @@ def failure(self, exc): if isinstance(exc, str): exc = CatchResponseError(exc) - events.request_failure.fire( + self._request_failure.fire( request_type=self.locust_request_meta["method"], name=self.locust_request_meta["name"], response_time=self.locust_request_meta["response_time"], diff --git a/locust/contrib/fasthttp.py b/locust/contrib/fasthttp.py index dd14656c33..4fb8d9e794 100644 --- a/locust/contrib/fasthttp.py +++ b/locust/contrib/fasthttp.py @@ -15,7 +15,6 @@ from geventhttpclient.useragent import UserAgent, CompatRequest, CompatResponse, ConnectionError from geventhttpclient.response import HTTPConnectionClosed -from locust import events from locust.core import Locust from locust.exception import LocustError, CatchResponseError, ResponseError @@ -64,20 +63,21 @@ class FastHttpLocust(Locust): The client support cookies, and therefore keeps the session between HTTP requests. """ - def __init__(self): - super(FastHttpLocust, self).__init__() + def __init__(self, environment): + super().__init__(environment) if self.host is None: raise LocustError("You must specify the base host. Either in the host attribute in the Locust class, or on the command line using the --host option.") if not re.match(r"^https?://[^/]+", self.host, re.I): raise LocustError("Invalid host (`%s`), must be a valid base URL. E.g. 
http://example.com" % self.host) - self.client = FastHttpSession(base_url=self.host) + self.client = FastHttpSession(self.environment, base_url=self.host) class FastHttpSession(object): auth_header = None - def __init__(self, base_url, **kwargs): + def __init__(self, environment, base_url, **kwargs): + self.environment = environment self.base_url = base_url self.cookiejar = CookieJar() self.client = LocustUserAgent( @@ -183,12 +183,12 @@ def request(self, method, path, name=None, data=None, catch_response=False, stre if catch_response: response.locust_request_meta = request_meta - return ResponseContextManager(response) + return ResponseContextManager(response, environment=self.environment) else: try: response.raise_for_status() except FAILURE_EXCEPTIONS as e: - events.request_failure.fire( + self.environment.events.request_failure.fire( request_type=request_meta["method"], name=request_meta["name"], response_time=request_meta["response_time"], @@ -196,7 +196,7 @@ def request(self, method, path, name=None, data=None, catch_response=False, stre exception=e, ) else: - events.request_success.fire( + self.environment.events.request_success.fire( request_type=request_meta["method"], name=request_meta["name"], response_time=request_meta["response_time"], @@ -318,10 +318,12 @@ class ResponseContextManager(FastResponse): _is_reported = False - def __init__(self, response): + def __init__(self, response, environment): # copy data from response to this object self.__dict__ = response.__dict__ self._cached_content = response.content + # store reference to locust Environment + self.environment = environment def __enter__(self): return self @@ -356,7 +358,7 @@ def success(self): if response.status_code == 404: response.success() """ - events.request_success.fire( + self.environment.events.request_success.fire( request_type=self.locust_request_meta["method"], name=self.locust_request_meta["name"], response_time=self.locust_request_meta["response_time"], @@ -380,7 +382,7 @@ def failure(self, exc): if isinstance(exc, str): exc = CatchResponseError(exc) - events.request_failure.fire( + self.environment.events.request_failure.fire( request_type=self.locust_request_meta["method"], name=self.locust_request_meta["name"], response_time=self.locust_request_meta["response_time"], diff --git a/locust/core.py b/locust/core.py index b686df8eff..63cfe7807e 100644 --- a/locust/core.py +++ b/locust/core.py @@ -14,7 +14,6 @@ # See: https://github.com/requests/requests/issues/3752#issuecomment-294608002 monkey.patch_all() -from . 
import events from .clients import HttpSession from .exception import (InterruptTaskSet, LocustError, RescheduleTask, RescheduleTaskImmediately, StopLocust, MissingWaitTimeError) @@ -152,22 +151,24 @@ class User(Locust): _lock = gevent.lock.Semaphore() # Lock to make sure setup is only run once _state = False - def __init__(self): + def __init__(self, environment): super(Locust, self).__init__() # check if deprecated wait API is used deprecation.check_for_deprecated_wait_api(self) + self.environment = environment + with self._lock: if hasattr(self, "setup") and self._setup_has_run is False: self._set_setup_flag() try: self.setup() except Exception as e: - events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) + self.environment.events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) logger.error("%s\n%s", e, traceback.format_exc()) if hasattr(self, "teardown") and self._teardown_is_set is False: self._set_teardown_flag() - events.quitting += self.teardown + self.environment.events.quitting.add_listener(self.teardown) @classmethod def _set_setup_flag(cls): @@ -218,12 +219,16 @@ class HttpLocust(Locust): We don't need this feature most of the time, so disable it by default. """ - def __init__(self): - super(HttpLocust, self).__init__() + def __init__(self, *args, **kwargs): + super(HttpLocust, self).__init__(*args, **kwargs) if self.host is None: raise LocustError("You must specify the base host. Either in the host attribute in the Locust class, or on the command line using the --host option.") - session = HttpSession(base_url=self.host) + session = HttpSession( + base_url=self.host, + request_success=self.environment.events.request_success, + request_failure=self.environment.events.request_failure, + ) session.trust_env = self.trust_env self.client = session @@ -363,11 +368,11 @@ def __init__(self, parent): try: self.setup() except Exception as e: - events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) + self.locust.environment.events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) logger.error("%s\n%s", e, traceback.format_exc()) if hasattr(self, "teardown") and self._teardown_is_set is False: self._set_teardown_flag() - events.quitting += self.teardown + self.environment.events.quitting.add_listener(self.teardown) @classmethod def _set_setup_flag(cls): @@ -419,7 +424,7 @@ def run(self, *args, **kwargs): except GreenletExit: raise except Exception as e: - events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) + self.locust.environment.events.locust_error.fire(locust_instance=self, exception=e, tb=sys.exc_info()[2]) if self.locust._catch_exceptions: logger.error("%s\n%s", e, traceback.format_exc()) self.wait() diff --git a/locust/env.py b/locust/env.py new file mode 100644 index 0000000000..ab3cbec1b3 --- /dev/null +++ b/locust/env.py @@ -0,0 +1,60 @@ +from .event import Events + + +class Environment: + events = None + """ + Event hooks used by Locust internally, as well as to extend Locust's functionality + See :ref:`events` for available events. 
+ """ + + options = None + """Parsed command line options""" + + host = None + """Base URL of the target system""" + + reset_stats = False + """Determines if stats should be reset once all simulated users have been spawned""" + + step_load = False + """Determines if we're running in step load mode""" + + stop_timeout = None + """ + If set, the runner will try to stop the running users gracefully and wait this many seconds + before killing them hard. + """ + + master_host = "127.0.0.1" + """Hostname of master node that the slave should connect to""" + + master_port = 5557 + """Port of master node that the slave should connect to. Defaults to 5557.""" + + master_bind_host = "*" + """Hostname/interfaces that the master node should expect slaves to connect to. Defaults to '*' which means all interfaces.""" + + master_bind_port = 5557 + """Port that the master node should listen to and expect slaves to connect to. Defaults to 5557.""" + + def __init__( + self, + events=None, + options=None, + host=None, + reset_stats=False, + step_load=False, + stop_timeout=None, + ): + if events: + self.events = events + else: + self.events = Events() + + self.options = options + self.host = host + self.reset_stats = reset_stats + self.step_load = step_load + self.stop_timeout = stop_timeout + diff --git a/locust/event.py b/locust/event.py new file mode 100644 index 0000000000..bc4e36f040 --- /dev/null +++ b/locust/event.py @@ -0,0 +1,161 @@ +class EventHook(object): + """ + Simple event class used to provide hooks for different types of events in Locust. + + Here's how to use the EventHook class:: + + my_event = EventHook() + def on_my_event(a, b, **kw): + print("Event was fired with arguments: %s, %s" % (a, b)) + my_event.add_listener(on_my_event) + my_event.fire(a="foo", b="bar") + + If reverse is True, then the handlers will run in the reverse order + that they were inserted. + """ + + def __init__(self): + self._handlers = [] + + def add_listener(self, handler): + self._handlers.append(handler) + return handler + + def remove_listener(self, handler): + self._handlers.remove(handler) + + def fire(self, reverse=False, **kwargs): + if reverse: + handlers = reversed(self._handlers) + else: + handlers = self._handlers + for handler in handlers: + handler(**kwargs) + + +class Events: + request_success = EventHook + """ + *request_success* is fired when a request is completed successfully. + + Listeners should take the following arguments: + + * *request_type*: Request type method used + * *name*: Path to the URL that was called (or override name if it was used in the call to the client) + * *response_time*: Response time in milliseconds + * *response_length*: Content-length of the response + """ + + request_failure = EventHook + """ + *request_failure* is fired when a request fails + + Event is fired with the following arguments: + + * *request_type*: Request type method used + * *name*: Path to the URL that was called (or override name if it was used in the call to the client) + * *response_time*: Time in milliseconds until exception was thrown + * *response_length*: Content-length of the response + * *exception*: Exception instance that was thrown + """ + + locust_error = EventHook + """ + *locust_error* is fired when an exception occurs inside the execution of a Locust class.
+ + Event is fired with the following arguments: + + * *locust_instance*: Locust class instance where the exception occurred + * *exception*: Exception that was thrown + * *tb*: Traceback object (from sys.exc_info()[2]) + """ + + report_to_master = EventHook + """ + *report_to_master* is used when Locust is running in --slave mode. It can be used to attach + data to the dicts that are regularly sent to the master. It's fired regularly when a report + is to be sent to the master server. + + Note that the keys "stats" and "errors" are used by Locust and shouldn't be overridden. + + Event is fired with the following arguments: + + * *client_id*: The client id of the running locust process. + * *data*: Data dict that can be modified in order to attach data that should be sent to the master. + """ + + slave_report = EventHook + """ + *slave_report* is used when Locust is running in --master mode and is fired when the master + server receives a report from a Locust slave server. + + This event can be used to aggregate data from the locust slave servers. + + Event is fired with the following arguments: + + * *client_id*: Client id of the reporting locust slave + * *data*: Data dict with the data from the slave node + """ + + hatch_complete = EventHook + """ + *hatch_complete* is fired when all locust users have been spawned. + + Event is fired with the following arguments: + + * *user_count*: Number of users that were hatched + """ + + quitting = EventHook + """ + *quitting* is fired when the locust process is exiting + """ + + master_start_hatching = EventHook + """ + *master_start_hatching* is fired when we initiate the hatching process on the master. + + This event is especially useful to detect when the 'start' button is clicked on the web ui. + """ + + master_stop_hatching = EventHook + """ + *master_stop_hatching* is fired when we terminate the hatching process on the master. + + This event is especially useful to detect when the 'stop' button is clicked on the web ui. + """ + + locust_start_hatching = EventHook + """ + *locust_start_hatching* is fired when we initiate the hatching process on any locust worker. + """ + + locust_stop_hatching = EventHook + """ + *locust_stop_hatching* is fired when we terminate the hatching process on any locust worker. + """ + + init = EventHook + """ + *init* is fired when Locust is started, once the Environment instance and locust runner instance + have been created. This hook can be used by end-users' code to run code that requires access to + the Environment. For example, to register listeners to request_success, request_failure + or other events. + + Event is fired with the following arguments: + + * *environment*: Environment instance + """ + + init_command_line_parser = EventHook + """ + Event that can be used to add command line options to Locust + + Event is fired with the following arguments: + * *parser*: ArgumentParser instance + """ + + def __init__(self): + for name, value in vars(type(self)).items(): + if value == EventHook: + setattr(self, name, value()) diff --git a/locust/events.py b/locust/events.py deleted file mode 100644 index 9679c80430..0000000000 --- a/locust/events.py +++ /dev/null @@ -1,135 +0,0 @@ -import gevent - -class EventHook(object): - """ - Simple event class used to provide hooks for different types of events in Locust.
- - Here's how to use the EventHook class:: - - my_event = EventHook() - def on_my_event(a, b, **kw): - print "Event was fired with arguments: %s, %s" % (a, b) - my_event += on_my_event - my_event.fire(a="foo", b="bar") - - If reverse is True, then the handlers will run in the reverse order - that they were inserted - """ - - def __init__(self): - self._handlers = [] - - def __iadd__(self, handler): - self._handlers.append(handler) - return self - - def __isub__(self, handler): - self._handlers.remove(handler) - return self - - def fire(self, reverse=False, **kwargs): - if reverse: - self._handlers.reverse() - for handler in self._handlers: - handler(**kwargs) - -request_success = EventHook() -""" -*request_success* is fired when a request is completed successfully. - -Listeners should take the following arguments: - -* *request_type*: Request type method used -* *name*: Path to the URL that was called (or override name if it was used in the call to the client) -* *response_time*: Response time in milliseconds -* *response_length*: Content-length of the response -""" - -request_failure = EventHook() -""" -*request_failure* is fired when a request fails - -Event is fired with the following arguments: - -* *request_type*: Request type method used -* *name*: Path to the URL that was called (or override name if it was used in the call to the client) -* *response_time*: Time in milliseconds until exception was thrown -* *response_length*: Content-length of the response -* *exception*: Exception instance that was thrown -""" - -locust_error = EventHook() -""" -*locust_error* is fired when an exception occurs inside the execution of a Locust class. - -Event is fired with the following arguments: - -* *locust_instance*: Locust class instance where the exception occurred -* *exception*: Exception that was thrown -* *tb*: Traceback object (from sys.exc_info()[2]) -""" - -report_to_master = EventHook() -""" -*report_to_master* is used when Locust is running in --slave mode. It can be used to attach -data to the dicts that are regularly sent to the master. It's fired regularly when a report -is to be sent to the master server. - -Note that the keys "stats" and "errors" are used by Locust and shouldn't be overridden. - -Event is fired with the following arguments: - -* *client_id*: The client id of the running locust process. -* *data*: Data dict that can be modified in order to attach data that should be sent to the master. -""" - -slave_report = EventHook() -""" -*slave_report* is used when Locust is running in --master mode and is fired when the master -server receives a report from a Locust slave server. - -This event can be used to aggregate data from the locust slave servers. - -Event is fired with following arguments: - -* *client_id*: Client id of the reporting locust slave -* *data*: Data dict with the data from the slave node -""" - -hatch_complete = EventHook() -""" -*hatch_complete* is fired when all locust users has been spawned. - -Event is fire with the following arguments: - -* *user_count*: Number of users that was hatched -""" - -quitting = EventHook() -""" -*quitting* is fired when the locust process is exiting -""" - -master_start_hatching = EventHook() -""" -*master_start_hatching* is fired when we initiate the hatching process on the master. - -This event is especially useful to detect when the 'start' button is clicked on the web ui. -""" - -master_stop_hatching = EventHook() -""" -*master_stop_hatching* is fired when terminate the hatching process on the master. 
- -This event is especially useful to detect when the 'stop' button is clicked on the web ui. -""" - -locust_start_hatching = EventHook() -""" -*locust_start_hatching* is fired when we initiate the hatching process on any locust worker. -""" - -locust_stop_hatching = EventHook() -""" -*locust_stop_hatching* is fired when terminate the hatching process on any locust worker. -""" diff --git a/locust/main.py b/locust/main.py index 44727ba669..669ab3aa49 100644 --- a/locust/main.py +++ b/locust/main.py @@ -6,334 +6,27 @@ import socket import sys import time -import configargparse import gevent import locust -from . import events, runners, web +from .event import Events +from .argument_parser import parse_locustfile_option, parse_options from .core import HttpLocust, Locust +from .env import Environment from .inspectlocust import get_task_ratio_dict, print_task_ratio from .log import console_logger, setup_logging from .runners import LocalLocustRunner, MasterLocustRunner, SlaveLocustRunner from .stats import (print_error_report, print_percentile_stats, print_stats, stats_printer, stats_writer, write_stat_csvs) from .util.timespan import parse_timespan +from .web import WebUI _internals = [Locust, HttpLocust] version = locust.__version__ -def parse_options(args=None, default_config_files=['~/.locust.conf','locust.conf']): - """ - Handle command-line options with configargparse.ArgumentParser. - - Returns a two-tuple of parser + the output from parse_args() - """ - # Initialize - parser = configargparse.ArgumentParser(default_config_files=default_config_files, auto_env_var_prefix="LOCUST_", add_env_var_help=False) - - parser.add_argument( - '-H', '--host', - help="Host to load test in the following format: http://10.21.32.33" - ) - - parser.add_argument( - '--web-host', - default="", - help="Host to bind the web interface to. Defaults to '' (all interfaces)" - ) - - parser.add_argument( - '-P', '--port', '--web-port', - type=int, - default=8089, - help="Port on which to run web host" - ) - - parser.add_argument( - '-f', '--locustfile', - default='locustfile', - help="Python module file to import, e.g. '../other.py'. Default: locustfile" - ) - - # A file that contains the current request stats. - parser.add_argument( - '--csv', '--csv-base-name', - dest='csvfilebase', - help="Store current request stats to files in CSV format.", - ) - - # Adds each stats entry at every iteration to the _stats_history.csv file. - parser.add_argument( - '--csv-full-history', - action='store_true', - default=False, - dest='stats_history_enabled', - help="Store each stats entry in CSV format to _stats_history.csv file", - ) - - # if locust should be run in distributed mode as master - parser.add_argument( - '--master', - action='store_true', - help="Set locust to run in distributed mode with this process as master" - ) - - # if locust should be run in distributed mode as slave - parser.add_argument( - '--slave', - action='store_true', - help="Set locust to run in distributed mode with this process as slave" - ) - - # master host options - parser.add_argument( - '--master-host', - default="127.0.0.1", - help="Host or IP address of locust master for distributed load testing. Only used when running with --slave. Defaults to 127.0.0.1." - ) - - parser.add_argument( - '--master-port', - type=int, - default=5557, - help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --slave. Defaults to 5557." 
- ) - - parser.add_argument( - '--master-bind-host', - default="*", - help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)." - ) - - parser.add_argument( - '--master-bind-port', - type=int, - default=5557, - help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557." - ) - - parser.add_argument( - '--heartbeat-liveness', - type=int, - default=3, - help="set number of seconds before failed heartbeat from slave" - ) - - parser.add_argument( - '--heartbeat-interval', - type=int, - default=1, - help="set number of seconds delay between slave heartbeats to master" - ) - - parser.add_argument( - '--expect-slaves', - type=int, - default=1, - help="How many slaves master should expect to connect before starting the test (only when --no-web used)." - ) - - # if we should print stats in the console - parser.add_argument( - '--no-web', - action='store_true', - help="Disable the web interface, and instead start running the test immediately. Requires -c and -t to be specified." - ) - - # Number of clients - parser.add_argument( - '-c', '--clients', - type=int, - dest='num_clients', - default=1, - help="Number of concurrent Locust users. Only used together with --no-web" - ) - - # Client hatch rate - parser.add_argument( - '-r', '--hatch-rate', - type=float, - default=1, - help="The rate per second in which clients are spawned. Only used together with --no-web" - ) - - # Time limit of the test run - parser.add_argument( - '-t', '--run-time', - help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --no-web" - ) - - # skip logging setup - parser.add_argument( - '--skip-log-setup', - action='store_true', - dest='skip_log_setup', - default=False, - help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults." - ) - - # Enable Step Load mode - parser.add_argument( - '--step-load', - action='store_true', - help="Enable Step Load mode to monitor how performance metrics varies when user load increases. Requires --step-clients and --step-time to be specified." - ) - - # Number of clients to incease by Step - parser.add_argument( - '--step-clients', - type=int, - default=1, - help="Client count to increase by step in Step Load mode. Only used together with --step-load" - ) - - # Time limit of each step - parser.add_argument( - '--step-time', - help="Step duration in Step Load mode, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --step-load" - ) - - # log level - parser.add_argument( - '--loglevel', '-L', - default='INFO', - help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.", - ) - - # log file - parser.add_argument( - '--logfile', - help="Path to log file. If not set, log will go to stdout/stderr", - ) - - # if we should print stats in the console - parser.add_argument( - '--print-stats', - action='store_true', - help="Print stats in the console" - ) - - # only print summary stats - parser.add_argument( - '--only-summary', - action='store_true', - help='Only print the summary stats' - ) - - parser.add_argument( - '--no-reset-stats', - action='store_true', - help="[DEPRECATED] Do not reset statistics once hatching has been completed. This is now the default behavior. See --reset-stats to disable", - ) - - parser.add_argument( - '--reset-stats', - action='store_true', - help="Reset statistics once hatching has been completed. 
Should be set on both master and slaves when running in distributed mode", - ) - - # List locust commands found in loaded locust files/source files - parser.add_argument( - '-l', '--list', - action='store_true', - dest='list_commands', - help="Show list of possible locust classes and exit" - ) - - # Display ratio table of all tasks - parser.add_argument( - '--show-task-ratio', - action='store_true', - help="print table of the locust classes' task execution ratio" - ) - # Display ratio table of all tasks in JSON format - parser.add_argument( - '--show-task-ratio-json', - action='store_true', - help="print json data of the locust classes' task execution ratio" - ) - - # Version number (optparse gives you --version but we have to do it - # ourselves to get -V too. sigh) - parser.add_argument( - '-V', '--version', - action='version', - version='%(prog)s {}'.format(version), - ) - - # set the exit code to post on errors - parser.add_argument( - '--exit-code-on-error', - type=int, - default=1, - help="sets the exit code to post on error" - ) - - parser.add_argument( - '-s', '--stop-timeout', - action='store', - type=int, - dest='stop_timeout', - default=None, - help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed." - ) - - parser.add_argument( - 'locust_classes', - nargs='*', - metavar='LocustClass', - ) - - return parser, parser.parse_args(args=args) - - -def _is_package(path): - """ - Is the given path a Python package? - """ - return ( - os.path.isdir(path) - and os.path.exists(os.path.join(path, '__init__.py')) - ) - - -def find_locustfile(locustfile): - """ - Attempt to locate a locustfile, either explicitly or by searching parent dirs. - """ - # Obtain env value - names = [locustfile] - # Create .py version if necessary - if not names[0].endswith('.py'): - names += [names[0] + '.py'] - # Does the name contain path elements? - if os.path.dirname(names[0]): - # If so, expand home-directory markers and test for existence - for name in names: - expanded = os.path.expanduser(name) - if os.path.exists(expanded): - if name.endswith('.py') or _is_package(expanded): - return os.path.abspath(expanded) - else: - # Otherwise, start in cwd and work downwards towards filesystem root - path = os.path.abspath('.') - while True: - for name in names: - joined = os.path.join(path, name) - if os.path.exists(joined): - if name.endswith('.py') or _is_package(joined): - return os.path.abspath(joined) - parent_path = os.path.dirname(path) - if parent_path == path: - # we've reached the root path which has been checked this iteration - break - path = parent_path - # Implicit 'return None' if nothing was found - - def is_locust(tup): """ Takes (name, object) tuple, returns True if it's a public Locust subclass. 
@@ -398,27 +91,41 @@ def __import_locustfile__(filename, path): locusts = dict(filter(is_locust, vars(imported).items())) return imported.__doc__, locusts -def main(): - parser, options = parse_options() +def create_environment(options, events=None): + """ + Create an Environment instance from options + """ + return Environment( + events=events, + host=options.host, + options=options, + reset_stats=options.reset_stats, + step_load=options.step_load, + stop_timeout=options.stop_timeout, + ) + + +def main(): + # create an Events instance that the locustfile can use to register event listeners at the module level + locust.events = Events() + + # find specified locustfile and make sure it exists, using a very simplified + # command line parser that is only used to parse the -f option + locustfile = parse_locustfile_option() + + # import the locustfile + docstring, locusts = load_locustfile(locustfile) + + # parse all command line options + options = parse_options() + # setup logging if not options.skip_log_setup: setup_logging(options.loglevel, options.logfile) logger = logging.getLogger(__name__) - locustfile = find_locustfile(options.locustfile) - - if not locustfile: - logger.error("Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.") - sys.exit(1) - - if locustfile == "locust.py": - logger.error("The locustfile must not be named `locust.py`. Please rename the file and try again.") - sys.exit(1) - - docstring, locusts = load_locustfile(locustfile) - if options.list_commands: console_logger.info("Available Locusts:") for name in locusts: @@ -442,6 +149,9 @@ def main(): # list() call is needed to consume the dict_view object in Python 3 locust_classes = list(locusts.values()) + # create locust Environment + environment = create_environment(options, events=locust.events) + if options.show_task_ratio: console_logger.info("\n Task ratio per locust class") console_logger.info( "-" * 80) @@ -458,25 +168,6 @@ def main(): } console_logger.info(dumps(task_data)) sys.exit(0) - - if options.run_time: - if not options.no_web: - logger.error("The --run-time argument can only be used together with --no-web") - sys.exit(1) - if options.slave: - logger.error("--run-time should be specified on the master node, and not on slave nodes") - sys.exit(1) - try: - options.run_time = parse_timespan(options.run_time) - except ValueError: - logger.error("Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.") - sys.exit(1) - def spawn_run_time_limit_greenlet(): - logger.info("Run time limit set to %s seconds" % options.run_time) - def timelimit_stop(): - logger.info("Time limit reached. 
Stopping Locust.") - runners.locust_runner.quit() - gevent.spawn_later(options.run_time, timelimit_stop) if options.step_time: if not options.step_load: @@ -492,45 +183,86 @@ def timelimit_stop(): sys.exit(1) if options.master: - runners.locust_runner = MasterLocustRunner(locust_classes, options) + runner = MasterLocustRunner( + environment, + locust_classes, + master_bind_host=options.master_bind_host, + master_bind_port=options.master_bind_port, + ) elif options.slave: try: - runners.locust_runner = SlaveLocustRunner(locust_classes, options) + runner = SlaveLocustRunner( + environment, + locust_classes, + master_host=options.master_host, + master_port=options.master_port, + ) except socket.error as e: logger.error("Failed to connect to the Locust master: %s", e) sys.exit(-1) else: - runners.locust_runner = LocalLocustRunner(locust_classes, options) - # main_greenlet is pointing to runners.locust_runner.greenlet by default, it will point the web greenlet later if in web mode - main_greenlet = runners.locust_runner.greenlet - + runner = LocalLocustRunner(environment, locust_classes) + + # main_greenlet is pointing to runners.greenlet by default, it will point the web greenlet later if in web mode + main_greenlet = runner.greenlet + + if options.run_time: + if not options.no_web: + logger.error("The --run-time argument can only be used together with --no-web") + sys.exit(1) + if options.slave: + logger.error("--run-time should be specified on the master node, and not on slave nodes") + sys.exit(1) + try: + options.run_time = parse_timespan(options.run_time) + except ValueError: + logger.error("Valid --run-time formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.") + sys.exit(1) + def spawn_run_time_limit_greenlet(): + logger.info("Run time limit set to %s seconds" % options.run_time) + def timelimit_stop(): + logger.info("Time limit reached. 
Stopping Locust.") + runner.quit() + gevent.spawn_later(options.run_time, timelimit_stop) + + # start Web UI + if not options.no_web and not options.slave: + # spawn web greenlet + logger.info("Starting web monitor at http://%s:%s" % (options.web_host or "*", options.web_port)) + web_ui = WebUI(environment=environment, runner=runner) + main_greenlet = gevent.spawn(web_ui.start, host=options.web_host, port=options.web_port) + else: + web_ui = None + + # Fire locust init event which can be used by end-users' code to run setup code that + # need access to the Environment, Runner or WebUI + environment.events.init.fire(environment=environment, runner=runner, web_ui=web_ui) + if options.no_web: + # headless mode if options.master: - while len(runners.locust_runner.clients.ready) < options.expect_slaves: + # what for slave nodes to connect + while len(runner.clients.ready) < options.expect_slaves: logging.info("Waiting for slaves to be ready, %s of %s connected", - len(runners.locust_runner.clients.ready), options.expect_slaves) + len(runner.clients.ready), options.expect_slaves) time.sleep(1) - if options.step_time: - runners.locust_runner.start_stepload(options.num_clients, options.hatch_rate, options.step_clients, options.step_time) - elif not options.slave: - runners.locust_runner.start_hatching(options.num_clients, options.hatch_rate) - # make locusts are spawned - time.sleep(1) - elif not options.slave: - # spawn web greenlet - logger.info("Starting web monitor at http://%s:%s" % (options.web_host or "*", options.port)) - main_greenlet = gevent.spawn(web.start, locust_classes, options) - + if not options.slave: + # start the test + if options.step_time: + runner.start_stepload(options.num_clients, options.hatch_rate, options.step_clients, options.step_time) + else: + runner.start(options.num_clients, options.hatch_rate) + if options.run_time: spawn_run_time_limit_greenlet() stats_printer_greenlet = None if not options.only_summary and (options.print_stats or (options.no_web and not options.slave)): # spawn stats printing greenlet - stats_printer_greenlet = gevent.spawn(stats_printer) + stats_printer_greenlet = gevent.spawn(stats_printer(runner.stats)) if options.csvfilebase: - gevent.spawn(stats_writer, options.csvfilebase, options.stats_history_enabled) + gevent.spawn(stats_writer, runner.stats, options.csvfilebase, options.stats_history_enabled) def shutdown(code=0): @@ -541,15 +273,15 @@ def shutdown(code=0): if stats_printer_greenlet is not None: stats_printer_greenlet.kill(block=False) logger.info("Cleaning up runner...") - if runners.locust_runner is not None: - runners.locust_runner.quit() + if runner is not None: + runner.quit() logger.info("Running teardowns...") - events.quitting.fire(reverse=True) - print_stats(runners.locust_runner.stats, current=False) - print_percentile_stats(runners.locust_runner.stats) + environment.events.quitting.fire(reverse=True) + print_stats(runner.stats, current=False) + print_percentile_stats(runner.stats) if options.csvfilebase: - write_stat_csvs(options.csvfilebase, options.stats_history_enabled) - print_error_report() + write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled) + print_error_report(runner.stats) sys.exit(code) # install SIGTERM handler @@ -562,8 +294,7 @@ def sig_term_handler(): logger.info("Starting Locust %s" % version) main_greenlet.join() code = 0 - lr = runners.locust_runner - if len(lr.errors) or len(lr.exceptions) or lr.cpu_log_warning(): + if len(runner.errors) or len(runner.exceptions): code = 
options.exit_code_on_error shutdown(code=code) except KeyboardInterrupt as e: diff --git a/locust/runners.py b/locust/runners.py index 7c48c8c337..b1bb4062c7 100644 --- a/locust/runners.py +++ b/locust/runners.py @@ -12,27 +12,24 @@ from gevent import GreenletExit from gevent.pool import Group -from . import events from .rpc import Message, rpc -from .stats import global_stats +from .stats import RequestStats, setup_distributed_stats_event_listeners logger = logging.getLogger(__name__) -# global locust runner singleton -locust_runner = None STATE_INIT, STATE_HATCHING, STATE_RUNNING, STATE_CLEANUP, STATE_STOPPING, STATE_STOPPED, STATE_MISSING = ["ready", "hatching", "running", "cleanup", "stopping", "stopped", "missing"] SLAVE_REPORT_INTERVAL = 3.0 CPU_MONITOR_INTERVAL = 5.0 +HEARTBEAT_INTERVAL = 1 +HEARTBEAT_LIVENESS = 3 LOCUST_STATE_RUNNING, LOCUST_STATE_WAITING, LOCUST_STATE_STOPPING = ["running", "waiting", "stopping"] class LocustRunner(object): - def __init__(self, locust_classes, options): - self.options = options + def __init__(self, environment, locust_classes): + self.environment = environment self.locust_classes = locust_classes - self.hatch_rate = options.hatch_rate - self.host = options.host self.locusts = Group() self.greenlet = Group() self.state = STATE_INIT @@ -42,25 +39,31 @@ def __init__(self, locust_classes, options): self.cpu_warning_emitted = False self.greenlet.spawn(self.monitor_cpu) self.exceptions = {} - self.stats = global_stats - self.step_load = options.step_load + self.stats = RequestStats() + + # set up event listeners for recording requests + def on_request_success(request_type, name, response_time, response_length, **kwargs): + self.stats.log_request(request_type, name, response_time, response_length) + + def on_request_failure(request_type, name, response_time, response_length, exception, **kwargs): + self.stats.log_request(request_type, name, response_time, response_length) + self.stats.log_error(request_type, name, exception) + + self.environment.events.request_success.add_listener(on_request_success) + self.environment.events.request_failure.add_listener(on_request_failure) # register listener that resets stats when hatching is complete def on_hatch_complete(user_count): self.state = STATE_RUNNING - if self.options.reset_stats: + if environment.reset_stats: logger.info("Resetting stats\n") self.stats.reset_all() - events.hatch_complete += on_hatch_complete + self.environment.events.hatch_complete.add_listener(on_hatch_complete) def __del__(self): # don't leave any stray greenlets if runner is removed - if len(self.greenlet) > 0: + if self.greenlet and len(self.greenlet) > 0: self.greenlet.kill(block=False) - - @property - def request_stats(self): - return self.stats.entries @property def errors(self): @@ -90,8 +93,8 @@ def weight_locusts(self, amount): warnings.warn("Notice: Found Locust class (%s) got no task_set. Skipping..." 
% locust.__name__) continue - if self.host is not None: - locust.host = self.host + if self.environment.host is not None: + locust.host = self.environment.host # create locusts depending on weight percent = locust.weight / float(weight_sum) @@ -114,30 +117,30 @@ def weight_locusts(self, amount): return bucket - def spawn_locusts(self, spawn_count, wait=False): + def spawn_locusts(self, spawn_count, hatch_rate, wait=False): bucket = self.weight_locusts(spawn_count) spawn_count = len(bucket) if self.state == STATE_INIT or self.state == STATE_STOPPED: self.state = STATE_HATCHING existing_count = len(self.locusts) - logger.info("Hatching and swarming %i users at the rate %g users/s (%i users already running)..." % (spawn_count, self.hatch_rate, existing_count)) + logger.info("Hatching and swarming %i users at the rate %g users/s (%i users already running)..." % (spawn_count, hatch_rate, existing_count)) occurrence_count = dict([(l.__name__, 0) for l in self.locust_classes]) def hatch(): - sleep_time = 1.0 / self.hatch_rate + sleep_time = 1.0 / hatch_rate while True: if not bucket: logger.info("All locusts hatched: %s (%i already running)" % ( ", ".join(["%s: %d" % (name, count) for name, count in occurrence_count.items()]), existing_count, )) - events.hatch_complete.fire(user_count=len(self.locusts)) + self.environment.events.hatch_complete.fire(user_count=len(self.locusts)) return locust = bucket.pop(random.randint(0, len(bucket)-1)) occurrence_count[locust.__name__] += 1 - new_locust = locust() + new_locust = locust(self.environment) def start_locust(_): try: new_locust.run(runner=self) @@ -169,14 +172,14 @@ def kill_locusts(self, kill_count): bucket.remove(l) break self.kill_locust_greenlets(dying) - events.hatch_complete.fire(user_count=self.user_count) + self.environment.events.hatch_complete.fire(user_count=self.user_count) def kill_locust_greenlets(self, greenlets): """ - Kill running locust greenlets. If options.stop_timeout is set, we try to stop the + Kill running locust greenlets. If environment.stop_timeout is set, we try to stop the Locust users gracefully """ - if self.options.stop_timeout: + if self.environment.stop_timeout: dying = Group() for g in greenlets: locust = g.args[0] @@ -185,8 +188,8 @@ def kill_locust_greenlets(self, greenlets): else: locust._state = LOCUST_STATE_STOPPING dying.add(g) - if not dying.join(timeout=self.options.stop_timeout): - logger.info("Not all locusts finished their tasks & terminated in %s seconds. Killing them..." % self.options.stop_timeout) + if not dying.join(timeout=self.environment.stop_timeout): + logger.info("Not all locusts finished their tasks & terminated in %s seconds. Killing them..." 
% self.environment.stop_timeout) dying.kill(block=True) else: for g in greenlets: @@ -201,13 +204,13 @@ def monitor_cpu(self): self.cpu_warning_emitted = True gevent.sleep(CPU_MONITOR_INTERVAL) - def start_hatching(self, locust_count, hatch_rate, wait=False): + def start(self, locust_count, hatch_rate, wait=False): if self.state != STATE_RUNNING and self.state != STATE_HATCHING: self.stats.clear_all() self.exceptions = {} self.cpu_warning_emitted = False self.slave_cpu_warning_emitted = False - events.locust_start_hatching.fire() + self.environment.events.locust_start_hatching.fire() # Dynamically changing the locust count if self.state != STATE_INIT and self.state != STATE_STOPPED: @@ -218,42 +221,38 @@ def start_hatching(self, locust_count, hatch_rate, wait=False): self.kill_locusts(kill_count) elif self.user_count < locust_count: # Spawn some locusts - self.hatch_rate = hatch_rate spawn_count = locust_count - self.user_count - self.spawn_locusts(spawn_count=spawn_count) + self.spawn_locusts(spawn_count=spawn_count, hatch_rate=hatch_rate) else: - events.hatch_complete.fire(user_count=self.user_count) + self.environment.events.hatch_complete.fire(user_count=self.user_count) else: self.hatch_rate = hatch_rate - self.spawn_locusts(locust_count, wait=wait) + self.spawn_locusts(locust_count, hatch_rate=hatch_rate, wait=wait) def start_stepload(self, locust_count, hatch_rate, step_locust_count, step_duration): if locust_count < step_locust_count: logger.error("Invalid parameters: total locust count of %d is smaller than step locust count of %d" % (locust_count, step_locust_count)) return self.total_clients = locust_count - self.hatch_rate = hatch_rate - self.step_clients_growth = step_locust_count - self.step_duration = step_duration if self.stepload_greenlet: logger.info("There is an ongoing swarming in Step Load mode, will stop it now.") self.stepload_greenlet.kill() logger.info("Start a new swarming in Step Load mode: total locust count of %d, hatch rate of %d, step locust count of %d, step duration of %d " % (locust_count, hatch_rate, step_locust_count, step_duration)) self.state = STATE_INIT - self.stepload_greenlet = self.greenlet.spawn(self.stepload_worker) + self.stepload_greenlet = self.greenlet.spawn(self.stepload_worker, hatch_rate, step_locust_count, step_duration) self.stepload_greenlet.link_exception(callback=self.noop) - def stepload_worker(self): + def stepload_worker(self, hatch_rate, step_clients_growth, step_duration): current_num_clients = 0 while self.state == STATE_INIT or self.state == STATE_HATCHING or self.state == STATE_RUNNING: - current_num_clients += self.step_clients_growth + current_num_clients += step_clients_growth if current_num_clients > int(self.total_clients): logger.info('Step Load is finished.') break - self.start_hatching(current_num_clients, self.hatch_rate) + self.start(current_num_clients, hatch_rate) logger.info('Step loading: start hatch job of %d locust.' 
% (current_num_clients)) - gevent.sleep(self.step_duration) + gevent.sleep(step_duration) def stop(self): # if we are currently hatching locusts we need to kill the hatching greenlet first @@ -261,7 +260,8 @@ def stop(self): self.hatching_greenlet.kill(block=True) self.kill_locust_greenlets([g for g in self.locusts]) self.state = STATE_STOPPED - events.locust_stop_hatching.fire() + self.cpu_log_warning() + self.environment.events.locust_stop_hatching.fire() def quit(self): self.stop() @@ -279,36 +279,31 @@ def noop(self, *args, **kwargs): pass class LocalLocustRunner(LocustRunner): - def __init__(self, locust_classes, options): - super(LocalLocustRunner, self).__init__(locust_classes, options) + def __init__(self, environment, locust_classes): + super(LocalLocustRunner, self).__init__(environment, locust_classes) # register listener thats logs the exception for the local runner def on_locust_error(locust_instance, exception, tb): formatted_tb = "".join(traceback.format_tb(tb)) self.log_exception("local", str(exception), formatted_tb) - events.locust_error += on_locust_error + self.environment.events.locust_error.add_listener(on_locust_error) - def start_hatching(self, locust_count, hatch_rate, wait=False): + def start(self, locust_count, hatch_rate, wait=False): if hatch_rate > 100: logger.warning("Your selected hatch rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?") if self.hatching_greenlet: # kill existing hatching_greenlet before we start a new one self.hatching_greenlet.kill(block=True) - self.hatching_greenlet = self.greenlet.spawn(lambda: super(LocalLocustRunner, self).start_hatching(locust_count, hatch_rate, wait=wait)) + self.hatching_greenlet = self.greenlet.spawn(lambda: super(LocalLocustRunner, self).start(locust_count, hatch_rate, wait=wait)) class DistributedLocustRunner(LocustRunner): - def __init__(self, locust_classes, options): - super(DistributedLocustRunner, self).__init__(locust_classes, options) - self.master_host = options.master_host - self.master_port = options.master_port - self.master_bind_host = options.master_bind_host - self.master_bind_port = options.master_bind_port - self.heartbeat_liveness = options.heartbeat_liveness - self.heartbeat_interval = options.heartbeat_interval + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + setup_distributed_stats_event_listeners(self.environment.events, self.stats) class SlaveNode(object): - def __init__(self, id, state=STATE_INIT, heartbeat_liveness=3): + def __init__(self, id, state=STATE_INIT, heartbeat_liveness=HEARTBEAT_LIVENESS): self.id = id self.state = state self.user_count = 0 @@ -317,8 +312,8 @@ def __init__(self, id, state=STATE_INIT, heartbeat_liveness=3): self.cpu_warning_emitted = False class MasterLocustRunner(DistributedLocustRunner): - def __init__(self, *args, **kwargs): - super(MasterLocustRunner, self).__init__(*args, **kwargs) + def __init__(self, *args, master_bind_host, master_bind_port, **kwargs): + super().__init__(*args, **kwargs) self.slave_cpu_warning_emitted = False self.target_user_count = None @@ -343,7 +338,7 @@ def running(self): return self.get_by_state(STATE_RUNNING) self.clients = SlaveNodesDict() - self.server = rpc.Server(self.master_bind_host, self.master_bind_port) + self.server = rpc.Server(master_bind_host, master_bind_port) self.greenlet.spawn(self.heartbeat_worker).link_exception(callback=self.noop) self.greenlet.spawn(self.client_listener).link_exception(callback=self.noop) @@ -354,12 
+349,12 @@ def on_slave_report(client_id, data): return self.clients[client_id].user_count = data["user_count"] - events.slave_report += on_slave_report + self.environment.events.slave_report.add_listener(on_slave_report) # register listener that sends quit message to slave nodes def on_quitting(): self.quit() - events.quitting += on_quitting + self.environment.events.quitting.add_listener(on_quitting) @property def user_count(self): @@ -372,7 +367,7 @@ def cpu_log_warning(self): warning_emitted = True return warning_emitted - def start_hatching(self, locust_count, hatch_rate): + def start(self, locust_count, hatch_rate): self.target_user_count = locust_count num_slaves = len(self.clients.ready) + len(self.clients.running) + len(self.clients.hatching) if not num_slaves: @@ -393,14 +388,14 @@ def start_hatching(self, locust_count, hatch_rate): if self.state != STATE_RUNNING and self.state != STATE_HATCHING: self.stats.clear_all() self.exceptions = {} - events.master_start_hatching.fire() + self.environment.events.master_start_hatching.fire() for client in (self.clients.ready + self.clients.running + self.clients.hatching): data = { "hatch_rate": slave_hatch_rate, "num_clients": slave_num_clients, - "host": self.host, - "stop_timeout": self.options.stop_timeout, + "host": self.environment.host, + "stop_timeout": self.environment.stop_timeout, } if remaining > 0: @@ -415,7 +410,7 @@ def stop(self): self.state = STATE_STOPPING for client in self.clients.all: self.server.send_to_client(Message("stop", None, client.id)) - events.master_stop_hatching.fire() + self.environment.events.master_stop_hatching.fire() def quit(self): for client in self.clients.all: @@ -425,7 +420,7 @@ def quit(self): def heartbeat_worker(self): while True: - gevent.sleep(self.heartbeat_interval) + gevent.sleep(HEARTBEAT_INTERVAL) for client in self.clients.all: if client.heartbeat < 0 and client.state != STATE_MISSING: logger.info('Slave %s failed to send heartbeat, setting state to missing.' % str(client.id)) @@ -440,11 +435,11 @@ def client_listener(self): msg.node_id = client_id if msg.type == "client_ready": id = msg.node_id - self.clients[id] = SlaveNode(id, heartbeat_liveness=self.heartbeat_liveness) + self.clients[id] = SlaveNode(id, heartbeat_liveness=HEARTBEAT_LIVENESS) logger.info("Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready + self.clients.running + self.clients.hatching))) if self.state == STATE_RUNNING or self.state == STATE_HATCHING: # balance the load distribution when new client joins - self.start_hatching(self.target_user_count, self.hatch_rate) + self.start(self.target_user_count, self.hatch_rate) ## emit a warning if the slave's clock seem to be out of sync with our clock #if abs(time() - msg.data["time"]) > 5.0: # warnings.warn("The slave node's clock seem to be out of sync. 
For the statistics to be correct the different locust servers need to have synchronized clocks.") @@ -454,7 +449,7 @@ def client_listener(self): elif msg.type == "heartbeat": if msg.node_id in self.clients: c = self.clients[msg.node_id] - c.heartbeat = self.heartbeat_liveness + c.heartbeat = HEARTBEAT_LIVENESS c.state = msg.data['state'] c.cpu_usage = msg.data['current_cpu_usage'] if not c.cpu_warning_emitted and c.cpu_usage > 90: @@ -462,7 +457,7 @@ def client_listener(self): c.cpu_warning_emitted = True # used to suppress logging for this node logger.warning("Slave %s exceeded cpu threshold (will only log this once per slave)" % (msg.node_id)) elif msg.type == "stats": - events.slave_report.fire(client_id=msg.node_id, data=msg.data) + self.environment.events.slave_report.fire(client_id=msg.node_id, data=msg.data) elif msg.type == "hatching": self.clients[msg.node_id].state = STATE_HATCHING elif msg.type == "hatch_complete": @@ -470,7 +465,7 @@ def client_listener(self): self.clients[msg.node_id].user_count = msg.data["count"] if len(self.clients.hatching) == 0: count = sum(c.user_count for c in self.clients.values()) - events.hatch_complete.fire(user_count=count) + self.environment.events.hatch_complete.fire(user_count=count) elif msg.type == "quit": if msg.node_id in self.clients: del self.clients[msg.node_id] @@ -486,11 +481,11 @@ def slave_count(self): return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running) class SlaveLocustRunner(DistributedLocustRunner): - def __init__(self, *args, **kwargs): - super(SlaveLocustRunner, self).__init__(*args, **kwargs) + def __init__(self, *args, master_host, master_port, **kwargs): + super().__init__(*args, **kwargs) self.client_id = socket.gethostname() + "_" + uuid4().hex - self.client = rpc.Client(self.master_host, self.master_port, self.client_id) + self.client = rpc.Client(master_host, master_port, self.client_id) self.greenlet.spawn(self.heartbeat).link_exception(callback=self.noop) self.greenlet.spawn(self.worker).link_exception(callback=self.noop) self.client.send(Message("client_ready", None, self.client_id)) @@ -501,28 +496,28 @@ def __init__(self, *args, **kwargs): def on_hatch_complete(user_count): self.client.send(Message("hatch_complete", {"count":user_count}, self.client_id)) self.slave_state = STATE_RUNNING - events.hatch_complete += on_hatch_complete + self.environment.events.hatch_complete.add_listener(on_hatch_complete) # register listener that adds the current number of spawned locusts to the report that is sent to the master node def on_report_to_master(client_id, data): data["user_count"] = self.user_count - events.report_to_master += on_report_to_master + self.environment.events.report_to_master.add_listener(on_report_to_master) # register listener that sends quit message to master def on_quitting(): self.client.send(Message("quit", None, self.client_id)) - events.quitting += on_quitting + self.environment.events.quitting.add_listener(on_quitting) # register listener thats sends locust exceptions to master def on_locust_error(locust_instance, exception, tb): formatted_tb = "".join(traceback.format_tb(tb)) self.client.send(Message("exception", {"msg" : str(exception), "traceback" : formatted_tb}, self.client_id)) - events.locust_error += on_locust_error + self.environment.events.locust_error.add_listener(on_locust_error) def heartbeat(self): while True: self.client.send(Message('heartbeat', {'state': self.slave_state, 'current_cpu_usage': self.current_cpu_usage}, self.client_id)) - 
gevent.sleep(self.heartbeat_interval) + gevent.sleep(HEARTBEAT_INTERVAL) def worker(self): while True: @@ -532,12 +527,12 @@ def worker(self): self.client.send(Message("hatching", None, self.client_id)) job = msg.data self.hatch_rate = job["hatch_rate"] - self.host = job["host"] - self.options.stop_timeout = job["stop_timeout"] + self.environment.host = job["host"] + self.environment.stop_timeout = job["stop_timeout"] if self.hatching_greenlet: # kill existing hatching greenlet before we launch new one self.hatching_greenlet.kill(block=True) - self.hatching_greenlet = self.greenlet.spawn(lambda: self.start_hatching(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"])) + self.hatching_greenlet = self.greenlet.spawn(lambda: self.start(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"])) elif msg.type == "stop": self.stop() self.client.send(Message("client_stopped", None, self.client_id)) @@ -561,5 +556,5 @@ def stats_reporter(self): def _send_stats(self): data = {} - events.report_to_master.fire(client_id=self.client_id, data=data) + self.environment.events.report_to_master.fire(client_id=self.client_id, data=data) self.client.send(Message("stats", data, self.client_id)) diff --git a/locust/stats.py b/locust/stats.py index f81a3458cc..c2eb0a7c1d 100644 --- a/locust/stats.py +++ b/locust/stats.py @@ -6,7 +6,6 @@ import gevent -from . import events from .exception import StopLocust from .log import console_logger @@ -643,57 +642,43 @@ def median_from_dict(total, count): pos -= count[k] -global_stats = RequestStats() -""" -A global instance for holding the statistics. Should be removed eventually. -""" - -def on_request_success(request_type, name, response_time, response_length, **kwargs): - global_stats.log_request(request_type, name, response_time, response_length) - -def on_request_failure(request_type, name, response_time, response_length, exception, **kwargs): - global_stats.log_request(request_type, name, response_time, response_length) - global_stats.log_error(request_type, name, exception) - -def on_report_to_master(client_id, data): - data["stats"] = global_stats.serialize_stats() - data["stats_total"] = global_stats.total.get_stripped_report() - data["errors"] = global_stats.serialize_errors() - global_stats.errors = {} - -def on_slave_report(client_id, data): - for stats_data in data["stats"]: - entry = StatsEntry.unserialize(stats_data) - request_key = (entry.name, entry.method) - if not request_key in global_stats.entries: - global_stats.entries[request_key] = StatsEntry(global_stats, entry.name, entry.method) - global_stats.entries[request_key].extend(entry) - - for error_key, error in data["errors"].items(): - if error_key not in global_stats.errors: - global_stats.errors[error_key] = StatsError.from_dict(error) - else: - global_stats.errors[error_key].occurrences += error["occurrences"] - - # save the old last_request_timestamp, to see if we should store a new copy - # of the response times in the response times cache - old_last_request_timestamp = global_stats.total.last_request_timestamp - # update the total StatsEntry - global_stats.total.extend(StatsEntry.unserialize(data["stats_total"])) - if global_stats.total.last_request_timestamp and global_stats.total.last_request_timestamp > (old_last_request_timestamp or 0): - # If we've entered a new second, we'll cache the response times. Note that there - # might still be reports from other slave nodes - that contains requests for the same - # time periods - that hasn't been received/accounted for yet. 
This will cause the cache to - # lag behind a second or two, but since StatsEntry.current_response_time_percentile() - # (which is what the response times cache is used for) uses an approximation of the - # last 10 seconds anyway, it should be fine to ignore this. - global_stats.total._cache_response_times(int(global_stats.total.last_request_timestamp)) - - -events.request_success += on_request_success -events.request_failure += on_request_failure -events.report_to_master += on_report_to_master -events.slave_report += on_slave_report +def setup_distributed_stats_event_listeners(events, stats): + def on_report_to_master(client_id, data): + data["stats"] = stats.serialize_stats() + data["stats_total"] = stats.total.get_stripped_report() + data["errors"] = stats.serialize_errors() + stats.errors = {} + + def on_slave_report(client_id, data): + for stats_data in data["stats"]: + entry = StatsEntry.unserialize(stats_data) + request_key = (entry.name, entry.method) + if not request_key in stats.entries: + stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method) + stats.entries[request_key].extend(entry) + + for error_key, error in data["errors"].items(): + if error_key not in stats.errors: + stats.errors[error_key] = StatsError.from_dict(error) + else: + stats.errors[error_key].occurrences += error["occurrences"] + + # save the old last_request_timestamp, to see if we should store a new copy + # of the response times in the response times cache + old_last_request_timestamp = stats.total.last_request_timestamp + # update the total StatsEntry + stats.total.extend(StatsEntry.unserialize(data["stats_total"])) + if stats.total.last_request_timestamp and stats.total.last_request_timestamp > (old_last_request_timestamp or 0): + # If we've entered a new second, we'll cache the response times. Note that there + # might still be reports from other slave nodes - that contains requests for the same + # time periods - that hasn't been received/accounted for yet. This will cause the cache to + # lag behind a second or two, but since StatsEntry.current_response_time_percentile() + # (which is what the response times cache is used for) uses an approximation of the + # last 10 seconds anyway, it should be fine to ignore this. + stats.total._cache_response_times(int(stats.total.last_request_timestamp)) + + events.report_to_master.add_listener(on_report_to_master) + events.slave_report.add_listener(on_slave_report) def print_stats(stats, current=True): @@ -736,49 +721,50 @@ def print_percentile_stats(stats): console_logger.info(stats.total.percentile()) console_logger.info("") -def print_error_report(): - if not len(global_stats.errors): +def print_error_report(stats): + if not len(stats.errors): return console_logger.info("Error report") console_logger.info(" %-18s %-100s" % ("# occurrences", "Error")) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) - for error in global_stats.errors.values(): + for error in stats.errors.values(): console_logger.info(" %-18i %-100s" % (error.occurrences, error.to_name())) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) console_logger.info("") -def stats_printer(): - from . 
import runners - while True: - print_stats(runners.locust_runner.stats) - gevent.sleep(CONSOLE_STATS_INTERVAL_SEC) +def stats_printer(stats): + def stats_printer_func(): + while True: + print_stats(stats) + gevent.sleep(CONSOLE_STATS_INTERVAL_SEC) + return stats_printer_func -def stats_writer(base_filepath, stats_history_enabled=False): +def stats_writer(stats, base_filepath, stats_history_enabled=False): """Writes the csv files for the locust run.""" with open(base_filepath + '_stats_history.csv', 'w') as f: f.write(stats_history_csv_header()) while True: - write_stat_csvs(base_filepath, stats_history_enabled) + write_stat_csvs(stats, base_filepath, stats_history_enabled) gevent.sleep(CSV_STATS_INTERVAL_SEC) -def write_stat_csvs(base_filepath, stats_history_enabled=False): +def write_stat_csvs(stats, base_filepath, stats_history_enabled=False): """Writes the requests, distribution, and failures csvs.""" with open(base_filepath + '_stats.csv', 'w') as f: - f.write(requests_csv()) + f.write(requests_csv(stats)) with open(base_filepath + '_stats_history.csv', 'a') as f: - f.write(stats_history_csv(stats_history_enabled) + "\n") + f.write(stats_history_csv(stats, stats_history_enabled) + "\n") with open(base_filepath + '_failures.csv', 'w') as f: - f.write(failures_csv()) + f.write(failures_csv(stats)) def sort_stats(stats): return [stats[key] for key in sorted(stats.keys())] -def requests_csv(): +def requests_csv(stats): from . import runners """Returns the contents of the 'requests' & 'distribution' tab as CSV.""" @@ -810,7 +796,7 @@ def requests_csv(): ]) ] - for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]): + for s in chain(sort_stats(stats.entries), [stats.total]): if s.num_requests: percentile_str = ','.join([ str(int(s.get_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT]) @@ -863,10 +849,8 @@ def stats_history_csv_header(): '"100%"' )) + '\n' -def stats_history_csv(stats_history_enabled=False, csv_for_web_ui=False): +def stats_history_csv(stats, stats_history_enabled=False, csv_for_web_ui=False): """Returns the Aggregated stats entry every interval""" - from . import runners - # csv_for_web_ui boolean returns the header along with the stats history row so that # it can be returned as a csv for download on the web ui. Otherwise when run with # the '--no-web' option we write the header first and then append the file with stats @@ -880,9 +864,9 @@ def stats_history_csv(stats_history_enabled=False, csv_for_web_ui=False): stats_entries_per_iteration = [] if stats_history_enabled: - stats_entries_per_iteration = sort_stats(runners.locust_runner.request_stats) + stats_entries_per_iteration = sort_stats(stats.entries) - for s in chain(stats_entries_per_iteration, [runners.locust_runner.stats.total]): + for s in chain(stats_entries_per_iteration, [stats.total]): if s.num_requests: percentile_str = ','.join([ str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT]) @@ -907,10 +891,8 @@ def stats_history_csv(stats_history_enabled=False, csv_for_web_ui=False): return "\n".join(rows) -def failures_csv(): +def failures_csv(stats): """"Return the contents of the 'failures' tab as a CSV.""" - from . 
import runners - rows = [ ",".join(( '"Method"', @@ -920,7 +902,7 @@ def failures_csv(): )) ] - for s in sort_stats(runners.locust_runner.stats.errors): + for s in sort_stats(stats.errors): rows.append('"%s","%s","%s",%i' % ( s.method, s.name, diff --git a/locust/test/mock_locustfile.py b/locust/test/mock_locustfile.py new file mode 100644 index 0000000000..f912e5eee4 --- /dev/null +++ b/locust/test/mock_locustfile.py @@ -0,0 +1,55 @@ +import os +import random +import time + +from contextlib import contextmanager + + +MOCK_LOUCSTFILE_CONTENT = ''' +"""This is a mock locust file for unit testing""" + +from locust import HttpLocust, TaskSet, task, between + + +def index(l): + l.client.get("/") + +def stats(l): + l.client.get("/stats/requests") + + +class UserTasks(TaskSet): + # one can specify tasks like this + tasks = [index, stats] + + +class LocustSubclass(HttpLocust): + host = "http://127.0.0.1:8089" + wait_time = between(2, 5) + task_set = UserTasks + + +class NotLocustSubclass(): + host = "http://localhost:8000" + +''' + +class MockedLocustfile: + __slots__ = ["filename", "directory", "file_path"] + + +@contextmanager +def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT): + mocked = MockedLocustfile() + mocked.directory = os.path.dirname(os.path.abspath(__file__)) + mocked.filename = "%s_%s_%i.py" % ( + filename_prefix, + str(time.time()).replace(".", "_"), + random.randint(0,100000), + ) + mocked.file_path = os.path.join(mocked.directory, mocked.filename) + with open(mocked.file_path, 'w') as file: + file.write(content) + + yield mocked + os.remove(mocked.file_path) diff --git a/locust/test/test_client.py b/locust/test/test_client.py index afd2da6cd7..0c7af9739a 100644 --- a/locust/test/test_client.py +++ b/locust/test/test_client.py @@ -1,20 +1,28 @@ from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema, RequestException) -from locust import events from locust.clients import HttpSession -from locust.stats import global_stats +from locust.env import Environment from .testcases import WebserverTestCase class TestHttpSession(WebserverTestCase): + def get_client(self, base_url=None): + if base_url is None: + base_url = "http://127.0.0.1:%i" % self.port + return HttpSession( + base_url=base_url, + request_success=self.environment.events.request_success, + request_failure=self.environment.events.request_failure, + ) + def test_get(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.get("/ultra_fast") self.assertEqual(200, r.status_code) def test_connection_error(self): - s = HttpSession("http://localhost:1") + s = self.get_client(base_url="http://localhost:1") r = s.get("/", timeout=0.1) self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) @@ -26,7 +34,7 @@ def test_wrong_url(self): ("telnet://127.0.0.1", InvalidSchema), ("127.0.0.1", MissingSchema), ): - s = HttpSession(url) + s = self.get_client(base_url=url) try: self.assertRaises(exception, s.get, "/") except KeyError: @@ -36,60 +44,60 @@ def test_streaming_response(self): """ Test a request to an endpoint that returns a streaming response """ - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.get("/streaming/30") # verify that the time reported includes the download time of the whole streamed response - self.assertGreater(global_stats.get("/streaming/30", method="GET").avg_response_time, 250) - global_stats.clear_all() + self.assertGreater(self.runner.stats.get("/streaming/30", 
method="GET").avg_response_time, 250) + self.runner.stats.clear_all() # verify that response time does NOT include whole download time, when using stream=True r = s.get("/streaming/30", stream=True) - self.assertGreater(global_stats.get("/streaming/30", method="GET").avg_response_time, 0) - self.assertLess(global_stats.get("/streaming/30", method="GET").avg_response_time, 250) + self.assertGreater(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 0) + self.assertLess(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 250) # download the content of the streaming response (so we don't get an ugly exception in the log) _ = r.content def test_slow_redirect(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() url = "/redirect?url=/redirect?delay=0.5" r = s.get(url) - stats = global_stats.get(url, method="GET") + stats = self.runner.stats.get(url, method="GET") self.assertEqual(1, stats.num_requests) self.assertGreater(stats.avg_response_time, 500) def test_post_redirect(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() url = "/redirect" r = s.post(url) self.assertEqual(200, r.status_code) - post_stats = global_stats.get(url, method="POST") - get_stats = global_stats.get(url, method="GET") + post_stats = self.runner.stats.get(url, method="POST") + get_stats = self.runner.stats.get(url, method="GET") self.assertEqual(1, post_stats.num_requests) self.assertEqual(0, get_stats.num_requests) def test_cookie(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.post("/set_cookie?name=testcookie&value=1337") self.assertEqual(200, r.status_code) r = s.get("/get_cookie?name=testcookie") self.assertEqual('1337', r.content.decode()) def test_head(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.head("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("", r.content.decode()) def test_delete(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.delete("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("DELETE", r.content.decode()) def test_options(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) + s = self.get_client() r = s.options("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("", r.content.decode()) @@ -99,15 +107,11 @@ def test_options(self): ) def test_error_message_with_name_replacment(self): - s = HttpSession("http://127.0.0.1:%i" % self.port) - my_event = events.EventHook() + s = self.get_client() kwargs = {} - def on_my_event(**kw): + def on_error(**kw): kwargs.update(kw) - my_event += on_my_event - orig_events = events.request_failure - events.request_failure = my_event + self.environment.events.request_failure.add_listener(on_error) s.request('get', '/wrong_url/01', name='replaced_url_name') - events.request_failure = orig_events self.assertIn('for url: replaced_url_name', str(kwargs['exception'])) diff --git a/locust/test/test_fasthttp.py b/locust/test/test_fasthttp.py index 3b15b93c9d..0066ff0e2b 100644 --- a/locust/test/test_fasthttp.py +++ b/locust/test/test_fasthttp.py @@ -1,109 +1,101 @@ -import socket - -from locust import TaskSet, task, events -from locust.core import LocustError +from locust.core import task, TaskSet from locust.contrib.fasthttp import FastHttpSession, FastHttpLocust from locust.exception import CatchResponseError, InterruptTaskSet, ResponseError -from locust.stats import 
global_stats - from .testcases import WebserverTestCase class TestFastHttpSession(WebserverTestCase): def test_get(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.get("/ultra_fast") self.assertEqual(200, r.status_code) def test_connection_error(self): - global_stats.clear_all() - s = FastHttpSession("http://localhost:1") + s = FastHttpSession(self.environment, "http://localhost:1") r = s.get("/", timeout=0.1) self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) - self.assertEqual(1, len(global_stats.errors)) + self.assertEqual(1, len(self.runner.stats.errors)) self.assertTrue(isinstance(r.error, ConnectionRefusedError)) - self.assertTrue(isinstance(next(iter(global_stats.errors.values())).error, ConnectionRefusedError)) + self.assertTrue(isinstance(next(iter(self.runner.stats.errors.values())).error, ConnectionRefusedError)) def test_404(self): - global_stats.clear_all() - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.get("/does_not_exist") self.assertEqual(404, r.status_code) - self.assertEqual(1, global_stats.get("/does_not_exist", "GET").num_failures) + self.assertEqual(1, self.runner.stats.get("/does_not_exist", "GET").num_failures) def test_204(self): - global_stats.clear_all() - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.get("/status/204") self.assertEqual(204, r.status_code) - self.assertEqual(1, global_stats.get("/status/204", "GET").num_requests) - self.assertEqual(0, global_stats.get("/status/204", "GET").num_failures) + self.assertEqual(1, self.runner.stats.get("/status/204", "GET").num_requests) + self.assertEqual(0, self.runner.stats.get("/status/204", "GET").num_failures) def test_streaming_response(self): """ Test a request to an endpoint that returns a streaming response """ - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.get("/streaming/30") # verify that the time reported includes the download time of the whole streamed response - self.assertGreater(global_stats.get("/streaming/30", method="GET").avg_response_time, 250) - global_stats.clear_all() + self.assertGreater(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 250) + self.runner.stats.clear_all() # verify that response time does NOT include whole download time, when using stream=True r = s.get("/streaming/30", stream=True) - self.assertGreaterEqual(global_stats.get("/streaming/30", method="GET").avg_response_time, 0) - self.assertLess(global_stats.get("/streaming/30", method="GET").avg_response_time, 250) + self.assertGreaterEqual(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 0) + self.assertLess(self.runner.stats.get("/streaming/30", method="GET").avg_response_time, 250) # download the content of the streaming response (so we don't get an ugly exception in the log) _ = r.content def test_slow_redirect(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) url = "/redirect?url=/redirect?delay=0.5" r = s.get(url) - stats = global_stats.get(url, method="GET") + stats = self.runner.stats.get(url, method="GET") self.assertEqual(1, stats.num_requests) self.assertGreater(stats.avg_response_time, 500) def 
test_post_redirect(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) url = "/redirect" r = s.post(url) self.assertEqual(200, r.status_code) - post_stats = global_stats.get(url, method="POST") - get_stats = global_stats.get(url, method="GET") + post_stats = self.runner.stats.get(url, method="POST") + get_stats = self.runner.stats.get(url, method="GET") self.assertEqual(1, post_stats.num_requests) self.assertEqual(0, get_stats.num_requests) def test_cookie(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.post("/set_cookie?name=testcookie&value=1337") self.assertEqual(200, r.status_code) r = s.get("/get_cookie?name=testcookie") self.assertEqual('1337', r.content.decode()) def test_head(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.head("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("", r.content.decode()) def test_delete(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.delete("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("DELETE", r.content.decode()) def test_patch(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.patch("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("PATCH", r.content.decode()) def test_options(self): - s = FastHttpSession("http://127.0.0.1:%i" % self.port) + s = FastHttpSession(self.environment, "http://127.0.0.1:%i" % self.port) r = s.options("/request_method") self.assertEqual(200, r.status_code) self.assertEqual("", r.content.decode()) @@ -118,61 +110,61 @@ def test_request_stats_content_length(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) locust.client.get("/ultra_fast") - self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) + self.assertEqual(self.runner.stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) locust.client.get("/ultra_fast") - self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) + self.assertEqual(self.runner.stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) def test_request_stats_no_content_length(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - l = MyLocust() + l = MyLocust(self.environment) path = "/no_content_length" r = l.client.get(path) - self.assertEqual(global_stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header")) + self.assertEqual(self.runner.stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header")) def test_request_stats_no_content_length_streaming(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - l = MyLocust() + l = MyLocust(self.environment) path = "/no_content_length" r = l.client.get(path, stream=True) - self.assertEqual(0, global_stats.get(path, "GET").avg_content_length) + self.assertEqual(0, self.runner.stats.get(path, 
"GET").avg_content_length) def test_request_stats_named_endpoint(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) locust.client.get("/ultra_fast", name="my_custom_name") - self.assertEqual(1, global_stats.get("my_custom_name", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("my_custom_name", "GET").num_requests) def test_request_stats_query_variables(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) locust.client.get("/ultra_fast?query=1") - self.assertEqual(1, global_stats.get("/ultra_fast?query=1", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("/ultra_fast?query=1", "GET").num_requests) def test_request_stats_put(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) locust.client.put("/put") - self.assertEqual(1, global_stats.get("/put", "PUT").num_requests) + self.assertEqual(1, self.runner.stats.get("/put", "PUT").num_requests) def test_request_connection_error(self): class MyLocust(FastHttpLocust): host = "http://localhost:1" - locust = MyLocust() + locust = MyLocust(self.environment) response = locust.client.get("/", timeout=0.1) self.assertEqual(response.status_code, 0) - self.assertEqual(1, global_stats.get("/", "GET").num_failures) - self.assertEqual(1, global_stats.get("/", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("/", "GET").num_failures) + self.assertEqual(1, self.runner.stats.get("/", "GET").num_requests) class TestFastHttpLocustClass(WebserverTestCase): @@ -184,7 +176,7 @@ class MyLocust(FastHttpLocust): tasks = [t1] host = "http://127.0.0.1:%i" % self.port - my_locust = MyLocust() + my_locust = MyLocust(self.environment) t1(my_locust) self.assertEqual(self.response.text, "This is an ultra fast response") @@ -192,28 +184,28 @@ def test_client_request_headers(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("hello", locust.client.get("/request_header_test", headers={"X-Header-Test":"hello"}).text) def test_client_get(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("GET", locust.client.get("/request_method").text) def test_client_get_absolute_url(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).text) def test_client_post(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("POST", locust.client.post("/request_method", {"arg":"hello world"}).text) self.assertEqual("hello world", locust.client.post("/post", {"arg":"hello world"}).text) @@ -221,7 +213,7 @@ def test_client_put(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("PUT", locust.client.put("/request_method", {"arg":"hello world"}).text) self.assertEqual("hello world", locust.client.put("/put", {"arg":"hello world"}).text) @@ -229,7 +221,7 @@ def test_client_delete(self): class 
MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("DELETE", locust.client.delete("/request_method").text) self.assertEqual(200, locust.client.delete("/request_method").status_code) @@ -237,11 +229,10 @@ def test_client_head(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual(200, locust.client.head("/request_method").status_code) def test_log_request_name_argument(self): - from locust.stats import global_stats self.response = "" class MyLocust(FastHttpLocust): @@ -252,23 +243,22 @@ class MyLocust(FastHttpLocust): def t1(l): self.response = l.client.get("/ultra_fast", name="new name!") - my_locust = MyLocust() + my_locust = MyLocust(self.environment) my_locust.t1() - self.assertEqual(1, global_stats.get("new name!", "GET").num_requests) - self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("new name!", "GET").num_requests) + self.assertEqual(0, self.runner.stats.get("/ultra_fast", "GET").num_requests) def test_redirect_url_original_path_as_name(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - l = MyLocust() + l = MyLocust(self.environment) l.client.get("/redirect") - from locust.stats import global_stats - self.assertEqual(1, len(global_stats.entries)) - self.assertEqual(1, global_stats.get("/redirect", "GET").num_requests) - self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) + self.assertEqual(1, len(self.runner.stats.entries)) + self.assertEqual(1, self.runner.stats.get("/redirect", "GET").num_requests) + self.assertEqual(0, self.runner.stats.get("/ultra_fast", "GET").num_requests) def test_client_basic_auth(self): class MyLocust(FastHttpLocust): @@ -280,9 +270,9 @@ class MyAuthorizedLocust(FastHttpLocust): class MyUnauthorizedLocust(FastHttpLocust): host = "http://locust:wrong@127.0.0.1:%i" % self.port - locust = MyLocust() - unauthorized = MyUnauthorizedLocust() - authorized = MyAuthorizedLocust() + locust = MyLocust(self.environment) + unauthorized = MyUnauthorizedLocust(self.environment) + authorized = MyAuthorizedLocust(self.environment) response = authorized.client.get("/basic_auth") self.assertEqual(200, response.status_code) self.assertEqual("Authorized", response.text) @@ -297,7 +287,7 @@ def setUp(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port - self.locust = MyLocust() + self.locust = MyLocust(self.environment) self.num_failures = 0 self.num_success = 0 @@ -306,8 +296,8 @@ def on_failure(request_type, name, response_time, response_length, exception): self.last_failure_exception = exception def on_success(**kwargs): self.num_success += 1 - events.request_failure += on_failure - events.request_success += on_success + self.environment.events.request_failure.add_listener(on_failure) + self.environment.events.request_success.add_listener(on_success) def test_catch_response(self): self.assertEqual(500, self.locust.client.get("/fail").status_code) @@ -364,7 +354,7 @@ class MyLocust(FastHttpLocust): host = "http://127.0.0.1:%i" % self.port task_set = MyTaskSet - l = MyLocust() + l = MyLocust(self.environment) ts = MyTaskSet(l) self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task()) self.assertEqual(0, self.num_failures) @@ -373,7 +363,7 @@ class MyLocust(FastHttpLocust): def test_catch_response_connection_error_success(self): 
class MyLocust(FastHttpLocust): host = "http://127.0.0.1:1" - l = MyLocust() + l = MyLocust(self.environment) with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) @@ -384,7 +374,7 @@ class MyLocust(FastHttpLocust): def test_catch_response_connection_error_fail(self): class MyLocust(FastHttpLocust): host = "http://127.0.0.1:1" - l = MyLocust() + l = MyLocust(self.environment) with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) diff --git a/locust/test/test_locust_class.py b/locust/test/test_locust_class.py index da72ca177c..0a3d378b4f 100644 --- a/locust/test/test_locust_class.py +++ b/locust/test/test_locust_class.py @@ -1,5 +1,6 @@ from locust import InterruptTaskSet, ResponseError -from locust.core import HttpLocust, Locust, TaskSet, events, task +from locust.core import HttpLocust, Locust, TaskSet, task +from locust.env import Environment from locust.exception import (CatchResponseError, LocustError, RescheduleTask, RescheduleTaskImmediately) @@ -13,7 +14,8 @@ def setUp(self): class User(Locust): host = "127.0.0.1" - self.locust = User() + self.environment = Environment() + self.locust = User(self.environment) def test_task_ratio(self): t1 = lambda l: None @@ -256,8 +258,8 @@ class MyLocust2(Locust): host = "http://127.0.0.1" task_set = MyTaskSet2 - l = MyLocust() - l2 = MyLocust2() + l = MyLocust(Environment()) + l2 = MyLocust2(Environment()) self.assertRaises(LocustError, lambda: l.run()) self.assertRaises(LocustError, lambda: l2.run()) @@ -289,7 +291,7 @@ class MyLocust(Locust): host = "" task_set = SubTaskSet - l = MyLocust() + l = MyLocust(Environment()) task_set = SubTaskSet(l) self.assertRaises(RescheduleTaskImmediately, lambda: task_set.run(reschedule=True)) self.assertRaises(RescheduleTask, lambda: task_set.run(reschedule=False)) @@ -317,7 +319,7 @@ class MyLocust(Locust): host = "" task_set = RootTaskSet - l = MyLocust() + l = MyLocust(Environment()) l.run() self.assertTrue(isinstance(parents["sub"], RootTaskSet)) self.assertTrue(isinstance(parents["subsub"], SubTaskSet)) @@ -329,8 +331,8 @@ class User(Locust): setup_run_count = 0 def setup(self): User.setup_run_count += 1 - User() - User() + User(self.environment) + User(self.environment) self.assertEqual(1, User.setup_run_count) @@ -343,7 +345,7 @@ class MyLocust(HttpLocust): tasks = [t1] host = "http://127.0.0.1:%i" % self.port - my_locust = MyLocust() + my_locust = MyLocust(self.environment) t1(my_locust) self.assertEqual(self.response.text, "This is an ultra fast response") @@ -351,28 +353,28 @@ def test_client_request_headers(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("hello", locust.client.get("/request_header_test", headers={"X-Header-Test":"hello"}).text) def test_client_get(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("GET", locust.client.get("/request_method").text) def test_client_get_absolute_url(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("GET", locust.client.get("http://127.0.0.1:%i/request_method" % self.port).text) def test_client_post(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = 
MyLocust(self.environment) self.assertEqual("POST", locust.client.post("/request_method", {"arg":"hello world"}).text) self.assertEqual("hello world", locust.client.post("/post", {"arg":"hello world"}).text) @@ -380,7 +382,7 @@ def test_client_put(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("PUT", locust.client.put("/request_method", {"arg":"hello world"}).text) self.assertEqual("hello world", locust.client.put("/put", {"arg":"hello world"}).text) @@ -388,7 +390,7 @@ def test_client_delete(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual("DELETE", locust.client.delete("/request_method").text) self.assertEqual(200, locust.client.delete("/request_method").status_code) @@ -396,7 +398,7 @@ def test_client_head(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - locust = MyLocust() + locust = MyLocust(self.environment) self.assertEqual(200, locust.client.head("/request_method").status_code) def test_client_basic_auth(self): @@ -409,9 +411,9 @@ class MyAuthorizedLocust(HttpLocust): class MyUnauthorizedLocust(HttpLocust): host = "http://locust:wrong@127.0.0.1:%i" % self.port - locust = MyLocust() - unauthorized = MyUnauthorizedLocust() - authorized = MyAuthorizedLocust() + locust = MyLocust(self.environment) + unauthorized = MyUnauthorizedLocust(self.environment) + authorized = MyAuthorizedLocust(self.environment) response = authorized.client.get("/basic_auth") self.assertEqual(200, response.status_code) self.assertEqual("Authorized", response.text) @@ -419,22 +421,19 @@ class MyUnauthorizedLocust(HttpLocust): self.assertEqual(401, unauthorized.client.get("/basic_auth").status_code) def test_log_request_name_argument(self): - from locust.stats import global_stats - self.response = "" - class MyLocust(HttpLocust): tasks = [] host = "http://127.0.0.1:%i" % self.port @task() def t1(l): - self.response = l.client.get("/ultra_fast", name="new name!") + l.client.get("/ultra_fast", name="new name!") - my_locust = MyLocust() + my_locust = MyLocust(self.environment) my_locust.t1() - self.assertEqual(1, global_stats.get("new name!", "GET").num_requests) - self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("new name!", "GET").num_requests) + self.assertEqual(0, self.runner.stats.get("/ultra_fast", "GET").num_requests) def test_locust_client_error(self): class MyTaskSet(TaskSet): @@ -447,7 +446,7 @@ class MyLocust(Locust): host = "http://127.0.0.1:%i" % self.port task_set = MyTaskSet - my_locust = MyLocust() + my_locust = MyLocust(self.environment) self.assertRaises(LocustError, lambda: my_locust.client.get("/")) my_taskset = MyTaskSet(my_locust) self.assertRaises(LocustError, lambda: my_taskset.client.get("/")) @@ -456,13 +455,12 @@ def test_redirect_url_original_path_as_name(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - l = MyLocust() + l = MyLocust(self.environment) l.client.get("/redirect") - from locust.stats import global_stats - self.assertEqual(1, len(global_stats.entries)) - self.assertEqual(1, global_stats.get("/redirect", "GET").num_requests) - self.assertEqual(0, global_stats.get("/ultra_fast", "GET").num_requests) + self.assertEqual(1, len(self.runner.stats.entries)) + self.assertEqual(1, self.runner.stats.get("/redirect", "GET").num_requests) + self.assertEqual(0, 
self.runner.stats.get("/ultra_fast", "GET").num_requests) class TestCatchResponse(WebserverTestCase): @@ -472,7 +470,7 @@ def setUp(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port - self.locust = MyLocust() + self.locust = MyLocust(self.environment) self.num_failures = 0 self.num_success = 0 @@ -481,8 +479,8 @@ def on_failure(request_type, name, response_time, response_length, exception): self.last_failure_exception = exception def on_success(**kwargs): self.num_success += 1 - events.request_failure += on_failure - events.request_success += on_success + self.environment.events.request_failure.add_listener(on_failure) + self.environment.events.request_success.add_listener(on_success) def test_catch_response(self): self.assertEqual(500, self.locust.client.get("/fail").status_code) @@ -538,7 +536,7 @@ class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port task_set = MyTaskSet - l = MyLocust() + l = MyLocust(self.environment) ts = MyTaskSet(l) self.assertRaises(InterruptTaskSet, lambda: ts.interrupted_task()) self.assertEqual(0, self.num_failures) @@ -547,7 +545,7 @@ class MyLocust(HttpLocust): def test_catch_response_connection_error_success(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:1" - l = MyLocust() + l = MyLocust(self.environment) with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) @@ -558,7 +556,7 @@ class MyLocust(HttpLocust): def test_catch_response_connection_error_fail(self): class MyLocust(HttpLocust): host = "http://127.0.0.1:1" - l = MyLocust() + l = MyLocust(self.environment) with l.client.get("/", catch_response=True) as r: self.assertEqual(r.status_code, 0) self.assertEqual(None, r.content) diff --git a/locust/test/test_main.py b/locust/test/test_main.py index 2d8b5b9227..234fd59af7 100644 --- a/locust/test/test_main.py +++ b/locust/test/test_main.py @@ -1,10 +1,14 @@ +import os + from locust import main +from locust.argument_parser import parse_options +from locust.main import create_environment from locust.core import HttpLocust, Locust, TaskSet - from .testcases import LocustTestCase -import os +from .mock_locustfile import mock_locustfile -class TestTaskSet(LocustTestCase): + +class TestLoadLocustfile(LocustTestCase): def test_is_locust(self): self.assertFalse(main.is_locust(("Locust", Locust))) self.assertFalse(main.is_locust(("HttpLocust", HttpLocust))) @@ -27,95 +31,39 @@ class ThriftLocust(Locust): pass self.assertFalse(main.is_locust(("ThriftLocust", ThriftLocust))) - - -class TestLoadLocustfile(LocustTestCase): - mock_docstring = 'This is a mock locust file for unit testing.' 
- mock_locust_file_content = """\"\"\"{}\"\"\" - -from locust import HttpLocust, TaskSet, task, between - - -def index(l): - l.client.get("/") - -def stats(l): - l.client.get("/stats/requests") - - -class UserTasks(TaskSet): - # one can specify tasks like this - tasks = [index, stats] - - -class LocustSubclass(HttpLocust): - host = "http://127.0.0.1:8089" - wait_time = between(2, 5) - task_set = UserTasks - - -class NotLocustSubclass(): - host = "http://localhost:8000" - - """.format(mock_docstring) - directory = os.path.dirname(os.path.abspath(__file__)) - filename = 'mock_locust_file' - - def __create_mock_locust_file(self, filename): - # Creates a mock locust file for testing - self.filename = filename - self.file_path = os.path.join(self.directory, self.filename) - with open(self.file_path, 'w') as file: - file.write(self.mock_locust_file_content) - - def setUp(self): - super(TestLoadLocustfile, self).setUp() - - def tearDown(self): - os.remove(self.file_path) - super(TestLoadLocustfile, self).tearDown() - + def test_load_locust_file_from_absolute_path(self): - self.__create_mock_locust_file('mock_locust_file.py') - docstring, locusts = main.load_locustfile(self.file_path) + with mock_locustfile() as mocked: + docstring, locusts = main.load_locustfile(mocked.file_path) + self.assertIn('LocustSubclass', locusts) + self.assertNotIn('NotLocustSubclass', locusts) def test_load_locust_file_from_relative_path(self): - self.__create_mock_locust_file('mock_locust_file.py') - docstring, locusts = main.load_locustfile(os.path.join('./locust/test/', self.filename)) + with mock_locustfile() as mocked: + docstring, locusts = main.load_locustfile(os.path.join('./locust/test/', mocked.filename)) def test_load_locust_file_with_a_dot_in_filename(self): - self.__create_mock_locust_file('mock_locust_file.py') - docstring, locusts = main.load_locustfile(self.file_path) - - def test_load_locust_file_with_multiple_dots_in_filename(self): - self.__create_mock_locust_file('mock_locust_file.test.py') - docstring, locusts = main.load_locustfile(self.file_path) + with mock_locustfile(filename_prefix="mocked.locust.file") as mocked: + docstring, locusts = main.load_locustfile(mocked.file_path) def test_return_docstring_and_locusts(self): - self.__create_mock_locust_file('mock_locust_file.py') - docstring, locusts = main.load_locustfile(self.file_path) - self.assertEqual(docstring, self.mock_docstring) - self.assertIn('LocustSubclass', locusts) - self.assertNotIn('NotLocustSubclass', locusts) - - -class TestParseOptions(LocustTestCase): - def test_parse_options(self): - parser, options = main.parse_options(args=[ - "-f", "locustfile.py", - "-c", "100", - "-r", "10", - "-t", "5m", + with mock_locustfile() as mocked: + docstring, locusts = main.load_locustfile(mocked.file_path) + self.assertEqual("This is a mock locust file for unit testing", docstring) + self.assertIn('LocustSubclass', locusts) + self.assertNotIn('NotLocustSubclass', locusts) + + def test_create_environment(self): + options = parse_options(args=[ + "--host", "https://custom-host", "--reset-stats", - "--stop-timeout", "5", - "MyLocustClass", ]) - self.assertEqual("locustfile.py", options.locustfile) - self.assertEqual(100, options.num_clients) - self.assertEqual(10, options.hatch_rate) - self.assertEqual("5m", options.run_time) - self.assertTrue(options.reset_stats) - self.assertEqual(5, options.stop_timeout) - self.assertEqual(["MyLocustClass"], options.locust_classes) - # check default arg - self.assertEqual(8089, options.port) + env = 
create_environment(options) + self.assertEqual("https://custom-host", env.host) + self.assertTrue(env.reset_stats) + + options = parse_options(args=[]) + env = create_environment(options) + self.assertEqual(None, env.host) + self.assertFalse(env.reset_stats) + diff --git a/locust/test/test_old_wait_api.py b/locust/test/test_old_wait_api.py index 4910869752..57a6e2386d 100644 --- a/locust/test/test_old_wait_api.py +++ b/locust/test/test_old_wait_api.py @@ -1,7 +1,7 @@ import warnings from locust import InterruptTaskSet, ResponseError -from locust.core import HttpLocust, Locust, TaskSet, events, task +from locust.core import HttpLocust, Locust, TaskSet, task from locust.exception import (CatchResponseError, LocustError, RescheduleTask, RescheduleTaskImmediately) from locust.wait_time import between, constant @@ -19,7 +19,7 @@ class User(Locust): wait_function = lambda self: 5000 class MyTaskSet(TaskSet): pass - taskset = MyTaskSet(User()) + taskset = MyTaskSet(User(self.environment)) self.assertEqual(5, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) @@ -31,7 +31,7 @@ class User(Locust): pass class MyTaskSet(TaskSet): wait_function = lambda self: 5000 - taskset = MyTaskSet(User()) + taskset = MyTaskSet(User(self.environment)) self.assertEqual(5, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) @@ -46,7 +46,7 @@ class TS(TaskSet): @task def t(self): pass - taskset = TS(User()) + taskset = TS(User(self.environment)) self.assertEqual(1, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) @@ -63,7 +63,7 @@ class TS(TaskSet): @task def t(self): pass - taskset = TS(User()) + taskset = TS(User(self.environment)) self.assertEqual(0, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) @@ -80,7 +80,7 @@ class TS(TaskSet): @task def t(self): pass - taskset = TS(User()) + taskset = TS(User(self.environment)) self.assertEqual(3, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) @@ -97,7 +97,7 @@ class TS(TaskSet): @task def t(self): pass - taskset = TS(User()) + taskset = TS(User(self.environment)) self.assertEqual(3, taskset.wait_time()) self.assertEqual(1, len(w)) self.assertTrue(issubclass(w[0].category, DeprecationWarning)) diff --git a/locust/test/test_parser.py b/locust/test/test_parser.py index 2c7732db28..f55a7a3893 100644 --- a/locust/test/test_parser.py +++ b/locust/test/test_parser.py @@ -1,13 +1,18 @@ import unittest import os import tempfile +import mock +from io import StringIO -from locust.main import parse_options +import locust +from locust.argument_parser import parse_options, get_parser, parse_locustfile_option +from .mock_locustfile import mock_locustfile +from .testcases import LocustTestCase class TestParser(unittest.TestCase): def setUp(self): - self.parser, _ = parse_options(default_config_files=[]) + self.parser = get_parser(default_config_files=[]) def test_default(self): opts = self.parser.parse_args([]) @@ -40,9 +45,106 @@ def test_parameter_parsing(self): os.environ['LOCUST_LOCUSTFILE'] = "locustfile_from_env" file.write("host host_from_config\nweb-host webhost_from_config") file.flush() - parser, _ = parse_options(default_config_files=[file.name]) + parser = get_parser(default_config_files=[file.name]) options = parser.parse_args(['-H','host_from_args']) del 
os.environ['LOCUST_LOCUSTFILE'] self.assertEqual(options.web_host, 'webhost_from_config') self.assertEqual(options.locustfile, 'locustfile_from_env') self.assertEqual(options.host, 'host_from_args') # overridden + + +class TestArgumentParser(LocustTestCase): + def test_parse_options(self): + options = parse_options(args=[ + "-f", "locustfile.py", + "-c", "100", + "-r", "10", + "-t", "5m", + "--reset-stats", + "--stop-timeout", "5", + "MyLocustClass", + ]) + self.assertEqual("locustfile.py", options.locustfile) + self.assertEqual(100, options.num_clients) + self.assertEqual(10, options.hatch_rate) + self.assertEqual("5m", options.run_time) + self.assertTrue(options.reset_stats) + self.assertEqual(5, options.stop_timeout) + self.assertEqual(["MyLocustClass"], options.locust_classes) + # check default arg + self.assertEqual(8089, options.web_port) + + def test_parse_locustfile(self): + with mock_locustfile() as mocked: + locustfile = parse_locustfile_option(args=[ + "-f", mocked.file_path, + "-c", "100", + "-r", "10", + "-t", "5m", + "--reset-stats", + "--stop-timeout", "5", + "MyLocustClass", + ]) + self.assertEqual(mocked.file_path, locustfile) + locustfile = parse_locustfile_option(args=[ + "-f", mocked.file_path, + ]) + self.assertEqual(mocked.file_path, locustfile) + + def test_unknown_command_line_arg(self): + with self.assertRaises(SystemExit): + with mock.patch("sys.stderr", new=StringIO()): + parse_options(args=[ + "-f", "something.py", + "-c", "100", + "-r", "10", + "-t", "5m", + "--reset-stats", + "--stop-timeout", "5", + "--unknown-flag", + "MyLocustClass", + ]) + + def test_custom_argument(self): + @locust.events.init_command_line_parser.add_listener + def _(parser, **kw): + parser.add_argument( + '--custom-bool-arg', + action='store_true', + help="Custom boolean flag" + ) + parser.add_argument( + '--custom-string-arg', + help="Custom string arg", + ) + + options = parse_options(args=[ + "-c", "666", + "--custom-bool-arg", + "--custom-string-arg", "HEJ", + ]) + self.assertEqual(666, options.num_clients) + self.assertEqual("HEJ", options.custom_string_arg) + self.assertTrue(options.custom_bool_arg) + + def test_custom_argument_help_message(self): + @locust.events.init_command_line_parser.add_listener + def _(parser, **kw): + parser.add_argument( + '--custom-bool-arg', + action='store_true', + help="Custom boolean flag" + ) + parser.add_argument( + '--custom-string-arg', + help="Custom string arg", + ) + out = StringIO() + with mock.patch("sys.stdout", new=out): + with self.assertRaises(SystemExit): + parse_options(args=["--help"]) + + out.seek(0) + stdout = out.read() + self.assertIn("Custom boolean flag", stdout) + self.assertIn("Custom string arg", stdout) diff --git a/locust/test/test_runners.py b/locust/test/test_runners.py index 97b0792c2a..a8efd53115 100644 --- a/locust/test/test_runners.py +++ b/locust/test/test_runners.py @@ -5,13 +5,15 @@ from gevent.queue import Queue import mock -from locust import events, runners +from locust import runners +from locust.main import create_environment from locust.core import Locust, TaskSet, task +from locust.env import Environment from locust.exception import LocustError from locust.rpc import Message from locust.runners import LocustRunner, LocalLocustRunner, MasterLocustRunner, SlaveNode, \ SlaveLocustRunner, STATE_INIT, STATE_HATCHING, STATE_RUNNING, STATE_MISSING -from locust.stats import global_stats, RequestStats +from locust.stats import RequestStats from locust.test.testcases import LocustTestCase from locust.wait_time import 
between, constant @@ -95,9 +97,12 @@ class task_set(TaskSet): def cpu_task(self): for i in range(1000000): _ = 3 / 2 - runner = LocalLocustRunner([CpuLocust], mocked_options()) + environment = Environment( + options=mocked_options(), + ) + runner = LocalLocustRunner(environment, [CpuLocust]) self.assertFalse(runner.cpu_warning_emitted) - runner.spawn_locusts(1, wait=False) + runner.spawn_locusts(1, 1, wait=False) sleep(2.5) runner.quit() self.assertTrue(runner.cpu_warning_emitted) @@ -115,7 +120,7 @@ class L2(BaseLocust): class L3(BaseLocust): weight = 100 - runner = LocustRunner([L1, L2, L3], mocked_options()) + runner = LocustRunner(Environment(options=mocked_options()), locust_classes=[L1, L2, L3]) self.assert_locust_class_distribution({L1:10, L2:9, L3:10}, runner.weight_locusts(29)) self.assert_locust_class_distribution({L1:10, L2:10, L3:10}, runner.weight_locusts(30)) self.assert_locust_class_distribution({L1:11, L2:10, L3:10}, runner.weight_locusts(31)) @@ -130,7 +135,7 @@ class L2(BaseLocust): class L3(BaseLocust): weight = 100 - runner = LocustRunner([L1, L2, L3], mocked_options()) + runner = LocustRunner(Environment(options=mocked_options()), locust_classes=[L1, L2, L3]) self.assertEqual(1, len(runner.weight_locusts(1))) self.assert_locust_class_distribution({L1:1}, runner.weight_locusts(1)) @@ -142,8 +147,8 @@ class task_set(TaskSet): @task def trigger(self): triggered[0] = True - runner = LocustRunner([BaseLocust], mocked_options()) - runner.spawn_locusts(2, wait=False) + runner = LocustRunner(Environment(options=mocked_options()), locust_classes=[BaseLocust]) + runner.spawn_locusts(2, hatch_rate=2, wait=False) self.assertEqual(2, len(runner.locusts)) g1 = list(runner.locusts)[0] g2 = list(runner.locusts)[1] @@ -167,12 +172,14 @@ class task_set(TaskSet): def my_task(self): User.task_run_count += 1 + environment = Environment(options=mocked_options()) + def on_locust_error(*args, **kwargs): User.locust_error_count += 1 - events.locust_error += on_locust_error + environment.events.locust_error.add_listener(on_locust_error) - runner = LocalLocustRunner([User], mocked_options()) - runner.start_hatching(locust_count=3, hatch_rate=3, wait=False) + runner = LocalLocustRunner(environment, locust_classes=[User]) + runner.start(locust_count=3, hatch_rate=3, wait=False) runner.hatching_greenlet.get(timeout=3) self.assertEqual(1, User.setup_run_count) @@ -193,12 +200,14 @@ def setup(self): def my_task(self): User.task_run_count += 1 + environment = Environment(options=mocked_options()) + def on_locust_error(*args, **kwargs): User.locust_error_count += 1 - events.locust_error += on_locust_error + environment.events.locust_error.add_listener(on_locust_error) - runner = LocalLocustRunner([User], mocked_options()) - runner.start_hatching(locust_count=3, hatch_rate=3, wait=False) + runner = LocalLocustRunner(environment, locust_classes=[User]) + runner.start(locust_count=3, hatch_rate=3, wait=False) runner.hatching_greenlet.get(timeout=3) self.assertEqual(1, User.setup_run_count) @@ -206,39 +215,87 @@ def on_locust_error(*args, **kwargs): self.assertEqual(3, User.task_run_count) def test_change_user_count_during_hatching(self): - class User(Locust): - wait_time = constant(1) - class task_set(TaskSet): - @task - def my_task(self): - pass - - runner = LocalLocustRunner([User], mocked_options()) - runner.start_hatching(locust_count=10, hatch_rate=5, wait=False) - sleep(0.6) - runner.start_hatching(locust_count=5, hatch_rate=5, wait=False) - runner.hatching_greenlet.join() - self.assertEqual(5, 
len(runner.locusts)) - runner.quit() + class User(Locust): + wait_time = constant(1) + class task_set(TaskSet): + @task + def my_task(self): + pass + + environment = Environment(options=mocked_options()) + runner = LocalLocustRunner(environment, [User]) + runner.start(locust_count=10, hatch_rate=5, wait=False) + sleep(0.6) + runner.start(locust_count=5, hatch_rate=5, wait=False) + runner.hatching_greenlet.join() + self.assertEqual(5, len(runner.locusts)) + runner.quit() + + def test_reset_stats(self): + class User(Locust): + wait_time = constant(0) + class task_set(TaskSet): + @task + def my_task(self): + self.locust.environment.events.request_success.fire( + request_type="GET", + name="/test", + response_time=666, + response_length=1337, + ) + sleep(2) + + environment = Environment(reset_stats=True, options=mocked_options()) + runner = LocalLocustRunner(environment, locust_classes=[User]) + runner.start(locust_count=6, hatch_rate=12, wait=False) + sleep(0.25) + self.assertGreaterEqual(runner.stats.get("/test", "GET").num_requests, 3) + sleep(0.3) + self.assertLessEqual(runner.stats.get("/test", "GET").num_requests, 1) + runner.quit() + + def test_no_reset_stats(self): + class User(Locust): + wait_time = constant(0) + class task_set(TaskSet): + @task + def my_task(self): + self.locust.environment.events.request_success.fire( + request_type="GET", + name="/test", + response_time=666, + response_length=1337, + ) + sleep(2) + + environment = Environment(reset_stats=False, options=mocked_options()) + runner = LocalLocustRunner(environment, locust_classes=[User]) + runner.start(locust_count=6, hatch_rate=12, wait=False) + sleep(0.25) + self.assertGreaterEqual(runner.stats.get("/test", "GET").num_requests, 3) + sleep(0.3) + self.assertEqual(6, runner.stats.get("/test", "GET").num_requests) + runner.quit() class TestMasterRunner(LocustTestCase): def setUp(self): super(TestMasterRunner, self).setUp() - global_stats.reset_all() - self._slave_report_event_handlers = [h for h in events.slave_report._handlers] - self.options = mocked_options() + #self._slave_report_event_handlers = [h for h in events.slave_report._handlers] + self.environment.options = mocked_options() + class MyTestLocust(Locust): + pass def tearDown(self): - events.slave_report._handlers = self._slave_report_event_handlers + #events.slave_report._handlers = self._slave_report_event_handlers super(TestMasterRunner, self).tearDown() + def get_runner(self): + return MasterLocustRunner(self.environment, [], master_bind_host="*", master_bind_port=5557) + def test_slave_connect(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "zeh_fake_client1")) self.assertEqual(1, len(master.clients)) self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict") @@ -251,11 +308,8 @@ class MyTestLocust(Locust): self.assertEqual(3, len(master.clients)) def test_slave_stats_report_median(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "fake_client")) master.stats.get("/", "GET").log(100, 23455) @@ -263,7 +317,7 @@ class MyTestLocust(Locust): master.stats.get("/", "GET").log(700, 23455) data = {"user_count":1} 
- events.report_to_master.fire(client_id="fake_client", data=data) + self.environment.events.report_to_master.fire(client_id="fake_client", data=data) master.stats.clear_all() server.mocked_send(Message("stats", data, "fake_client")) @@ -271,11 +325,8 @@ class MyTestLocust(Locust): self.assertEqual(700, s.median_response_time) def test_slave_stats_report_with_none_response_times(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "fake_client")) master.stats.get("/mixed", "GET").log(0, 23455) @@ -288,7 +339,7 @@ class MyTestLocust(Locust): master.stats.get("/onlyNone", "GET").log(None, 23455) data = {"user_count":1} - events.report_to_master.fire(client_id="fake_client", data=data) + self.environment.events.report_to_master.fire(client_id="fake_client", data=data) master.stats.clear_all() server.mocked_send(Message("stats", data, "fake_client")) @@ -300,22 +351,16 @@ class MyTestLocust(Locust): self.assertEqual(0, s2.avg_response_time) def test_master_marks_downed_slaves_as_missing(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "fake_client")) sleep(6) # print(master.clients['fake_client'].__dict__) assert master.clients['fake_client'].state == STATE_MISSING def test_master_total_stats(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "fake_client")) stats = RequestStats() stats.log_request("GET", "/1", 100, 3546) @@ -337,11 +382,8 @@ class MyTestLocust(Locust): self.assertEqual(700, master.stats.total.median_response_time) def test_master_total_stats_with_none_response_times(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "fake_client")) stats = RequestStats() stats.log_request("GET", "/1", 100, 3546) @@ -373,15 +415,12 @@ class MyTestLocust(Locust): self.assertEqual(700, master.stats.total.median_response_time) def test_master_current_response_times(self): - class MyTestLocust(Locust): - pass - start_time = 1 with mock.patch("time.time") as mocked_time: mocked_time.return_value = start_time - global_stats.reset_all() + self.runner.stats.reset_all() with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() mocked_time.return_value += 1.0234 server.mocked_send(Message("client_ready", None, "fake_client")) stats = RequestStats() @@ -422,16 +461,13 @@ class MyTestLocust(Locust): self.assertEqual(3000, master.stats.total.get_current_response_time_percentile(0.95)) def test_rebalance_locust_users_on_slave_connect(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() server.mocked_send(Message("client_ready", None, "zeh_fake_client1")) self.assertEqual(1, len(master.clients)) 
self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict") - master.start_hatching(100, 20) + master.start(100, 20) self.assertEqual(1, len(server.outbox)) client_id, msg = server.outbox.pop() self.assertEqual(100, msg.data["num_clients"]) @@ -450,18 +486,15 @@ class MyTestLocust(Locust): def test_sends_hatch_data_to_ready_running_hatching_slaves(self): '''Sends hatch job to running, ready, or hatching slaves''' - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() master.clients[1] = SlaveNode(1) master.clients[2] = SlaveNode(2) master.clients[3] = SlaveNode(3) master.clients[1].state = STATE_INIT master.clients[2].state = STATE_HATCHING master.clients[3].state = STATE_RUNNING - master.start_hatching(5,5) + master.start(locust_count=5,hatch_rate=5) self.assertEqual(3, len(server.outbox)) @@ -475,13 +508,14 @@ class MyTestLocust(Locust): task_set = MyTaskSet wait_time = constant(0.1) - runner = LocalLocustRunner([MyTestLocust], self.options) + environment = Environment(options=mocked_options()) + runner = LocalLocustRunner(environment, [MyTestLocust]) timeout = gevent.Timeout(2.0) timeout.start() try: - runner.start_hatching(0, 1, wait=True) + runner.start(0, 1, wait=True) runner.hatching_greenlet.join() except gevent.Timeout: self.fail("Got Timeout exception. A locust seems to have been spawned, even though 0 was specified.") @@ -493,15 +527,12 @@ def test_spawn_uneven_locusts(self): Tests that we can accurately spawn a certain number of locusts, even if it's not an even number of the connected slaves """ - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() for i in range(5): server.mocked_send(Message("client_ready", None, "fake_client%i" % i)) - master.start_hatching(7, 7) + master.start(7, 7) self.assertEqual(5, len(server.outbox)) num_clients = 0 @@ -511,15 +542,12 @@ class MyTestLocust(Locust): self.assertEqual(7, num_clients, "Total number of locusts that would have been spawned is not 7") def test_spawn_fewer_locusts_than_slaves(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() for i in range(5): server.mocked_send(Message("client_ready", None, "fake_client%i" % i)) - master.start_hatching(2, 2) + master.start(2, 2) self.assertEqual(5, len(server.outbox)) num_clients = 0 @@ -529,19 +557,16 @@ class MyTestLocust(Locust): self.assertEqual(2, num_clients, "Total number of locusts that would have been spawned is not 2") def test_spawn_locusts_in_stepload_mode(self): - class MyTestLocust(Locust): - pass - with mock.patch("locust.rpc.rpc.Server", mocked_rpc()) as server: - master = MasterLocustRunner(MyTestLocust, self.options) + master = self.get_runner() for i in range(5): server.mocked_send(Message("client_ready", None, "fake_client%i" % i)) - # start a new swarming in Step Load mode: total locust count of 10, hatch rate of 2, step locust count of 5, step duration of 5s - master.start_stepload(10, 2, 5, 5) + # start a new swarming in Step Load mode: total locust count of 10, hatch rate of 2, step locust count of 5, step duration of 2s + master.start_stepload(10, 2, 5, 2) # make sure the first step run is 
started - sleep(1) + sleep(0.5) self.assertEqual(5, len(server.outbox)) num_clients = 0 @@ -552,7 +577,7 @@ class MyTestLocust(Locust): self.assertEqual(5, num_clients, "Total number of locusts that would have been spawned for first step is not 5") # make sure the first step run is complete - sleep(5) + sleep(2) num_clients = 0 idx = end_of_last_step while idx < len(server.outbox): @@ -571,9 +596,9 @@ class task_set(TaskSet): def will_error(self): raise HeyAnException(":(") - runner = LocalLocustRunner([MyLocust], self.options) + runner = LocalLocustRunner(self.environment, [MyLocust]) - l = MyLocust() + l = MyLocust(self.environment) l._catch_exceptions = False self.assertRaises(HeyAnException, l.run) @@ -610,8 +635,8 @@ class MyLocust(Locust): wait_time = constant(0.01) task_set = MyTaskSet - runner = LocalLocustRunner([MyLocust], self.options) - l = MyLocust() + runner = LocalLocustRunner(self.environment, [MyLocust]) + l = MyLocust(self.environment) l.task_set._task_queue = [l.task_set.will_error, l.task_set.will_stop] self.assertRaises(LocustError, l.run) # make sure HeyAnException isn't raised @@ -631,13 +656,17 @@ class MyLocust(Locust): class TestSlaveLocustRunner(LocustTestCase): def setUp(self): super(TestSlaveLocustRunner, self).setUp() - global_stats.reset_all() - self._report_to_master_event_handlers = [h for h in events.report_to_master._handlers] + #self._report_to_master_event_handlers = [h for h in events.report_to_master._handlers] def tearDown(self): - events.report_to_master._handlers = self._report_to_master_event_handlers + #events.report_to_master._handlers = self._report_to_master_event_handlers super(TestSlaveLocustRunner, self).tearDown() + def get_runner(self, environment=None, locust_classes=[]): + if environment is None: + environment = self.environment + return SlaveLocustRunner(environment, locust_classes, master_host="localhost", master_port=5557) + def test_slave_stop_timeout(self): class MyTestLocust(Locust): _test_state = 0 @@ -650,8 +679,8 @@ def the_task(self): MyTestLocust._test_state = 2 with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: - options = mocked_options() - slave = SlaveLocustRunner([MyTestLocust], options) + environment = Environment(options=mocked_options()) + slave = self.get_runner(environment=environment, locust_classes=[MyTestLocust]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", client.outbox[0].type) client.mocked_send(Message("hatch", { @@ -688,7 +717,8 @@ def the_task(self): with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: options = mocked_options() options.stop_timeout = None - slave = SlaveLocustRunner([MyTestLocust], options) + environment = Environment(options=options) + slave = self.get_runner(environment=environment, locust_classes=[MyTestLocust]) self.assertEqual(1, len(client.outbox)) self.assertEqual("client_ready", client.outbox[0].type) client.mocked_send(Message("hatch", { @@ -722,7 +752,8 @@ def my_task(self): with mock.patch("locust.rpc.rpc.Client", mocked_rpc()) as client: options = mocked_options() options.stop_timeout = None - slave = SlaveLocustRunner([User], options) + environment = Environment(options=options) + slave = self.get_runner(environment=environment, locust_classes=[User]) client.mocked_send(Message("hatch", { "hatch_rate": 5, @@ -742,7 +773,6 @@ def my_task(self): slave.hatching_greenlet.join() self.assertEqual(9, len(slave.locusts)) slave.quit() - class TestMessageSerializing(unittest.TestCase): @@ -753,6 +783,7 @@ def 
test_message_serialize(self): self.assertEqual(msg.data, rebuilt.data) self.assertEqual(msg.node_id, rebuilt.node_id) + class TestStopTimeout(LocustTestCase): def test_stop_timeout(self): short_time = 0.05 @@ -770,22 +801,23 @@ class MyTestLocust(Locust): wait_time = constant(0) options = mocked_options() - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment = Environment(options=options) + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time / 2) runner.quit() self.assertEqual("first", MyTaskSet.state) - options.stop_timeout = short_time / 2 # exit with timeout - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment.stop_timeout = short_time / 2 # exit with timeout + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time) runner.quit() self.assertEqual("second", MyTaskSet.state) - options.stop_timeout = short_time * 3 # allow task iteration to complete, with some margin - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment.stop_timeout = short_time * 3 # allow task iteration to complete, with some margin + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time) timeout = gevent.Timeout(short_time * 2) timeout.start() @@ -815,10 +847,10 @@ class MyTestLocust(Locust): task_set = MyTaskSet wait_time = constant(0) - options = mocked_options() - options.stop_timeout = short_time - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment = create_environment(mocked_options()) + environment.stop_timeout = short_time + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time / 2) runner.quit() @@ -838,8 +870,9 @@ class MyTestLocust(Locust): options = mocked_options() options.stop_timeout = short_time - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment = Environment(options=options) + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time) # sleep to make sure locust has had time to start waiting timeout = gevent.Timeout(short_time) timeout.start() @@ -865,10 +898,10 @@ class MyTaskSet(TaskSet): class MyTestLocust(Locust): task_set = MyTaskSet - options = mocked_options() - options.stop_timeout = short_time - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment = create_environment(mocked_options()) + environment.stop_timeout = short_time + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(0) timeout = gevent.Timeout(short_time) timeout.start() @@ -895,25 +928,25 @@ class MyTestLocust(Locust): task_set = MyTaskSet wait_time = constant(0) - options = mocked_options() - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment = create_environment(mocked_options()) + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time / 2) runner.kill_locusts(1) self.assertEqual("first", MyTaskSet.state) runner.quit() - options.stop_timeout = short_time / 2 # exit with timeout - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment.stop_timeout = short_time / 2 # exit with timeout + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) 
gevent.sleep(short_time) runner.kill_locusts(1) self.assertEqual("second", MyTaskSet.state) runner.quit() - options.stop_timeout = short_time * 3 # allow task iteration to complete, with some margin - runner = LocalLocustRunner([MyTestLocust], options) - runner.start_hatching(1, 1) + environment.stop_timeout = short_time * 3 # allow task iteration to complete, with some margin + runner = LocalLocustRunner(environment, [MyTestLocust]) + runner.start(1, 1) gevent.sleep(short_time) timeout = gevent.Timeout(short_time * 2) timeout.start() diff --git a/locust/test/test_stats.py b/locust/test/test_stats.py index a2bc93cbaf..ca5899a1b6 100644 --- a/locust/test/test_stats.py +++ b/locust/test/test_stats.py @@ -3,11 +3,13 @@ import re import os +import gevent import locust from locust.core import HttpLocust, TaskSet, task, Locust +from locust.env import Environment from locust.inspectlocust import get_task_ratio_dict from locust.rpc.protocol import Message -from locust.stats import CachedResponseTimes, RequestStats, StatsEntry, diff_response_time_dicts, global_stats +from locust.stats import CachedResponseTimes, RequestStats, StatsEntry, diff_response_time_dicts, stats_writer from locust.test.testcases import LocustTestCase from .testcases import WebserverTestCase @@ -288,6 +290,7 @@ class TestWriteStatCSVs(LocustTestCase): STATS_FAILURES_FILENAME = "{}_failures.csv".format(STATS_BASE_NAME) def setUp(self): + super().setUp() class User(Locust): setup_run_count = 0 task_run_count = 0 @@ -298,7 +301,8 @@ class task_set(TaskSet): @task def my_task(self): User.task_run_count += 1 - locust.runners.locust_runner = locust.runners.LocalLocustRunner([User], mocked_options()) + self.environment = Environment(options=mocked_options()) + locust.runners.locust_runner = locust.runners.LocalLocustRunner(self.environment, [User]) self.remove_file_if_exists(self.STATS_FILENAME) self.remove_file_if_exists(self.STATS_HISTORY_FILENAME) self.remove_file_if_exists(self.STATS_FAILURES_FILENAME) @@ -314,7 +318,15 @@ def remove_file_if_exists(self, filename): os.remove(filename) def test_write_stat_csvs(self): - locust.stats.write_stat_csvs(self.STATS_BASE_NAME) + locust.stats.write_stat_csvs(self.runner.stats, self.STATS_BASE_NAME) + self.assertTrue(os.path.exists(self.STATS_FILENAME)) + self.assertTrue(os.path.exists(self.STATS_HISTORY_FILENAME)) + self.assertTrue(os.path.exists(self.STATS_FAILURES_FILENAME)) + + def test_csv_stats_writer(self): + greenlet = gevent.spawn(stats_writer, self.runner.stats, self.STATS_BASE_NAME) + gevent.sleep(0.2) + gevent.kill(greenlet) self.assertTrue(os.path.exists(self.STATS_FILENAME)) self.assertTrue(os.path.exists(self.STATS_HISTORY_FILENAME)) self.assertTrue(os.path.exists(self.STATS_FAILURES_FILENAME)) @@ -465,65 +477,49 @@ def test_fail_ratio_with_half_failures(self): class TestRequestStatsWithWebserver(WebserverTestCase): - def test_request_stats_content_length(self): + def setUp(self): + super().setUp() class MyLocust(HttpLocust): host = "http://127.0.0.1:%i" % self.port + self.locust = MyLocust(self.environment) - locust = MyLocust() - locust.client.get("/ultra_fast") - self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) - locust.client.get("/ultra_fast") - self.assertEqual(global_stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) + def test_request_stats_content_length(self): + self.locust.client.get("/ultra_fast") + self.assertEqual(self.runner.stats.get("/ultra_fast", 
"GET").avg_content_length, len("This is an ultra fast response")) + self.locust.client.get("/ultra_fast") + self.assertEqual(self.runner.stats.get("/ultra_fast", "GET").avg_content_length, len("This is an ultra fast response")) def test_request_stats_no_content_length(self): - class MyLocust(HttpLocust): - host = "http://127.0.0.1:%i" % self.port - l = MyLocust() path = "/no_content_length" - r = l.client.get(path) - self.assertEqual(global_stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header")) + r = self.locust.client.get(path) + self.assertEqual(self.runner.stats.get(path, "GET").avg_content_length, len("This response does not have content-length in the header")) def test_request_stats_no_content_length_streaming(self): - class MyLocust(HttpLocust): - host = "http://127.0.0.1:%i" % self.port - l = MyLocust() path = "/no_content_length" - r = l.client.get(path, stream=True) - self.assertEqual(0, global_stats.get(path, "GET").avg_content_length) + r = self.locust.client.get(path, stream=True) + self.assertEqual(0, self.runner.stats.get(path, "GET").avg_content_length) def test_request_stats_named_endpoint(self): - class MyLocust(HttpLocust): - host = "http://127.0.0.1:%i" % self.port - - locust = MyLocust() - locust.client.get("/ultra_fast", name="my_custom_name") - self.assertEqual(1, global_stats.get("my_custom_name", "GET").num_requests) + self.locust.client.get("/ultra_fast", name="my_custom_name") + self.assertEqual(1, self.runner.stats.get("my_custom_name", "GET").num_requests) def test_request_stats_query_variables(self): - class MyLocust(HttpLocust): - host = "http://127.0.0.1:%i" % self.port - - locust = MyLocust() - locust.client.get("/ultra_fast?query=1") - self.assertEqual(1, global_stats.get("/ultra_fast?query=1", "GET").num_requests) + self.locust.client.get("/ultra_fast?query=1") + self.assertEqual(1, self.runner.stats.get("/ultra_fast?query=1", "GET").num_requests) def test_request_stats_put(self): - class MyLocust(HttpLocust): - host = "http://127.0.0.1:%i" % self.port - - locust = MyLocust() - locust.client.put("/put") - self.assertEqual(1, global_stats.get("/put", "PUT").num_requests) + self.locust.client.put("/put") + self.assertEqual(1, self.runner.stats.get("/put", "PUT").num_requests) def test_request_connection_error(self): class MyLocust(HttpLocust): host = "http://localhost:1" - locust = MyLocust() + locust = MyLocust(self.environment) response = locust.client.get("/", timeout=0.1) self.assertEqual(response.status_code, 0) - self.assertEqual(1, global_stats.get("/", "GET").num_failures) - self.assertEqual(1, global_stats.get("/", "GET").num_requests) + self.assertEqual(1, self.runner.stats.get("/", "GET").num_failures) + self.assertEqual(1, self.runner.stats.get("/", "GET").num_requests) class MyTaskSet(TaskSet): diff --git a/locust/test/test_task_sequence_class.py b/locust/test/test_task_sequence_class.py index af5de0873f..3d848d2b11 100644 --- a/locust/test/test_task_sequence_class.py +++ b/locust/test/test_task_sequence_class.py @@ -1,5 +1,5 @@ from locust import InterruptTaskSet, ResponseError -from locust.core import HttpLocust, Locust, TaskSequence, events, seq_task, task +from locust.core import HttpLocust, Locust, TaskSequence, seq_task, task from locust.exception import (CatchResponseError, LocustError, RescheduleTask, RescheduleTaskImmediately) from locust.wait_time import between, constant @@ -13,7 +13,7 @@ def setUp(self): class User(Locust): host = "127.0.0.1" wait_time = between(0.001, 0.1) - 
self.locust = User() + self.locust = User(self.environment) def test_task_sequence_with_list(self): def t1(l): diff --git a/locust/test/test_wait_time.py b/locust/test/test_wait_time.py index 359c536dc8..5fa2918d50 100644 --- a/locust/test/test_wait_time.py +++ b/locust/test/test_wait_time.py @@ -1,11 +1,11 @@ import random import time -from locust.core import HttpLocust, Locust, TaskSet, events, task +from locust.core import Locust, TaskSet from locust.exception import MissingWaitTimeError from locust.wait_time import between, constant, constant_pacing -from .testcases import LocustTestCase, WebserverTestCase +from .testcases import LocustTestCase class TestWaitTime(LocustTestCase): @@ -17,7 +17,7 @@ class TaskSet1(TaskSet): class TaskSet2(TaskSet): wait_time = between(20.0, 21.0) - u = User() + u = User(self.environment) ts1 = TaskSet1(u) ts2 = TaskSet2(u) for i in range(100): @@ -37,18 +37,18 @@ class User(Locust): wait_time = constant(13) class TaskSet1(TaskSet): pass - self.assertEqual(13, User().wait_time()) - self.assertEqual(13, TaskSet1(User()).wait_time()) + self.assertEqual(13, User(self.environment).wait_time()) + self.assertEqual(13, TaskSet1(User(self.environment)).wait_time()) def test_constant_zero(self): class User(Locust): wait_time = constant(0) class TaskSet1(TaskSet): pass - self.assertEqual(0, User().wait_time()) - self.assertEqual(0, TaskSet1(User()).wait_time()) + self.assertEqual(0, User(self.environment).wait_time()) + self.assertEqual(0, TaskSet1(User(self.environment)).wait_time()) start_time = time.time() - TaskSet1(User()).wait() + TaskSet1(User(self.environment)).wait() self.assertLess(time.time() - start_time, 0.002) def test_constant_pacing(self): @@ -56,9 +56,9 @@ class User(Locust): wait_time = constant_pacing(0.1) class TS(TaskSet): pass - ts = TS(User()) + ts = TS(User(self.environment)) - ts2 = TS(User()) + ts2 = TS(User(self.environment)) previous_time = time.time() for i in range(7): @@ -75,5 +75,5 @@ class User(Locust): pass class TS(TaskSet): pass - self.assertRaises(MissingWaitTimeError, lambda: TS(User()).wait_time()) + self.assertRaises(MissingWaitTimeError, lambda: TS(User(self.environment)).wait_time()) diff --git a/locust/test/test_web.py b/locust/test/test_web.py index 8791ac7524..e90bb0fbf0 100644 --- a/locust/test/test_web.py +++ b/locust/test/test_web.py @@ -3,44 +3,40 @@ import json import sys import traceback +from io import StringIO import gevent import requests -from gevent import pywsgi -from locust import events, runners, stats, web -from locust.core import Locust -from locust.main import parse_options +from locust import constant +from locust.argument_parser import get_parser +from locust.core import Locust, TaskSet, task from locust.runners import LocustRunner -from io import StringIO +from locust.web import WebUI from .testcases import LocustTestCase -ALTERNATIVE_HOST = 'http://localhost' -SWARM_DATA_WITH_HOST = {'locust_count': 5, 'hatch_rate': 5, 'host': ALTERNATIVE_HOST} -SWARM_DATA_WITH_NO_HOST = {'locust_count': 5, 'hatch_rate': 5} -SWARM_DATA_WITH_STEP_LOAD = {"locust_count":5, "hatch_rate":2, "step_locust_count":2, "step_duration": "2m"} class TestWebUI(LocustTestCase): def setUp(self): super(TestWebUI, self).setUp() - stats.global_stats.clear_all() - parser = parse_options(default_config_files=[])[0] - self.options = parser.parse_args([]) - runners.locust_runner = LocustRunner([], self.options) + parser = get_parser(default_config_files=[]) + self.environment.options = parser.parse_args([]) + self.runner = 
LocustRunner(self.environment, []) + self.stats = self.runner.stats - web.request_stats.clear_cache() + self.web_ui = WebUI(self.environment, self.runner) + self.web_ui.app.view_functions["request_stats"].clear_cache() - self._web_ui_server = pywsgi.WSGIServer(('127.0.0.1', 0), web.app, log=None) - gevent.spawn(lambda: self._web_ui_server.serve_forever()) - gevent.sleep(0.01) - self.web_port = self._web_ui_server.server_port + gevent.spawn(lambda: self.web_ui.start("127.0.0.1", 0)) + gevent.sleep(0) + self.web_port = self.web_ui.server.server_port def tearDown(self): super(TestWebUI, self).tearDown() - runners.locust_runner = None - self._web_ui_server.stop() + self.web_ui.stop() + self.runner.quit() def test_index(self): self.assertEqual(200, requests.get("http://127.0.0.1:%i/" % self.web_port).status_code) @@ -49,7 +45,7 @@ def test_stats_no_data(self): self.assertEqual(200, requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).status_code) def test_stats(self): - stats.global_stats.log_request("GET", "/", 120, 5612) + self.stats.log_request("GET", "/", 120, 5612) response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port) self.assertEqual(200, response.status_code) @@ -65,25 +61,25 @@ def test_stats(self): self.assertEqual(120, data["stats"][1]["avg_response_time"]) def test_stats_cache(self): - stats.global_stats.log_request("GET", "/test", 120, 5612) + self.stats.log_request("GET", "/test", 120, 5612) response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port) self.assertEqual(200, response.status_code) data = json.loads(response.text) self.assertEqual(2, len(data["stats"])) # one entry plus Aggregated # add another entry - stats.global_stats.log_request("GET", "/test2", 120, 5612) + self.stats.log_request("GET", "/test2", 120, 5612) data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text) self.assertEqual(2, len(data["stats"])) # old value should be cached now - web.request_stats.clear_cache() + self.web_ui.app.view_functions["request_stats"].clear_cache() data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text) self.assertEqual(3, len(data["stats"])) # this should no longer be cached def test_stats_rounding(self): - stats.global_stats.log_request("GET", "/test", 1.39764125, 2) - stats.global_stats.log_request("GET", "/test", 999.9764125, 1000) + self.stats.log_request("GET", "/test", 1.39764125, 2) + self.stats.log_request("GET", "/test", 999.9764125, 1000) response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port) self.assertEqual(200, response.status_code) @@ -92,22 +88,22 @@ def test_stats_rounding(self): self.assertEqual(1000, data["stats"][0]["max_response_time"]) def test_request_stats_csv(self): - stats.global_stats.log_request("GET", "/test2", 120, 5612) + self.stats.log_request("GET", "/test2", 120, 5612) response = requests.get("http://127.0.0.1:%i/stats/requests/csv" % self.web_port) self.assertEqual(200, response.status_code) def test_request_stats_history_csv(self): - stats.global_stats.log_request("GET", "/test2", 120, 5612) + self.stats.log_request("GET", "/test2", 120, 5612) response = requests.get("http://127.0.0.1:%i/stats/stats_history/csv" % self.web_port) self.assertEqual(200, response.status_code) def test_failure_stats_csv(self): - stats.global_stats.log_error("GET", "/", Exception("Error1337")) + self.stats.log_error("GET", "/", Exception("Error1337")) response = requests.get("http://127.0.0.1:%i/stats/failures/csv" % 
self.web_port) self.assertEqual(200, response.status_code) def test_request_stats_with_errors(self): - stats.global_stats.log_error("GET", "/", Exception("Error1337")) + self.stats.log_error("GET", "/", Exception("Error1337")) response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port) self.assertEqual(200, response.status_code) self.assertIn("Error1337", response.text) @@ -117,31 +113,31 @@ def test_reset_stats(self): raise Exception(u"A cool test exception") except Exception as e: tb = sys.exc_info()[2] - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) - stats.global_stats.log_request("GET", "/test", 120, 5612) - stats.global_stats.log_error("GET", "/", Exception("Error1337")) + self.stats.log_request("GET", "/test", 120, 5612) + self.stats.log_error("GET", "/", Exception("Error1337")) response = requests.get("http://127.0.0.1:%i/stats/reset" % self.web_port) self.assertEqual(200, response.status_code) - self.assertEqual({}, stats.global_stats.errors) - self.assertEqual({}, runners.locust_runner.exceptions) + self.assertEqual({}, self.stats.errors) + self.assertEqual({}, self.runner.exceptions) - self.assertEqual(0, stats.global_stats.get("/", "GET").num_requests) - self.assertEqual(0, stats.global_stats.get("/", "GET").num_failures) - self.assertEqual(0, stats.global_stats.get("/test", "GET").num_requests) - self.assertEqual(0, stats.global_stats.get("/test", "GET").num_failures) + self.assertEqual(0, self.stats.get("/", "GET").num_requests) + self.assertEqual(0, self.stats.get("/", "GET").num_failures) + self.assertEqual(0, self.stats.get("/test", "GET").num_requests) + self.assertEqual(0, self.stats.get("/test", "GET").num_failures) def test_exceptions(self): try: raise Exception(u"A cool test exception") except Exception as e: tb = sys.exc_info()[2] - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) response = requests.get("http://127.0.0.1:%i/exceptions" % self.web_port) self.assertEqual(200, response.status_code) @@ -155,8 +151,8 @@ def test_exceptions_csv(self): raise Exception("Test exception") except Exception as e: tb = sys.exc_info()[2] - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) - runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) + self.runner.log_exception("local", str(e), "".join(traceback.format_tb(tb))) response = requests.get("http://127.0.0.1:%i/exceptions/csv" % self.web_port) self.assertEqual(200, response.status_code) @@ -171,19 +167,41 @@ def test_exceptions_csv(self): self.assertEqual(2, int(rows[1][0]), "Exception count should be 2") def test_swarm_host_value_specified(self): - response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, data=SWARM_DATA_WITH_HOST) - self.assertEqual(200, response.status_code) - self.assertEqual(runners.locust_runner.host, SWARM_DATA_WITH_HOST['host']) + class MyLocust(Locust): + 
@@ -171,19 +167,41 @@ def test_exceptions_csv(self):
         self.assertEqual(2, int(rows[1][0]), "Exception count should be 2")

     def test_swarm_host_value_specified(self):
-        response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, data=SWARM_DATA_WITH_HOST)
-        self.assertEqual(200, response.status_code)
-        self.assertEqual(runners.locust_runner.host, SWARM_DATA_WITH_HOST['host'])
+        class MyLocust(Locust):
+            wait_time = constant(1)
+            class task_set(TaskSet):
+                @task(1)
+                def my_task(self):
+                    pass
+        self.environment.locust_classes = [MyLocust]
+        response = requests.post(
+            "http://127.0.0.1:%i/swarm" % self.web_port,
+            data={"locust_count": 5, "hatch_rate": 5, "host": "https://localhost"},
+        )
+        self.assertEqual(200, response.status_code)
+        self.assertEqual("https://localhost", response.json()["host"])
+        self.assertEqual(self.environment.host, "https://localhost")

     def test_swarm_host_value_not_specified(self):
-        response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, data=SWARM_DATA_WITH_NO_HOST)
-        self.assertEqual(200, response.status_code)
-        self.assertEqual(runners.locust_runner.host, None)
+        class MyLocust(Locust):
+            wait_time = constant(1)
+            class task_set(TaskSet):
+                @task(1)
+                def my_task(self):
+                    pass
+        self.runner.locust_classes = [MyLocust]
+        response = requests.post(
+            "http://127.0.0.1:%i/swarm" % self.web_port,
+            data={'locust_count': 5, 'hatch_rate': 5},
+        )
+        self.assertEqual(200, response.status_code)
+        self.assertEqual(None, response.json()["host"])
+        self.assertEqual(self.environment.host, None)

     def test_host_value_from_locust_class(self):
         class MyLocust(Locust):
             host = "http://example.com"
-        runners.locust_runner = LocustRunner([MyLocust], options=self.options)
+        self.web_ui.runner.locust_classes = [MyLocust]
         response = requests.get("http://127.0.0.1:%i/" % self.web_port)
         self.assertEqual(200, response.status_code)
         self.assertIn("http://example.com", response.content.decode("utf-8"))
@@ -193,8 +211,8 @@ def test_host_value_from_multiple_locust_classes(self):
         class MyLocust(Locust):
             host = "http://example.com"
         class MyLocust2(Locust):
-            host = "http://example.com"
-        runners.locust_runner = LocustRunner([MyLocust, MyLocust2], options=self.options)
+            host = "http://example.com"
+        self.web_ui.runner.locust_classes = [MyLocust, MyLocust2]
         response = requests.get("http://127.0.0.1:%i/" % self.web_port)
         self.assertEqual(200, response.status_code)
         self.assertIn("http://example.com", response.content.decode("utf-8"))
@@ -205,14 +223,24 @@ class MyLocust(Locust):
             host = None
         class MyLocust2(Locust):
             host = "http://example.com"
-        runners.locust_runner = LocustRunner([MyLocust, MyLocust2], options=self.options)
+        self.web_ui.runner.locust_classes = [MyLocust, MyLocust2]
         response = requests.get("http://127.0.0.1:%i/" % self.web_port)
         self.assertEqual(200, response.status_code)
         self.assertNotIn("http://example.com", response.content.decode("utf-8"))
         self.assertIn("setting this will override the host on all Locust classes", response.content.decode("utf-8"))

     def test_swarm_in_step_load_mode(self):
-        runners.locust_runner.step_load = True
-        response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, SWARM_DATA_WITH_STEP_LOAD)
+        class MyLocust(Locust):
+            wait_time = constant(1)
+            class task_set(TaskSet):
+                @task(1)
+                def my_task(self):
+                    pass
+        self.environment.locust_classes = [MyLocust]
+        self.environment.step_load = True
+        response = requests.post(
+            "http://127.0.0.1:%i/swarm" % self.web_port,
+            data={"locust_count":5, "hatch_rate":2, "step_locust_count":2, "step_duration": "2m"}
+        )
         self.assertEqual(200, response.status_code)
         self.assertIn("Step Load Mode", response.text)
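The rewritten swarm tests post plain form data to ``/swarm``; the same request works against any running Locust web UI. A sketch (host, port and target URL are illustrative)::

    import requests

    resp = requests.post(
        "http://127.0.0.1:8089/swarm",
        data={"locust_count": 10, "hatch_rate": 2, "host": "https://staging.example.com"},
    )
    # the endpoint answers with JSON along the lines of
    # {"success": true, "message": "Swarming started", "host": "https://staging.example.com"}
    print(resp.json())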
diff --git a/locust/test/testcases.py b/locust/test/testcases.py
index 75c097d955..eac5dca820 100644
--- a/locust/test/testcases.py
+++ b/locust/test/testcases.py
@@ -12,9 +12,11 @@
 from flask import (Flask, Response, make_response, redirect, request,
                    send_file, stream_with_context)

-from locust import events
+import locust
+from locust.event import Events
+from locust.env import Environment
 from locust.log import console_logger
-from locust.stats import global_stats
+from locust.runners import LocustRunner

 from locust.test.mock_logging import MockedLoggingHandler
@@ -123,12 +125,10 @@ class LocustTestCase(unittest.TestCase):
     def setUp(self):
         # Prevent args passed to test runner from being passed to Locust
         del sys.argv[1:]
-
-        self._event_handlers = {}
-        for name in dir(events):
-            event = getattr(events, name)
-            if isinstance(event, events.EventHook):
-                self._event_handlers[event] = copy(event._handlers)
+
+        locust.events = Events()
+        self.environment = Environment(events=locust.events)
+        self.runner = LocustRunner(self.environment, [])

         # When running the tests in Python 3 we get warnings about unclosed sockets.
         # This causes tests that depends on calls to sys.stderr to fail, so we'll
@@ -154,9 +154,6 @@ def setUp(self):
         self.mocked_log = MockedLoggingHandler

     def tearDown(self):
-        for event, handlers in self._event_handlers.items():
-            event._handlers = handlers
-
         # restore logging class
         logging.root.removeHandler(self._logger_class)
         [logging.root.addHandler(h) for h in self._root_log_handlers]
@@ -175,7 +172,6 @@ def setUp(self):
         gevent.spawn(lambda: self._web_server.serve_forever())
         gevent.sleep(0.01)
         self.port = self._web_server.server_port
-        global_stats.clear_all()

     def tearDown(self):
         super(WebserverTestCase, self).tearDown()
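The new ``setUp`` above gives every test its own event hooks, environment and runner instead of patching module-level globals back into place afterwards. Condensed, the same wiring outside the test base class looks roughly like this (only the imports shown in the diff are assumed)::

    import locust
    from locust.env import Environment
    from locust.event import Events
    from locust.runners import LocustRunner

    locust.events = Events()                        # fresh hooks for code that imports locust.events
    environment = Environment(events=locust.events)
    runner = LocustRunner(environment, [])          # no Locust classes registered yet
    stats = runner.stats                            # per-runner stats, replacing the old global_stats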
diff --git a/locust/util/cache.py b/locust/util/cache.py
index 02f86baed7..3b18daa581 100644
--- a/locust/util/cache.py
+++ b/locust/util/cache.py
@@ -1,5 +1,6 @@
 from __future__ import absolute_import

+import functools
 from time import time

@@ -12,6 +13,7 @@ def memoize(timeout, dynamic_timeout=False):
     """
     cache = {"timeout":timeout}
     def decorator(func):
+        @functools.wraps(func)
         def wrapper(*args, **kwargs):
             start = time()
             if (not "time" in cache) or (start - cache["time"] > cache["timeout"]):
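The ``functools.wraps`` line matters for the web UI: Flask derives the default endpoint name from the view function's ``__name__``, and the tests look the memoized view up via ``app.view_functions["request_stats"]``. Without ``wraps`` the registered name would be ``wrapper``. A self-contained illustration of the idea (not the actual locust decorator)::

    import functools

    def memoize_sketch(func):
        @functools.wraps(func)        # preserves func.__name__ (and docstring) on the wrapper
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    @memoize_sketch
    def request_stats():
        return {}

    print(request_stats.__name__)     # "request_stats", not "wrapper"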
diff --git a/locust/web.py b/locust/web.py
index bee4fbe2d1..631b10c168 100644
--- a/locust/web.py
+++ b/locust/web.py
@@ -32,182 +32,196 @@
 DEFAULT_CACHE_TIME = 2.0

-app = Flask(__name__)
-app.debug = True
-app.root_path = os.path.dirname(os.path.abspath(__file__))
-@app.route('/')
-def index():
-    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
-    if is_distributed:
-        slave_count = runners.locust_runner.slave_count
-    else:
-        slave_count = 0
+class WebUI:
+    server = None
+    """Reference to the pywsgi.WSGIServer once it's started"""
-    override_host_warning = False
-    if runners.locust_runner.host:
-        host = runners.locust_runner.host
-    elif len(runners.locust_runner.locust_classes) > 0:
-        all_hosts = set([l.host for l in runners.locust_runner.locust_classes])
-        if len(all_hosts) == 1:
-            host = list(all_hosts)[0]
-        else:
-            # since we have mulitple Locust classes with different host attributes, we'll
-            # inform that specifying host will override the host for all Locust classes
-            override_host_warning = True
-            host = None
-    else:
-        host = None
+    def __init__(self, environment, runner):
+        self.environment = environment
+        self.runner = runner
+        app = Flask(__name__)
+        self.app = app
+        app.debug = True
+        app.root_path = os.path.dirname(os.path.abspath(__file__))
+
+        @app.route('/')
+        def index():
+            is_distributed = isinstance(runner, MasterLocustRunner)
+            if is_distributed:
+                slave_count = runner.slave_count
+            else:
+                slave_count = 0
+
+            override_host_warning = False
+            if environment.host:
+                host = environment.host
+            elif runner.locust_classes:
+                all_hosts = set([l.host for l in runner.locust_classes])
+                if len(all_hosts) == 1:
+                    host = list(all_hosts)[0]
+                else:
+                    # since we have multiple Locust classes with different host attributes, we'll
+                    # inform that specifying host will override the host for all Locust classes
+                    override_host_warning = True
+                    host = None
+            else:
+                host = None
+
+            return render_template("index.html",
+                state=runner.state,
+                is_distributed=is_distributed,
+                user_count=runner.user_count,
+                version=version,
+                host=host,
+                override_host_warning=override_host_warning,
+                slave_count=slave_count,
+                is_step_load=environment.step_load,
+            )
+
+        @app.route('/swarm', methods=["POST"])
+        def swarm():
+            assert request.method == "POST"
+            locust_count = int(request.form["locust_count"])
+            hatch_rate = float(request.form["hatch_rate"])
+            if (request.form.get("host")):
+                environment.host = str(request.form["host"])
+
+            if environment.step_load:
+                step_locust_count = int(request.form["step_locust_count"])
+                step_duration = parse_timespan(str(request.form["step_duration"]))
+                runner.start_stepload(locust_count, hatch_rate, step_locust_count, step_duration)
+                return jsonify({'success': True, 'message': 'Swarming started in Step Load Mode', 'host': environment.host})
+
+            runner.start(locust_count, hatch_rate)
+            return jsonify({'success': True, 'message': 'Swarming started', 'host': environment.host})
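Defining the routes inside ``__init__`` lets each handler close over the ``environment`` and ``runner`` passed to the constructor, so no module-level state is needed and several ``WebUI`` instances can coexist. A stripped-down illustration of the same closure pattern (``TinyUI`` and its route are illustrative, not locust code)::

    from flask import Flask, jsonify

    class TinyUI:
        def __init__(self, runner):
            self.app = app = Flask(__name__)

            @app.route("/stop")
            def stop():
                runner.stop()      # "runner" is captured from the enclosing __init__
                return jsonify({"success": True})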
+
+        @app.route('/stop')
+        def stop():
+            runner.stop()
+            return jsonify({'success':True, 'message': 'Test stopped'})
+
+        @app.route("/stats/reset")
+        def reset_stats():
+            runner.stats.reset_all()
+            runner.exceptions = {}
+            return "ok"
+
+        @app.route("/stats/requests/csv")
+        def request_stats_csv():
+            response = make_response(requests_csv(self.runner.stats))
+            file_name = "requests_{0}.csv".format(time())
+            disposition = "attachment;filename={0}".format(file_name)
+            response.headers["Content-type"] = "text/csv"
+            response.headers["Content-disposition"] = disposition
+            return response
+
+        @app.route("/stats/stats_history/csv")
+        def stats_history_stats_csv():
+            response = make_response(stats_history_csv(self.runner.stats, False, True))
+            file_name = "stats_history_{0}.csv".format(time())
+            disposition = "attachment;filename={0}".format(file_name)
+            response.headers["Content-type"] = "text/csv"
+            response.headers["Content-disposition"] = disposition
+            return response
+
+        @app.route("/stats/failures/csv")
+        def failures_stats_csv():
+            response = make_response(failures_csv(self.runner.stats))
+            file_name = "failures_{0}.csv".format(time())
+            disposition = "attachment;filename={0}".format(file_name)
+            response.headers["Content-type"] = "text/csv"
+            response.headers["Content-disposition"] = disposition
+            return response
+
+        @app.route('/stats/requests')
+        @memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
+        def request_stats():
+            stats = []
+
+            for s in chain(sort_stats(self.runner.stats.entries), [runner.stats.total]):
+                stats.append({
+                    "method": s.method,
+                    "name": s.name,
+                    "safe_name": escape(s.name, quote=False),
+                    "num_requests": s.num_requests,
+                    "num_failures": s.num_failures,
+                    "avg_response_time": s.avg_response_time,
+                    "min_response_time": 0 if s.min_response_time is None else proper_round(s.min_response_time),
+                    "max_response_time": proper_round(s.max_response_time),
+                    "current_rps": s.current_rps,
+                    "current_fail_per_sec": s.current_fail_per_sec,
+                    "median_response_time": s.median_response_time,
+                    "ninetieth_response_time": s.get_response_time_percentile(0.9),
+                    "avg_content_length": s.avg_content_length,
+                })
+
+            errors = []
+            for e in runner.errors.values():
+                err_dict = e.to_dict()
+                err_dict["name"] = escape(err_dict["name"])
+                err_dict["error"] = escape(err_dict["error"])
+                errors.append(err_dict)
+
+            # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
+            # to render extremely slowly. Aggregate stats should be preserved.
+            report = {"stats": stats[:500], "errors": errors[:500]}
+            if len(stats) > 500:
+                report["stats"] += [stats[-1]]
+
+            if stats:
+                report["total_rps"] = stats[len(stats)-1]["current_rps"]
+                report["fail_ratio"] = runner.stats.total.fail_ratio
+                report["current_response_time_percentile_95"] = runner.stats.total.get_current_response_time_percentile(0.95)
+                report["current_response_time_percentile_50"] = runner.stats.total.get_current_response_time_percentile(0.5)
+
+            is_distributed = isinstance(runner, MasterLocustRunner)
+            if is_distributed:
+                slaves = []
+                for slave in runner.clients.values():
+                    slaves.append({"id":slave.id, "state":slave.state, "user_count": slave.user_count, "cpu_usage":slave.cpu_usage})
+
+                report["slaves"] = slaves
+
+            report["state"] = runner.state
+            report["user_count"] = runner.user_count
+
+            return jsonify(report)
+
+        @app.route("/exceptions")
+        def exceptions():
+            return jsonify({
+                'exceptions': [
+                    {
+                        "count": row["count"],
+                        "msg": row["msg"],
+                        "traceback": row["traceback"],
+                        "nodes" : ", ".join(row["nodes"])
+                    } for row in runner.exceptions.values()
+                ]
+            })
+
+        @app.route("/exceptions/csv")
+        def exceptions_csv():
+            data = StringIO()
+            writer = csv.writer(data)
+            writer.writerow(["Count", "Message", "Traceback", "Nodes"])
+            for exc in runner.exceptions.values():
+                nodes = ", ".join(exc["nodes"])
+                writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])
+
+            data.seek(0)
+            response = make_response(data.read())
+            file_name = "exceptions_{0}.csv".format(time())
+            disposition = "attachment;filename={0}".format(file_name)
+            response.headers["Content-type"] = "text/csv"
+            response.headers["Content-disposition"] = disposition
+            return response
+
+    def start(self, host, port):
+        self.server = pywsgi.WSGIServer((host, port), self.app, log=None)
+        self.server.serve_forever()
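Because ``start()`` blocks in ``serve_forever()``, callers run it in a greenlet, exactly as the updated tests do. A usage sketch of driving the new class as a library (it mirrors the test ``setUp``; passing port 0 asks the OS for a free port)::

    import gevent
    import locust
    from locust.env import Environment
    from locust.event import Events
    from locust.runners import LocustRunner
    from locust.web import WebUI

    locust.events = Events()
    environment = Environment(events=locust.events)
    runner = LocustRunner(environment, [])
    web_ui = WebUI(environment, runner)

    gevent.spawn(lambda: web_ui.start("127.0.0.1", 0))   # serve_forever() blocks
    gevent.sleep(0)                                      # yield so the server can bind
    print("web UI listening on port", web_ui.server.server_port)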
-    is_step_load = runners.locust_runner.step_load
-
-    return render_template("index.html",
-        state=runners.locust_runner.state,
-        is_distributed=is_distributed,
-        user_count=runners.locust_runner.user_count,
-        version=version,
-        host=host,
-        override_host_warning=override_host_warning,
-        slave_count=slave_count,
-        is_step_load=is_step_load
-    )
-
-@app.route('/swarm', methods=["POST"])
-def swarm():
-    assert request.method == "POST"
-    is_step_load = runners.locust_runner.step_load
-    locust_count = int(request.form["locust_count"])
-    hatch_rate = float(request.form["hatch_rate"])
-    if (request.form.get("host")):
-        runners.locust_runner.host = str(request.form["host"])
-
-    if is_step_load:
-        step_locust_count = int(request.form["step_locust_count"])
-        step_duration = parse_timespan(str(request.form["step_duration"]))
-        runners.locust_runner.start_stepload(locust_count, hatch_rate, step_locust_count, step_duration)
-        return jsonify({'success': True, 'message': 'Swarming started in Step Load Mode', 'host': runners.locust_runner.host})
-
-    runners.locust_runner.start_hatching(locust_count, hatch_rate)
-    return jsonify({'success': True, 'message': 'Swarming started', 'host': runners.locust_runner.host})
-
-@app.route('/stop')
-def stop():
-    runners.locust_runner.stop()
-    return jsonify({'success':True, 'message': 'Test stopped'})
-
-@app.route("/stats/reset")
-def reset_stats():
-    runners.locust_runner.stats.reset_all()
-    runners.locust_runner.exceptions = {}
-    return "ok"
-
-@app.route("/stats/requests/csv")
-def request_stats_csv():
-    response = make_response(requests_csv())
-    file_name = "requests_{0}.csv".format(time())
-    disposition = "attachment;filename={0}".format(file_name)
-    response.headers["Content-type"] = "text/csv"
-    response.headers["Content-disposition"] = disposition
-    return response
-
-@app.route("/stats/stats_history/csv")
-def stats_history_stats_csv():
-    response = make_response(stats_history_csv(False, True))
-    file_name = "stats_history_{0}.csv".format(time())
-    disposition = "attachment;filename={0}".format(file_name)
-    response.headers["Content-type"] = "text/csv"
-    response.headers["Content-disposition"] = disposition
-    return response
-
-@app.route("/stats/failures/csv")
-def failures_stats_csv():
-    response = make_response(failures_csv())
-    file_name = "failures_{0}.csv".format(time())
-    disposition = "attachment;filename={0}".format(file_name)
-    response.headers["Content-type"] = "text/csv"
-    response.headers["Content-disposition"] = disposition
-    return response
-
-@app.route('/stats/requests')
-@memoize(timeout=DEFAULT_CACHE_TIME, dynamic_timeout=True)
-def request_stats():
-    stats = []
-
-    for s in chain(sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.total]):
-        stats.append({
-            "method": s.method,
-            "name": s.name,
-            "safe_name": escape(s.name, quote=False),
-            "num_requests": s.num_requests,
-            "num_failures": s.num_failures,
-            "avg_response_time": s.avg_response_time,
-            "min_response_time": 0 if s.min_response_time is None else proper_round(s.min_response_time),
-            "max_response_time": proper_round(s.max_response_time),
-            "current_rps": s.current_rps,
-            "current_fail_per_sec": s.current_fail_per_sec,
-            "median_response_time": s.median_response_time,
-            "ninetieth_response_time": s.get_response_time_percentile(0.9),
-            "avg_content_length": s.avg_content_length,
-        })
-
-    errors = [e.to_dict() for e in runners.locust_runner.errors.values()]
-
-    # Truncate the total number of stats and errors displayed since a large number of rows will cause the app
-    # to render extremely slowly. Aggregate stats should be preserved.
-    report = {"stats": stats[:500], "errors": errors[:500]}
-    if len(stats) > 500:
-        report["stats"] += [stats[-1]]
-
-    if stats:
-        report["total_rps"] = stats[len(stats)-1]["current_rps"]
-        report["fail_ratio"] = runners.locust_runner.stats.total.fail_ratio
-        report["current_response_time_percentile_95"] = runners.locust_runner.stats.total.get_current_response_time_percentile(0.95)
-        report["current_response_time_percentile_50"] = runners.locust_runner.stats.total.get_current_response_time_percentile(0.5)
-
-    is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
-    if is_distributed:
-        slaves = []
-        for slave in runners.locust_runner.clients.values():
-            slaves.append({"id":slave.id, "state":slave.state, "user_count": slave.user_count, "cpu_usage":slave.cpu_usage})
-
-        report["slaves"] = slaves
-
-    report["state"] = runners.locust_runner.state
-    report["user_count"] = runners.locust_runner.user_count
-
-    return jsonify(report)
-
-@app.route("/exceptions")
-def exceptions():
-    return jsonify({
-        'exceptions': [
-            {
-                "count": row["count"],
-                "msg": row["msg"],
-                "traceback": row["traceback"],
-                "nodes" : ", ".join(row["nodes"])
-            } for row in runners.locust_runner.exceptions.values()
-        ]
-    })
-
-@app.route("/exceptions/csv")
-def exceptions_csv():
-    data = StringIO()
-    writer = csv.writer(data)
-    writer.writerow(["Count", "Message", "Traceback", "Nodes"])
-    for exc in runners.locust_runner.exceptions.values():
-        nodes = ", ".join(exc["nodes"])
-        writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])
-
-    data.seek(0)
-    response = make_response(data.read())
-    file_name = "exceptions_{0}.csv".format(time())
-    disposition = "attachment;filename={0}".format(file_name)
-    response.headers["Content-type"] = "text/csv"
-    response.headers["Content-disposition"] = disposition
-    return response
-
-def start(locust, options):
-    pywsgi.WSGIServer((options.web_host, options.port),
-                      app, log=None).serve_forever()
+    def stop(self):
+        self.server.stop()
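With the module-level ``start(locust, options)`` gone, shutdown is an instance operation as well; the pairing used in the test ``tearDown`` is the pattern to follow (``web_ui`` and ``runner`` as in the sketch above)::

    web_ui.stop()    # stops the underlying pywsgi.WSGIServer
    runner.quit()    # shuts the runner down, as the test tearDown does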