From 87c446b2ed886ecfed87817842b72ce1be21cbed Mon Sep 17 00:00:00 2001
From: Calen Pennington
Date: Tue, 17 Mar 2015 11:12:52 -0400
Subject: [PATCH 1/2] Remove trailing whitespace

---
 locust/main.py    |  46 ++++++-------
 locust/runners.py |  50 +++++++-------
 locust/stats.py   | 162 ++++++++++++++++++++++++++++++++++------------
 locust/web.py     |  16 ++---
 4 files changed, 178 insertions(+), 96 deletions(-)

diff --git a/locust/main.py b/locust/main.py
index 163e255fcc..e8348089b7 100644
--- a/locust/main.py
+++ b/locust/main.py
@@ -44,7 +44,7 @@ def parse_options():
         default="",
         help="Host to bind the web interface to. Defaults to '' (all interfaces)"
     )
-    
+
     parser.add_option(
         '-P', '--port', '--web-port',
         type="int",
@@ -52,7 +52,7 @@ def parse_options():
         default=8089,
         help="Port on which to run web host"
     )
-    
+
     parser.add_option(
         '-f', '--locustfile',
         dest='locustfile',
@@ -77,7 +77,7 @@ def parse_options():
         default=False,
         help="Set locust to run in distributed mode with this process as slave"
     )
-    
+
     # master host options
     parser.add_option(
         '--master-host',
@@ -87,7 +87,7 @@ def parse_options():
         default="127.0.0.1",
         help="Host or IP address of locust master for distributed load testing. Only used when running with --slave. Defaults to 127.0.0.1."
    )
-    
+
     parser.add_option(
         '--master-port',
         action='store',
@@ -105,7 +105,7 @@ def parse_options():
         default="*",
         help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces)."
     )
-    
+
     parser.add_option(
         '--master-bind-port',
         action='store',
@@ -143,7 +143,7 @@ def parse_options():
         default=1,
         help="The rate per second in which clients are spawned. Only used together with --no-web"
     )
-    
+
     # Number of requests
     parser.add_option(
         '-n', '--num-request',
@@ -153,7 +153,7 @@ def parse_options():
         default=None,
         help="Number of requests to perform. Only used together with --no-web"
     )
-    
+
     # log level
     parser.add_option(
         '--loglevel', '-L',
@@ -163,7 +163,7 @@ def parse_options():
         default='INFO',
         help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
     )
-    
+
     # log file
     parser.add_option(
         '--logfile',
@@ -173,7 +173,7 @@ def parse_options():
         default=None,
         help="Path to log file. If not set, log will go to stdout/stderr",
     )
-    
+
     # if we should print stats in the console
     parser.add_option(
         '--print-stats',
@@ -191,7 +191,7 @@ def parse_options():
         default=False,
         help='Only print the summary stats'
     )
-    
+
     # List locust commands found in loaded locust files/source files
     parser.add_option(
         '-l', '--list',
@@ -200,7 +200,7 @@ def parse_options():
         default=False,
         help="Show list of possible locust classes and exit"
     )
-    
+
     # Display ratio table of all tasks
     parser.add_option(
         '--show-task-ratio',
@@ -217,7 +217,7 @@ def parse_options():
         default=False,
         help="print json data of the locust classes' task execution ratio"
     )
-    
+
     # Version number (optparse gives you --version but we have to do it
     # ourselves to get -V too. sigh)
     parser.add_option(
@@ -345,7 +345,7 @@ def main():
     # setup logging
     setup_logging(options.loglevel, options.logfile)
     logger = logging.getLogger(__name__)
-    
+
     if options.show_version:
         print "Locust %s" % (version,)
         sys.exit(0)
@@ -381,7 +381,7 @@ def main():
         locust_classes = [locusts[n] for n in names]
     else:
         locust_classes = locusts.values()
-    
+
     if options.show_task_ratio:
         console_logger.info("\n Task ratio per locust class")
         console_logger.info( "-" * 80)
@@ -393,12 +393,12 @@ def main():
     if options.show_task_ratio_json:
         from json import dumps
         task_data = {
-            "per_class": get_task_ratio_dict(locust_classes), 
+            "per_class": get_task_ratio_dict(locust_classes),
             "total": get_task_ratio_dict(locust_classes, total=True)
         }
         console_logger.info(dumps(task_data))
         sys.exit(0)
-    
+
     # if --master is set, make sure --no-web isn't set
     if options.master and options.no_web:
         logger.error("Locust can not run distributed with the web interface disabled (do not use --no-web and --master together)")
@@ -408,7 +408,7 @@ def main():
         # spawn web greenlet
         logger.info("Starting web monitor at %s:%s" % (options.web_host or "*", options.port))
         main_greenlet = gevent.spawn(web.start, locust_classes, options)
-    
+
     if not options.master and not options.slave:
         runners.locust_runner = LocalLocustRunner(locust_classes, options)
         # spawn client spawning/hatching greenlet
@@ -424,11 +424,11 @@ def main():
         except socket.error, e:
             logger.error("Failed to connect to the Locust master: %s", e)
             sys.exit(-1)
-    
+
     if not options.only_summary and (options.print_stats or (options.no_web and not options.slave)):
         # spawn stats printing greenlet
         gevent.spawn(stats_printer)
-    
+
     def shutdown(code=0):
         """
         Shut down locust by firing quitting event, printing stats and exiting
         """
         logger.info("Shutting down (exit code %s), bye." % code)
         events.quitting.fire()
-        print_stats(runners.locust_runner.request_stats)
-        print_percentile_stats(runners.locust_runner.request_stats)
+        print_stats(runners.locust_runner.stats)
+        print_percentile_stats(runners.locust_runner.stats)
         print_error_report()
         sys.exit(code)
-    
+
     # install SIGTERM handler
     def sig_term_handler():
         logger.info("Got SIGTERM signal")
         shutdown(0)
     gevent.signal(signal.SIGTERM, sig_term_handler)
-    
+
     try:
         logger.info("Starting Locust %s" % version)
         main_greenlet.join()
diff --git a/locust/runners.py b/locust/runners.py
index 3ec811e384..d1c27e6e20 100644
--- a/locust/runners.py
+++ b/locust/runners.py
@@ -37,7 +37,7 @@ def __init__(self, locust_classes, options):
         self.hatching_greenlet = None
         self.exceptions = {}
         self.stats = global_stats
-        
+
         # register listener that resets stats when hatching is complete
         def on_hatch_complete(user_count):
             self.state = STATE_RUNNING
@@ -48,11 +48,11 @@ def on_hatch_complete(user_count):
     @property
     def request_stats(self):
         return self.stats.entries
-    
+
     @property
     def errors(self):
         return self.stats.errors
-    
+
     @property
     def user_count(self):
         return len(self.locusts)
@@ -97,7 +97,7 @@ def spawn_locusts(self, spawn_count=None, stop_timeout=None, wait=False):
         logger.info("Hatching and swarming %i clients at the rate %g clients/s..."
                     % (spawn_count, self.hatch_rate))
         occurence_count = dict([(l.__name__, 0) for l in self.locust_classes])
-        
+
         def hatch():
             sleep_time = 1.0 / self.hatch_rate
             while True:
@@ -117,7 +117,7 @@ def start_locust(_):
                 if len(self.locusts) % 10 == 0:
                     logger.debug("%i locusts hatched" % len(self.locusts))
                 gevent.sleep(sleep_time)
-        
+
         hatch()
         if wait:
             self.locusts.join()
@@ -208,7 +208,7 @@ def __init__(self, locust_classes, options):
         self.master_port = options.master_port
         self.master_bind_host = options.master_bind_host
         self.master_bind_port = options.master_bind_port
-    
+
     def noop(self, *args, **kwargs):
         """ Used to link() greenlets to in order to be compatible with gevent 1.0 """
         pass
@@ -222,28 +222,28 @@ def __init__(self, id, state=STATE_INIT):
 class MasterLocustRunner(DistributedLocustRunner):
     def __init__(self, *args, **kwargs):
         super(MasterLocustRunner, self).__init__(*args, **kwargs)
-        
+
         class SlaveNodesDict(dict):
             def get_by_state(self, state):
                 return [c for c in self.itervalues() if c.state == state]
-            
+
             @property
             def ready(self):
                 return self.get_by_state(STATE_INIT)
-            
+
             @property
             def hatching(self):
                 return self.get_by_state(STATE_HATCHING)
-            
+
             @property
             def running(self):
                 return self.get_by_state(STATE_RUNNING)
-        
+
         self.clients = SlaveNodesDict()
         self.server = rpc.Server(self.master_bind_host, self.master_bind_port)
         self.greenlet = Group()
         self.greenlet.spawn(self.client_listener).link_exception(callback=self.noop)
-        
+
         # listener that gathers info on how many locust users the slaves has spawned
         def on_slave_report(client_id, data):
             if client_id not in self.clients:
@@ -252,16 +252,16 @@ def on_slave_report(client_id, data):
             self.clients[client_id].user_count = data["user_count"]
         events.slave_report += on_slave_report
-        
+
         # register listener that sends quit message to slave nodes
         def on_quitting():
             self.quit()
         events.quitting += on_quitting
-    
+
     @property
     def user_count(self):
         return sum([c.user_count for c in self.clients.itervalues()])
-    
+
     def start_hatching(self, locust_count, hatch_rate):
         num_slaves = len(self.clients.ready) + len(self.clients.running)
         if not num_slaves:
@@ -280,7 +280,7 @@ def start_hatching(self, locust_count, hatch_rate):
         self.stats.clear_all()
         self.exceptions = {}
         events.master_start_hatching.fire()
-        
+
         for client in self.clients.itervalues():
             data = {
                 "hatch_rate":slave_hatch_rate,
@@ -295,7 +295,7 @@ def start_hatching(self, locust_count, hatch_rate):
                 remaining -= 1
 
             self.server.send(Message("hatch", data, None))
-        
+
         self.stats.start_time = time()
         self.state = STATE_HATCHING
@@ -303,12 +303,12 @@ def stop(self):
         for client in self.clients.hatching + self.clients.running:
             self.server.send(Message("stop", None, None))
         events.master_stop_hatching.fire()
-    
+
     def quit(self):
         for client in self.clients.itervalues():
             self.server.send(Message("quit", None, None))
         self.greenlet.kill(block=True)
-    
+
     def client_listener(self):
         while True:
             msg = self.server.recv()
@@ -349,24 +349,24 @@ class SlaveLocustRunner(DistributedLocustRunner):
     def __init__(self, *args, **kwargs):
         super(SlaveLocustRunner, self).__init__(*args, **kwargs)
         self.client_id = socket.gethostname() + "_" + md5(str(time() + random.randint(0,10000))).hexdigest()
-        
+
         self.client = rpc.Client(self.master_host, self.master_port)
         self.greenlet = Group()
         self.greenlet.spawn(self.worker).link_exception(callback=self.noop)
         self.client.send(Message("client_ready", None, self.client_id))
         self.greenlet.spawn(self.stats_reporter).link_exception(callback=self.noop)
-        
+
         # register listener for when all locust users have hatched, and report it to the master node
         def on_hatch_complete(user_count):
             self.client.send(Message("hatch_complete", {"count":user_count}, self.client_id))
         events.hatch_complete += on_hatch_complete
-        
-        # register listener that adds the current number of spawned locusts to the report that is sent to the master node 
+
+        # register listener that adds the current number of spawned locusts to the report that is sent to the master node
         def on_report_to_master(client_id, data):
             data["user_count"] = self.user_count
         events.report_to_master += on_report_to_master
-        
+
         # register listener that sends quit message to master
         def on_quitting():
             self.client.send(Message("quit", None, self.client_id))
@@ -407,5 +407,5 @@ def stats_reporter(self):
             except:
                 logger.error("Connection lost to master server. Aborting...")
                 break
-            
+
             gevent.sleep(SLAVE_REPORT_INTERVAL)
diff --git a/locust/stats.py b/locust/stats.py
index 89f4befec5..2f05839481 100644
--- a/locust/stats.py
+++ b/locust/stats.py
@@ -33,17 +33,17 @@ def get(self, name, method):
             entry = StatsEntry(self, name, method, self.precision)
             self.entries[(name, method)] = entry
         return entry
-    
+
     def aggregated_stats(self, name="Total", full_request_history=False):
         """
-        Returns a StatsEntry which is an aggregate of all stats entries 
+        Returns a StatsEntry which is an aggregate of all stats entries
         within entries.
         """
         total = StatsEntry(self, name, method=None, precision=self.precision)
         for r in self.entries.itervalues():
             total.extend(r, full_request_history=full_request_history)
         return total
-    
+
     def reset_all(self):
         """
         Go through all stats entries and reset them to zero
@@ -53,7 +53,7 @@ def reset_all(self):
         self.num_failures = 0
         for r in self.entries.itervalues():
             r.reset()
-    
+
     def clear_all(self):
         """
         Remove all stats entries and errors
@@ -65,56 +65,138 @@ def clear_all(self):
         self.max_requests = None
         self.last_request_timestamp = None
         self.start_time = None
-    
+
+    def percentile_column_name(self, percentile):
+        """
+        Return the name of the column for the `percentile` value.
+        """
+        return "{:.0%}".format(percentile)
+
+    def confidence_interval_column_name(self, percentile):
+        """
+        Return a ErrorBar of column names for the (lower, upper) error bar
+        values for the `percentile` value.
+        """
+        return ErrorBar(
+            "{:.0%} low error bound".format(percentile),
+            "{:.0%} high error bound".format(percentile),
+        )
+
+    def get_percentile_dataset(self, include_empty=False):
+        data = tablib.Dataset()
+        data.headers = ['Method', 'Name', '# reqs']
+
+        for percentile in PERCENTILES:
+            data.headers.append(self.percentile_column_name(percentile))
+
+            if CONFIDENCE_INTERVALS:
+                data.headers.extend(self.confidence_interval_column_name(percentile))
+
+        data.headers.append("100%")
+
+        for _, stats in sorted(self.entries.iteritems()):
+            data.append(stats.percentile(include_empty))
+
+        total_stats = self.aggregated_stats(full_request_history=True)
+        if total_stats.response_times:
+            data.append(total_stats.percentile())
+
+        return data
+
+    def get_request_stats_dataset(self):
+        data = tablib.Dataset()
+        data.headers = [
+            "Method",
+            "Name",
+            "# requests",
+            "# failures",
+            "Median response time",
+            "Average response time",
+            "Min response time",
+            "Max response time",
+            "Average Content Size",
+            "Requests/s",
+        ]
+
+        for _, stats in sorted(self.entries.iteritems()):
+            data.append((
+                stats.method,
+                stats.name,
+                stats.num_requests,
+                stats.num_failures,
+                stats.median_response_time,
+                stats.avg_response_time,
+                stats.min_response_time or 0,
+                stats.max_response_time,
+                stats.avg_content_length,
+                stats.total_rps,
+            ))
+
+        total = self.aggregated_stats("Total", full_request_history=True)
+        data.append((
+            total.method,
+            total.name,
+            total.num_requests,
+            total.num_failures,
+            total.median_response_time,
+            total.avg_response_time,
+            total.min_response_time or 0,
+            total.max_response_time,
+            total.avg_content_length,
+            total.total_rps,
+        ))
+
+        return data
+
 
 class StatsEntry(object):
     """
     Represents a single stats entry (name and method)
     """
-    
+
     name = None
     """ Name (URL) of this stats entry """
-    
+
     method = None
     """ Method (GET, POST, PUT, etc.) """
-    
+
     num_requests = None
     """ The number of requests made """
-    
+
     num_failures = None
     """ Number of failed request """
-    
+
     total_response_time = None
     """ Total sum of the response times """
-    
+
     min_response_time = None
     """ Minimum response time """
-    
+
     max_response_time = None
     """ Maximum response time """
-    
+
     num_reqs_per_sec = None
     """ A {second => request_count} dict that holds the number of requests made per second """
-    
+
     response_times = None
     """
     A {response_time => count} dict that holds the response time distribution of all
     the requests.
-    
-    The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90, 
+
+    The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90,
     100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory. The amount of
     rounding is controlled by the precision argument.
 
     This dict is used to calculate the median and percentile response times.
     """
-    
+
     total_content_length = None
     """ The sum of the content length of all the requests for this entry """
-    
+
     start_time = None
     """ Time of the first request for this entry """
-    
+
     last_request_timestamp = None
     """ Time of the last request for this entry """
@@ -131,7 +213,7 @@ def __init__(self, stats, name, method, precision=2):
         self.method = method
         self.precision = precision
         self.reset()
-    
+
     def reset(self):
         self.start_time = time.time()
         self.num_requests = 0
@@ -143,7 +225,7 @@ def reset(self):
         self.last_request_timestamp = int(time.time())
         self.num_reqs_per_sec = {}
         self.total_content_length = 0
-    
+
     def log(self, response_time, content_length):
         self.stats.num_requests += 1
         self.num_requests += 1
@@ -245,16 +327,16 @@ def avg_content_length(self):
             return self.total_content_length / self.num_requests
         except ZeroDivisionError:
             return 0
-    
+
     def extend(self, other, full_request_history=False):
         """
         Extend the data fro the current StatsEntry with the stats from another
-        StatsEntry instance. 
-        
-        If full_request_history is False, we'll only care to add the data from 
-        the last 20 seconds of other's stats. The reason for this argument is that 
-        extend can be used to generate an aggregate of multiple different StatsEntry 
-        instances on the fly, in order to get the *total* current RPS, average 
+        StatsEntry instance.
+
+        If full_request_history is False, we'll only care to add the data from
+        the last 20 seconds of other's stats. The reason for this argument is that
+        extend can be used to generate an aggregate of multiple different StatsEntry
+        instances on the fly, in order to get the *total* current RPS, average
         response time, etc.
         """
         self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)
@@ -277,7 +359,7 @@ def extend(self, other, full_request_history=False):
             for i in xrange(other.last_request_timestamp-20, other.last_request_timestamp+1):
                 if i in other.num_reqs_per_sec:
                     self.num_reqs_per_sec[i] = self.num_reqs_per_sec.get(i, 0) + other.num_reqs_per_sec[i]
-    
+
     def serialize(self):
         return {
             "name": self.name,
@@ -294,7 +376,7 @@ def serialize(self):
             "num_reqs_per_sec": self.num_reqs_per_sec,
             "precision": self.precision,
         }
-    
+
     @classmethod
     def unserialize(cls, data):
         obj = cls(None, data["name"], data["method"], data["precision"])
@@ -312,7 +394,7 @@ def unserialize(cls, data):
         ]:
             setattr(obj, key, data[key])
         return obj
-    
+
     def get_stripped_report(self):
         """
         Return the serialized version of this StatsEntry, and then clear the current stats.
@@ -326,7 +408,7 @@ def __str__(self):
             fail_percent = (self.num_failures/float(self.num_requests + self.num_failures))*100
         except ZeroDivisionError:
             fail_percent = 0
-        
+
         return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7d %7.2f") % (
             self.method + " " + self.name,
             self.num_requests,
@@ -337,12 +419,12 @@ def __str__(self):
             self.median_response_time or 0,
             self.current_rps or 0
         )
-        
+
     def get_response_time_percentile(self, percent):
         """
         Get the response time that a certain number of percent of the requests
         finished within.
-        
+
         Percent specified in range: 0.0 - 1.0
         """
         num_of_request = int((self.num_requests * percent))
@@ -356,7 +438,7 @@ def get_response_time_percentile(self, percent):
     def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
         if not self.num_requests:
             raise ValueError("Can't calculate percentile on url with no successful requests")
-        
+
         return tpl % (
             str(self.method) + " " + self.name,
             self.num_requests,
@@ -387,7 +469,7 @@ def occured(self):
         self.occurences += 1
 
     def to_name(self):
-        return "%s %s: %r" % (self.method, 
+        return "%s %s: %r" % (self.method,
             self.name, repr(self.error))
 
     def to_dict(self):
@@ -401,9 +483,9 @@ def to_dict(self):
     @classmethod
     def from_dict(cls, data):
         return cls(
-            data["method"], 
-            data["name"], 
-            data["error"], 
+            data["method"],
+            data["name"],
+            data["error"],
             data["occurences"]
         )
 
@@ -495,7 +577,7 @@ def print_percentile_stats(stats):
         if r.response_times:
             console_logger.info(r.percentile())
     console_logger.info("-" * (80 + STATS_NAME_WIDTH))
-    
+
     total_stats = global_stats.aggregated_stats()
     if total_stats.response_times:
         console_logger.info(total_stats.percentile())
diff --git a/locust/web.py b/locust/web.py
index 7887a01364..ca397c9e50 100644
--- a/locust/web.py
+++ b/locust/web.py
@@ -34,7 +34,7 @@ def index():
         slave_count = runners.locust_runner.slave_count
     else:
         slave_count = 0
-    
+
     return render_template("index.html",
         state=runners.locust_runner.state,
         is_distributed=is_distributed,
@@ -65,7 +65,7 @@ def stop():
 def reset_stats():
     runners.locust_runner.stats.reset_all()
     return "ok"
-    
+
 @app.route("/stats/requests/csv")
 def request_stats_csv():
     rows = [
@@ -149,12 +149,12 @@ def request_stats():
             "median_response_time": s.median_response_time,
             "avg_content_length": s.avg_content_length,
         })
-    
+
     report = {"stats":stats, "errors":[e.to_dict() for e in runners.locust_runner.errors.itervalues()]}
     if stats:
         report["total_rps"] = stats[len(stats)-1]["current_rps"]
         report["fail_ratio"] = runners.locust_runner.stats.aggregated_stats("Total").fail_ratio
-        
+
         # since generating a total response times dict with all response times from all
         # urls is slow, we make a new total response time dict which will consist of one
         # entry per url with the median response time as key and the number of requests as
@@ -162,14 +162,14 @@ def request_stats():
         response_times = defaultdict(int) # used for calculating total median
         for i in xrange(len(stats)-1):
             response_times[stats[i]["median_response_time"]] += stats[i]["num_requests"]
-        
+
         # calculate total median
         stats[len(stats)-1]["median_response_time"] = median_from_dict(stats[len(stats)-1]["num_requests"], response_times)
-    
+
     is_distributed = isinstance(runners.locust_runner, MasterLocustRunner)
     if is_distributed:
         report["slave_count"] = runners.locust_runner.slave_count
-    
+
     report["state"] = runners.locust_runner.state
     report["user_count"] = runners.locust_runner.user_count
     return json.dumps(report)
@@ -188,7 +188,7 @@ def exceptions_csv():
     for exc in runners.locust_runner.exceptions.itervalues():
         nodes = ", ".join(exc["nodes"])
         writer.writerow([exc["count"], exc["msg"], exc["traceback"], nodes])
-    
+
     data.seek(0)
     response = make_response(data.read())
     file_name = "exceptions_{0}.csv".format(time())
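
The second patch below swaps locust's hand-rolled table and CSV formatting for
tablib Datasets rendered with tabulate. A minimal standalone sketch of how the
two libraries fit together (not part of the patch; the headers and row values
here are invented for illustration):

    import tablib
    from tabulate import tabulate

    # Build a Dataset the way get_request_stats_dataset() does: set the
    # headers once, then append one row (a tuple) per stats entry.
    data = tablib.Dataset()
    data.headers = ["Method", "Name", "# requests", "Requests/s"]
    data.append(("GET", "/", 1200, 42.5))
    data.append(("POST", "/login", 300, 9.8))

    # Console table: tabulate accepts the Dataset's list-of-dicts view and
    # takes the column names from the dict keys.
    print(tabulate(data.dict, headers="keys"))

    # CSV download: the same Dataset serializes itself; this is what the
    # /stats/requests/csv endpoint returns once the patch is applied.
    print(data.csv)

One Dataset feeds both the console table and the CSV export, which is what lets
the patch delete the duplicated formatting code in stats.py and web.py.
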
From 5ec91bd074ef2757769d887a06200703ed476f76 Mon Sep 17 00:00:00 2001
From: Calen Pennington
Date: Tue, 17 Mar 2015 11:25:32 -0400
Subject: [PATCH 2/2] Use tablib and tabulate to format the csv and printed tables

---
 locust/stats.py | 94 +++++++++++++++++--------------------
 locust/web.py   | 54 +++-------------------
 setup.py        |  2 +-
 3 files changed, 37 insertions(+), 113 deletions(-)

diff --git a/locust/stats.py b/locust/stats.py
index 2f05839481..0a725cc180 100644
--- a/locust/stats.py
+++ b/locust/stats.py
@@ -2,12 +2,15 @@
 import gevent
 import hashlib
 import math
+import tablib
+from tabulate import tabulate
 
 import events
 from exception import StopLocust
 from log import console_logger
 
 STATS_NAME_WIDTH = 60
+PERCENTILES = (0.5, 0.66, 0.75, 0.80, 0.9, 0.95, 0.98, 0.99)
 
 class RequestStatsAdditionError(Exception):
     pass
@@ -70,17 +73,7 @@ def percentile_column_name(self, percentile):
         """
         Return the name of the column for the `percentile` value.
         """
-        return "{:.0%}".format(percentile)
-
-    def confidence_interval_column_name(self, percentile):
-        """
-        Return a ErrorBar of column names for the (lower, upper) error bar
-        values for the `percentile` value.
-        """
-        return ErrorBar(
-            "{:.0%} low error bound".format(percentile),
-            "{:.0%} high error bound".format(percentile),
-        )
+        return "{0:.0%}".format(percentile)
 
     def get_percentile_dataset(self, include_empty=False):
         data = tablib.Dataset()
@@ -89,17 +82,16 @@ def get_percentile_dataset(self, include_empty=False):
         for percentile in PERCENTILES:
             data.headers.append(self.percentile_column_name(percentile))
 
-            if CONFIDENCE_INTERVALS:
-                data.headers.extend(self.confidence_interval_column_name(percentile))
-
         data.headers.append("100%")
 
+        # Using iteritems() allows us to sort by the key while only using
+        # the value.
         for _, stats in sorted(self.entries.iteritems()):
             data.append(stats.percentile(include_empty))
 
         total_stats = self.aggregated_stats(full_request_history=True)
         if total_stats.response_times:
-            data.append(total_stats.percentile())
+            data.append(total_stats.percentile(include_empty))
 
         return data
 
@@ -118,6 +110,8 @@ def get_request_stats_dataset(self):
             "Requests/s",
         ]
 
+        # Using iteritems() allows us to sort by the key while only using
+        # the value.
         for _, stats in sorted(self.entries.iteritems()):
             data.append((
                 stats.method,
@@ -132,7 +126,7 @@ def get_request_stats_dataset(self):
                 stats.total_rps,
             ))
 
-        total = self.aggregated_stats("Total", full_request_history=True)
+        total = self.aggregated_stats(full_request_history=True)
         data.append((
             total.method,
             total.name,
@@ -435,23 +429,24 @@ def get_response_time_percentile(self, percent):
             if((self.num_requests - processed_count) <= num_of_request):
                 return response_time
 
-    def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
-        if not self.num_requests:
+    def percentile(self, include_empty=False):
+        if not self.num_requests and not include_empty:
             raise ValueError("Can't calculate percentile on url with no successful requests")
 
-        return tpl % (
-            str(self.method) + " " + self.name,
-            self.num_requests,
-            self.get_response_time_percentile(0.5),
-            self.get_response_time_percentile(0.66),
-            self.get_response_time_percentile(0.75),
-            self.get_response_time_percentile(0.80),
-            self.get_response_time_percentile(0.90),
-            self.get_response_time_percentile(0.95),
-            self.get_response_time_percentile(0.98),
-            self.get_response_time_percentile(0.99),
-            self.max_response_time
-        )
+        results = [self.method, self.name, self.num_requests]
+
+        if self.num_requests > 0:
+            for percentile in PERCENTILES:
+                results.append(self.get_response_time_percentile(percentile))
+
+            results.append(self.max_response_time)
+        else:
+            entry_count = len(PERCENTILES) + 1
+
+            results.extend(["N/A"] * entry_count)
+
+        return tuple(results)
+
 
 class StatsError(object):
     def __init__(self, method, name, error, occurences=0):
@@ -547,40 +542,15 @@ def on_slave_report(client_id, data):
 
 def print_stats(stats):
-    console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s %7s %7s %7s | %7s %7s") % ('Name', '# reqs', '# fails', 'Avg', 'Min', 'Max', 'Median', 'req/s'))
-    console_logger.info("-" * (80 + STATS_NAME_WIDTH))
-    total_rps = 0
-    total_reqs = 0
-    total_failures = 0
-    for key in sorted(stats.iterkeys()):
-        r = stats[key]
-        total_rps += r.current_rps
-        total_reqs += r.num_requests
-        total_failures += r.num_failures
-        console_logger.info(r)
-    console_logger.info("-" * (80 + STATS_NAME_WIDTH))
-
-    try:
-        fail_percent = (total_failures/float(total_reqs))*100
-    except ZeroDivisionError:
-        fail_percent = 0
-
-    console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %42.2f") % ('Total', total_reqs, "%d(%.2f%%)" % (total_failures, fail_percent), total_rps))
+    data = stats.get_request_stats_dataset()
+    console_logger.info(tabulate(data.dict, headers="keys"))
     console_logger.info("")
 
 def print_percentile_stats(stats):
-    console_logger.info("Percentage of the requests completed within given times")
-    console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % ('Name', '# reqs', '50%', '66%', '75%', '80%', '90%', '95%', '98%', '99%', '100%'))
-    console_logger.info("-" * (80 + STATS_NAME_WIDTH))
-    for key in sorted(stats.iterkeys()):
-        r = stats[key]
-        if r.response_times:
-            console_logger.info(r.percentile())
-    console_logger.info("-" * (80 + STATS_NAME_WIDTH))
+    data = stats.get_percentile_dataset()
 
-    total_stats = global_stats.aggregated_stats()
-    if total_stats.response_times:
-        console_logger.info(total_stats.percentile())
+    console_logger.info("Percentage of the requests completed within given times")
+    console_logger.info(tabulate(data.dict, headers="keys"))
     console_logger.info("")
 
 def print_error_report():
@@ -597,5 +567,5 @@ def print_error_report():
 def stats_printer():
     from runners import locust_runner
     while True:
-        print_stats(locust_runner.request_stats)
+        print_stats(locust_runner.stats)
         gevent.sleep(2)
diff --git a/locust/web.py b/locust/web.py
index ca397c9e50..0aa28cc6d9 100644
--- a/locust/web.py
+++ b/locust/web.py
@@ -68,36 +68,8 @@ def reset_stats():
 
 @app.route("/stats/requests/csv")
 def request_stats_csv():
-    rows = [
-        ",".join([
-            '"Method"',
-            '"Name"',
-            '"# requests"',
-            '"# failures"',
-            '"Median response time"',
-            '"Average response time"',
-            '"Min response time"',
-            '"Max response time"',
-            '"Average Content Size"',
-            '"Requests/s"',
-        ])
-    ]
-
-    for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
-        rows.append('"%s","%s",%i,%i,%i,%i,%i,%i,%i,%.2f' % (
-            s.method,
-            s.name,
-            s.num_requests,
-            s.num_failures,
-            s.median_response_time,
-            s.avg_response_time,
-            s.min_response_time or 0,
-            s.max_response_time,
-            s.avg_content_length,
-            s.total_rps,
-        ))
-
-    response = make_response("\n".join(rows))
+    data = runners.locust_runner.stats.get_request_stats_dataset()
+    response = make_response(data.csv)
     file_name = "requests_{0}.csv".format(time())
     disposition = "attachment;filename={0}".format(file_name)
     response.headers["Content-type"] = "text/csv"
@@ -106,26 +78,8 @@ def request_stats_csv():
 
 @app.route("/stats/distribution/csv")
 def distribution_stats_csv():
-    rows = [",".join((
-        '"Name"',
-        '"# requests"',
-        '"50%"',
-        '"66%"',
-        '"75%"',
-        '"80%"',
-        '"90%"',
-        '"95%"',
-        '"98%"',
-        '"99%"',
-        '"100%"',
-    ))]
-    for s in chain(_sort_stats(runners.locust_runner.request_stats), [runners.locust_runner.stats.aggregated_stats("Total", full_request_history=True)]):
-        if s.num_requests:
-            rows.append(s.percentile(tpl='"%s",%i,%i,%i,%i,%i,%i,%i,%i,%i,%i'))
-        else:
-            rows.append('"%s",0,"N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A","N/A"' % s.name)
-
-    response = make_response("\n".join(rows))
+    data = runners.locust_runner.stats.get_percentile_dataset(include_empty=True)
+    response = make_response(data.csv)
     file_name = "distribution_{0}.csv".format(time())
     disposition = "attachment;filename={0}".format(file_name)
     response.headers["Content-type"] = "text/csv"
diff --git a/setup.py b/setup.py
index 2b466dbcf5..39b8cb198a 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@ def run(self):
     packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
     include_package_data=True,
     zip_safe=False,
-    install_requires=["gevent>=1.0.1", "flask>=0.10.1", "requests>=2.4.1", "msgpack-python>=0.4.2"],
+    install_requires=["gevent>=1.0.1", "flask>=0.10.1", "requests>=2.4.1", "msgpack-python>=0.4.2", "tablib", "tabulate"],
     tests_require=['unittest2', 'mock', 'pyzmq'],
     entry_points={
         'console_scripts': [
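
The rewritten StatsEntry.percentile() builds each row from
get_response_time_percentile(), which walks the rounded {response_time: count}
dict that StatsEntry keeps instead of a full list of samples. A standalone
sketch of that lookup (a simplified reimplementation for illustration, not
locust's exact code, which iterates from the slowest time downward):

    def response_time_percentile(response_times, num_requests, percent):
        """Response time within which `percent` (0.0 - 1.0) of requests finished.

        `response_times` is the rounded {response_time_ms: count} dict that
        StatsEntry maintains to keep memory bounded.
        """
        threshold = num_requests * percent
        processed = 0
        # Walk response times from fastest upward until the requested share
        # of all requests has been covered.
        for response_time in sorted(response_times):
            processed += response_times[response_time]
            if processed >= threshold:
                return response_time
        return 0

    times = {100: 60, 200: 30, 1000: 10}   # 100 requests in total
    print(response_time_percentile(times, 100, 0.5))   # -> 100
    print(response_time_percentile(times, 100, 0.95))  # -> 1000

With include_empty=True, an entry that has served no requests skips this lookup
and emits "N/A" for every percentile column plus the 100% column, so the
distribution CSV keeps a fixed row width.
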