Refactoring stats to handle custom percentiles #1477

Merged · 16 commits · Aug 13, 2020
35 changes: 35 additions & 0 deletions docs/configuration.rst
@@ -76,3 +76,38 @@ All available configuration options
Here's a table of all the available configuration options, and their corresponding Environment and config file keys:

.. include:: config-options.rst

Customization of statistics settings
------------------------------------

The default configuration for Locust's statistics is defined in module-level constants in stats.py.
It can be tuned to your specific requirements by overriding these values.
To do this, import the locust.stats module and override the desired settings, for example:

.. code-block:: python

import locust.stats
locust.stats.CONSOLE_STATS_INTERVAL_SEC = 15

This can be done directly in a locustfile, or extracted into a separate file shared by all of your locustfiles, as sketched below.
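
One way to share the overrides across locustfiles is a small module that each locustfile imports; a minimal sketch, where the module name ``common_stats_settings.py`` and the chosen values are purely illustrative:

.. code-block:: python

    # common_stats_settings.py -- importing this module applies the overrides
    import locust.stats

    locust.stats.STATS_NAME_WIDTH = 120           # widen the request-name column
    locust.stats.CONSOLE_STATS_INTERVAL_SEC = 30  # print console stats every 30 seconds

    # then add `import common_stats_settings` at the top of each locustfile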

The statistics parameters that can be modified are:

+-------------------------------------------+--------------------------------------------------------------------------------------+
| Parameter name | Purpose |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| STATS_NAME_WIDTH | Width of column for request name in console output |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| STATS_TYPE_WIDTH | Width of column for request type in console output |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| CSV_STATS_INTERVAL_SEC | Interval for how frequently the CSV file is written if this option is configured |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| CONSOLE_STATS_INTERVAL_SEC | Interval for how frequently results are written to console |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW | Window size/resolution - in seconds - when calculating the current response |
| | time percentile |
+-------------------------------------------+--------------------------------------------------------------------------------------+
| PERCENTILES_TO_REPORT | The list of response time percentiles to be calculated & reported |
+-------------------------------------------+--------------------------------------------------------------------------------------+
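
``PERCENTILES_TO_REPORT`` accepts any list of fractions between 0 and 1; the console and CSV percentile columns are generated from it. A minimal sketch, where the particular values are only an example:

.. code-block:: python

    import locust.stats

    # report a custom set of response time percentiles in console and CSV output
    locust.stats.PERCENTILES_TO_REPORT = [0.50, 0.90, 0.95, 0.99, 0.999, 1.0]

With this override the percentile table would show six columns (50%, 90%, 95%, 99%, 99.9%, 100%) instead of the default set.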

121 changes: 43 additions & 78 deletions locust/stats.py
@@ -51,6 +51,16 @@ class RequestStatsAdditionError(Exception):
pass


def get_readable_percentiles(percentile_list):
"""
Converts a list of percentiles from 0-1 fractions to a 0%-100% view for use in console & CSV reporting
:param percentile_list: The list of percentiles in the range 0-1
:return: A list of string representations of each percentile in the 0%-100% view
"""
return [f"{int(percentile * 100) if (percentile * 100).is_integer() else round(100 * percentile, 6)}%"
for percentile in percentile_list]
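# Illustrative note (not part of this diff): with get_readable_percentiles as above,
# get_readable_percentiles([0.5, 0.95, 0.999]) returns ["50%", "95%", "99.9%"] --
# whole-number percentages drop the decimal point, the rest are rounded to 6 places.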


def calculate_response_time_percentile(response_times, num_requests, percent):
"""
Get the response time that a certain number of percent of the requests
@@ -565,27 +575,15 @@ def get_current_response_time_percentile(self, percent):
percent,
)

def percentile(self, tpl=" %-" + str(STATS_TYPE_WIDTH) + "s %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):
def percentile(self):
if not self.num_requests:
raise ValueError("Can't calculate percentile on url with no successful requests")

return tpl % (
self.method,
self.name,
self.num_requests,
self.get_response_time_percentile(0.5),
self.get_response_time_percentile(0.66),
self.get_response_time_percentile(0.75),
self.get_response_time_percentile(0.80),
self.get_response_time_percentile(0.90),
self.get_response_time_percentile(0.95),
self.get_response_time_percentile(0.98),
self.get_response_time_percentile(0.99),
self.get_response_time_percentile(0.999),
self.get_response_time_percentile(0.9999),
self.get_response_time_percentile(1.00)
)


tpl = f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8d {' '.join(['%7d'] * len(PERCENTILES_TO_REPORT))}"

return tpl % ((self.method, self.name, self.num_requests)
+ tuple([self.get_response_time_percentile(p) for p in PERCENTILES_TO_REPORT]))

def _cache_response_times(self, t):
self.response_times_cache[t] = CachedResponseTimes(
response_times=copy(self.response_times),
@@ -711,33 +709,22 @@ def print_stats(stats, current=True):

def print_percentile_stats(stats):
console_logger.info("Percentage of the requests completed within given times")
console_logger.info((" %-" + str(STATS_TYPE_WIDTH) + "s %-" + str(STATS_NAME_WIDTH) + "s %8s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s") % (
'Type',
'Name',
'# reqs',
'50%',
'66%',
'75%',
'80%',
'90%',
'95%',
'98%',
'99%',
'99.9%',
'99.99%',
'100%',
))
console_logger.info("-" * (90 + STATS_NAME_WIDTH))
headers = ('Type', 'Name', '# reqs') + tuple(get_readable_percentiles(PERCENTILES_TO_REPORT))
console_logger.info((f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8s "
f"{' '.join(['%7s'] * len(PERCENTILES_TO_REPORT))}") % headers)
separator = f'{"-" * STATS_TYPE_WIDTH}|{"-" * STATS_NAME_WIDTH}|{"-" * 9}|{("-" * 7 + "|") * len(PERCENTILES_TO_REPORT)}'
console_logger.info(separator)
for key in sorted(stats.entries.keys()):
r = stats.entries[key]
if r.response_times:
console_logger.info(r.percentile())
console_logger.info("-" * (90 + STATS_NAME_WIDTH))
console_logger.info(separator)

if stats.total.response_times:
console_logger.info(stats.total.percentile())
console_logger.info("")


def print_error_report(stats):
if not len(stats.errors):
return
@@ -785,7 +772,7 @@ def sort_stats(stats):

def requests_csv(stats, csv_writer):
"""Returns the contents of the 'requests' & 'distribution' tab as CSV."""
csv_writer.writerow([
headers = [
"Type",
"Name",
"Request Count",
@@ -796,20 +783,9 @@ def requests_csv(stats, csv_writer):
"Max Response Time",
"Average Content Size",
"Requests/s",
"Failures/s",
"50%",
"66%",
"75%",
"80%",
"90%",
"95%",
"98%",
"99%",
"99.9%",
"99.99%",
"99.999%",
"100%",
])
"Failures/s"]
headers.extend(get_readable_percentiles(PERCENTILES_TO_REPORT))
csv_writer.writerow(headers)

for s in chain(sort_stats(stats.entries), [stats.total]):
if s.num_requests:
@@ -836,32 +812,21 @@ def stats_history_csv_header():
def stats_history_csv_header():
"""Headers for the stats history CSV"""

return ','.join((
'"Timestamp"',
'"User Count"',
'"Type"',
'"Name"',
'"Requests/s"',
'"Failures/s"',
'"50%"',
'"66%"',
'"75%"',
'"80%"',
'"90%"',
'"95%"',
'"98%"',
'"99%"',
'"99.9%"',
'"99.99%"',
'"99.999%"',
'"100%"',
'"Total Request Count"',
'"Total Failure Count"',
'"Total Median Response Time"',
'"Total Average Response Time"',
'"Total Min Response Time"',
'"Total Max Response Time"',
'"Total Average Content Size"',
return ",".join((
"Timestamp",
"User Count",
"Type",
"Name",
"Requests/s",
"Failures/s",
",".join(get_readable_percentiles(PERCENTILES_TO_REPORT)),
"Total Request Count",
"Total Failure Count",
"Total Median Response Time",
"Total Average Response Time",
"Total Min Response Time",
"Total Max Response Time",
"Total Average Content Size",
)) + '\n'

def stats_history_csv(environment, all_entries=False):
22 changes: 17 additions & 5 deletions locust/test/test_stats.py
@@ -11,7 +11,7 @@
from locust import HttpUser, TaskSet, task, User, constant
from locust.env import Environment
from locust.rpc.protocol import Message
from locust.stats import CachedResponseTimes, RequestStats, StatsEntry, diff_response_time_dicts, stats_writer
from locust.stats import CachedResponseTimes, RequestStats, StatsEntry, diff_response_time_dicts, stats_writer, PERCENTILES_TO_REPORT
from locust.test.testcases import LocustTestCase
from locust.user.inspectuser import get_task_ratio_dict

@@ -21,11 +21,15 @@

class TestRequestStats(unittest.TestCase):
def setUp(self):
locust.stats.PERCENTILES_TO_REPORT = PERCENTILES_TO_REPORT
self.stats = RequestStats()

def log(response_time, size):
self.stats.log_request("GET", "test_entry", response_time, size)

def log_error(exc):
self.stats.log_error("GET", "test_entry", exc)

log(45, 1)
log(135, 1)
log(44, 1)
@@ -221,14 +225,22 @@ def test_aggregation_last_request_timestamp(self):
def test_percentile_rounded_down(self):
s1 = StatsEntry(self.stats, "rounding down!", "GET")
s1.log(122, 0) # (rounded 120) min
actual_percentile = s1.percentile()
self.assertEqual(actual_percentile, " GET rounding down! 1 120 120 120 120 120 120 120 120 120 120 120")
actual_percentile = s1.percentile().split()
self.assertEqual(actual_percentile, ['GET', 'rounding', 'down!', '1'] + ['120'] * len(PERCENTILES_TO_REPORT))

def test_percentile_rounded_up(self):
s2 = StatsEntry(self.stats, "rounding up!", "GET")
s2.log(127, 0) # (rounded 130) min
actual_percentile = s2.percentile()
self.assertEqual(actual_percentile, " GET rounding up! 1 130 130 130 130 130 130 130 130 130 130 130")
actual_percentile = s2.percentile().split()
self.assertEqual(actual_percentile, ['GET', 'rounding', 'up!', '1'] + ['130'] * len(PERCENTILES_TO_REPORT))

def test_custom_percentile_list(self):
s = StatsEntry(self.stats, "custom_percentiles", "GET")
custom_percentile_list = [0.50, 0.90, 0.95, 0.99]
locust.stats.PERCENTILES_TO_REPORT = custom_percentile_list
s.log(150, 0)
actual_percentile = s.percentile().split()
self.assertEqual(actual_percentile, ['GET', 'custom_percentiles', '1'] + ['150'] * len(custom_percentile_list))

def test_error_grouping(self):
# reset stats