Added a new metric: Client Processing Time #450

Merged · 7 commits · Feb 5, 2024
39 changes: 36 additions & 3 deletions osbenchmark/client.py
@@ -55,17 +55,29 @@ def request_start(self):

    @property
    def request_end(self):
-        return self.ctx["request_end"]
+        return max((value for value in self.ctx["request_end_list"] if value < self.client_request_end))
+
+    @property
+    def client_request_start(self):
+        return self.ctx["client_request_start"]
+
+    @property
+    def client_request_end(self):
+        return self.ctx["client_request_end"]

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # propagate earliest request start and most recent request end to parent
+        client_request_start = self.client_request_start
+        client_request_end = self.client_request_end
        request_start = self.request_start
        request_end = self.request_end
        self.ctx_holder.restore_context(self.token)
        # don't attempt to restore these values on the top-level context as they don't exist
        if self.token.old_value != contextvars.Token.MISSING:
            self.ctx_holder.update_request_start(request_start)
            self.ctx_holder.update_request_end(request_end)
+            self.ctx_holder.update_client_request_start(client_request_start)
+            self.ctx_holder.update_client_request_end(client_request_end)
        self.token = None
        return False
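
The exit hook above is what makes nested timing contexts compose: a child context (one scroll page, one retry) hands its earliest start and latest end up to its parent before the parent's context is restored. A minimal, synchronous sketch of that pattern, assuming nothing about OSB beyond what the diff shows (the class and variable names here are invented):

```python
import contextvars
import time

_ctx = contextvars.ContextVar("request_timings")

class TimedRequest:
    """Invented stand-in for OSB's context manager, for illustration only."""

    def __enter__(self):
        self._token = _ctx.set({"start": time.perf_counter()})
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        meta = _ctx.get()
        meta.setdefault("end", time.perf_counter())
        _ctx.reset(self._token)
        # Propagate to the enclosing request, if any: keep the earliest start
        # and the latest end so retries/scroll pages merge into one window.
        if self._token.old_value is not contextvars.Token.MISSING:
            parent = _ctx.get()
            parent["start"] = min(parent["start"], meta["start"])
            parent["end"] = max(parent.get("end", 0.0), meta["end"])
        return False

with TimedRequest():
    with TimedRequest():      # e.g. first page of a scroll
        time.sleep(0.01)
    with TimedRequest():      # e.g. second page of the same scroll
        time.sleep(0.01)
    window = _ctx.get()
    print(f"logical request window: {window['end'] - window['start']:.3f}s")
```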

@@ -93,13 +105,34 @@ def restore_context(cls, token):
    def update_request_start(cls, new_request_start):
        meta = cls.request_context.get()
        # this can happen if multiple requests are sent on the wire for one logical request (e.g. scrolls)
-        if "request_start" not in meta:
if "request_start" not in meta and "client_request_start" in meta:
meta["request_start"] = new_request_start

@classmethod
def update_request_end(cls, new_request_end):
meta = cls.request_context.get()
meta["request_end"] = new_request_end
if "request_end_list" not in meta:
meta["request_end_list"] = []
meta["request_end_list"].append(new_request_end)

@classmethod
def update_client_request_start(cls, new_client_request_start):
meta = cls.request_context.get()
if "client_request_start" not in meta:
meta["client_request_start"] = new_client_request_start

@classmethod
def update_client_request_end(cls, new_client_request_end):
meta = cls.request_context.get()
meta["client_request_end"] = new_client_request_end

@classmethod
def on_client_request_start(cls):
cls.update_client_request_start(time.perf_counter())

@classmethod
def on_client_request_end(cls):
cls.update_client_request_end(time.perf_counter())

@classmethod
def on_request_start(cls):
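
With on_client_request_start/on_client_request_end bracketing the whole client call and the existing on_request_start/on_request_end bracketing the wire-level request, the client-side overhead falls out as a difference of the two windows. The exact formula lives outside this diff, so the following is an illustrative sketch with fabricated timestamps:

```python
# Illustrative only: the actual derivation is not part of this diff, and these
# perf_counter() values (in seconds) are made up.
client_request_start = 100.000   # client begins processing the call
request_start        = 100.002   # request handed to the transport layer
request_end          = 100.050   # last wire-level response completed
client_request_end   = 100.055   # client returns control to the caller

total_time  = client_request_end - client_request_start   # 0.055 s
wire_time   = request_end - request_start                 # 0.048 s
client_time = total_time - wire_time                      # ~0.007 s spent in the client
print(f"client processing time: {client_time * 1000:.1f} ms")
```

This also presumably explains the filtered max in request_end above: wire-level completions recorded after the client finished would otherwise inflate the window for this logical request.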
7 changes: 6 additions & 1 deletion osbenchmark/metrics.py
@@ -1762,6 +1762,7 @@ def __call__(self):
                self.summary_stats("throughput", t, op_type, percentiles_list=self.throughput_percentiles),
                self.single_latency(t, op_type),
                self.single_latency(t, op_type, metric_name="service_time"),
+                self.single_latency(t, op_type, metric_name="client_processing_time"),
                self.single_latency(t, op_type, metric_name="processing_time"),
                error_rate,
                duration,
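
single_latency presumably reduces the raw client_processing_time samples to the same percentile summary it already produces for latency and service_time. As a rough sketch of that kind of reduction (not OSB's actual implementation; nearest-rank percentiles are just one common choice, and the key names mimic percentile labels):

```python
# Not OSB's single_latency: a nearest-rank percentile summary over fabricated
# client_processing_time samples, in milliseconds.
samples_ms = [4.2, 4.7, 4.8, 4.9, 5.0, 5.1, 5.5, 6.0, 12.3, 30.1]

def summarize(samples):
    ordered = sorted(samples)

    def pct(p):
        rank = max(1, round(p / 100 * len(ordered)))  # nearest-rank rule
        return ordered[rank - 1]

    return {"min": ordered[0], "50_0": pct(50), "90_0": pct(90),
            "99_0": pct(99), "max": ordered[-1]}

print(summarize(samples_ms))
# {'min': 4.2, '50_0': 5.0, '90_0': 12.3, '99_0': 30.1, 'max': 30.1}
```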
@@ -2030,6 +2031,8 @@ def op_metrics(op_item, key, single_value=False):
        all_results.append(op_metrics(item, "latency"))
        if "service_time" in item:
            all_results.append(op_metrics(item, "service_time"))
+        if "client_processing_time" in item:
+            all_results.append(op_metrics(item, "client_processing_time"))
        if "processing_time" in item:
            all_results.append(op_metrics(item, "processing_time"))
        if "error_rate" in item:
@@ -2074,13 +2077,15 @@ def op_metrics(op_item, key, single_value=False):
    def v(self, d, k, default=None):
        return d.get(k, default) if d else default

-    def add_op_metrics(self, task, operation, throughput, latency, service_time, processing_time, error_rate, duration, meta):
+    def add_op_metrics(self, task, operation, throughput, latency, service_time, client_processing_time,
+                       processing_time, error_rate, duration, meta):
        doc = {
            "task": task,
            "operation": operation,
            "throughput": throughput,
            "latency": latency,
            "service_time": service_time,
+            "client_processing_time": client_processing_time,
            "processing_time": processing_time,
            "error_rate": error_rate,
            "duration": duration
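
Because client_processing_time is a new positional parameter between service_time and processing_time, any positional caller of add_op_metrics must be updated in step. A hypothetical, runnable call against a minimal stand-in for the results store (all values fabricated):

```python
# Minimal stand-in for the results store; metric values and percentile keys
# are fabricated for illustration.
class FakeResults:
    def __init__(self):
        self.op_metrics = []

    def add_op_metrics(self, task, operation, throughput, latency, service_time,
                       client_processing_time, processing_time, error_rate,
                       duration, meta):
        self.op_metrics.append({
            "task": task, "operation": operation, "throughput": throughput,
            "latency": latency, "service_time": service_time,
            "client_processing_time": client_processing_time,
            "processing_time": processing_time, "error_rate": error_rate,
            "duration": duration, "meta": meta,
        })

results = FakeResults()
results.add_op_metrics(
    task="index-append", operation="bulk",
    throughput={"mean": 5000.0},
    latency={"50_0": 120.0, "99_0": 310.0},
    service_time={"50_0": 115.0, "99_0": 300.0},
    client_processing_time={"50_0": 1.2, "99_0": 4.5},
    processing_time={"50_0": 118.0, "99_0": 305.0},
    error_rate=0.0, duration=600, meta={},
)
print(results.op_metrics[0]["client_processing_time"])
```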