Skip to content

Commit

Permalink
[APM] Replace manual rate calculation with rate agg (elastic#115651)
Browse files Browse the repository at this point in the history
* adding rate agg

* fixing build

* rollback changes

* updating snaps

* fixing test

Co-authored-by: Kibana Machine <[email protected]>
  • Loading branch information
cauemarcondes and kibanamachine committed Nov 5, 2021
1 parent 416182a commit 4366432
Show file tree
Hide file tree
Showing 4 changed files with 17 additions and 22 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ import {
const INITIAL_STATE = {
currentPeriod: [],
previousPeriod: [],
throughputUnit: 'minute' as const,
};

export function ServiceOverviewThroughputChart({
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import { ProcessorEvent } from '../../../common/processor_event';
import { Setup } from '../helpers/setup_request';
import { getOffsetInMs } from '../../../common/utils/get_offset_in_ms';
import { getBucketSize } from '../helpers/get_bucket_size';
import { calculateThroughputWithInterval } from '../helpers/calculate_throughput';

export async function getThroughputChartsForBackend({
backendName,
Expand All @@ -42,7 +41,7 @@ export async function getThroughputChartsForBackend({
offset,
});

const { intervalString, bucketSize } = getBucketSize({
const { intervalString } = getBucketSize({
start: startWithOffset,
end: endWithOffset,
minBucketSize: 60,
Expand Down Expand Up @@ -73,9 +72,10 @@ export async function getThroughputChartsForBackend({
extended_bounds: { min: startWithOffset, max: endWithOffset },
},
aggs: {
spanDestinationLatencySum: {
sum: {
throughput: {
rate: {
field: SPAN_DESTINATION_SERVICE_RESPONSE_TIME_COUNT,
unit: 'minute',
},
},
},
Expand All @@ -88,10 +88,7 @@ export async function getThroughputChartsForBackend({
response.aggregations?.timeseries.buckets.map((bucket) => {
return {
x: bucket.key + offsetInMs,
y: calculateThroughputWithInterval({
bucketSize,
value: bucket.spanDestinationLatencySum.value || 0,
}),
y: bucket.throughput.value,
};
}) ?? []
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,7 @@ import {
getDocumentTypeFilterForTransactions,
getProcessorEventForTransactions,
} from '../helpers/transactions';
import {
calculateThroughputWithInterval,
calculateThroughputWithRange,
} from '../helpers/calculate_throughput';
import { calculateThroughputWithRange } from '../helpers/calculate_throughput';

export async function getTransactionsPerMinute({
setup,
Expand Down Expand Up @@ -70,6 +67,9 @@ export async function getTransactionsPerMinute({
fixed_interval: intervalString,
min_doc_count: 0,
},
aggs: {
throughput: { rate: { unit: 'minute' as const } },
},
},
},
},
Expand Down Expand Up @@ -98,10 +98,7 @@ export async function getTransactionsPerMinute({
timeseries:
topTransactionTypeBucket?.timeseries.buckets.map((bucket) => ({
x: bucket.key,
y: calculateThroughputWithInterval({
bucketSize,
value: bucket.doc_count,
}),
y: bucket.throughput.value,
})) || [],
};
}
12 changes: 7 additions & 5 deletions x-pack/plugins/apm/server/lib/services/get_throughput.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
* 2.0.
*/

import { AggregationsDateInterval } from '@elastic/elasticsearch/lib/api/types';
import { ESFilter } from '../../../../../../src/core/types/elasticsearch';
import {
SERVICE_NAME,
Expand All @@ -18,7 +19,6 @@ import {
getProcessorEventForTransactions,
} from '../helpers/transactions';
import { Setup } from '../helpers/setup_request';
import { calculateThroughputWithInterval } from '../helpers/calculate_throughput';

interface Options {
environment: string;
Expand Down Expand Up @@ -81,6 +81,11 @@ export async function getThroughput({
min_doc_count: 0,
extended_bounds: { min: start, max: end },
},
aggs: {
throughput: {
rate: { unit: 'minute' as AggregationsDateInterval },
},
},
},
},
},
Expand All @@ -95,10 +100,7 @@ export async function getThroughput({
response.aggregations?.timeseries.buckets.map((bucket) => {
return {
x: bucket.key,
y: calculateThroughputWithInterval({
bucketSize,
value: bucket.doc_count,
}),
y: bucket.throughput.value,
};
}) ?? []
);
Expand Down

0 comments on commit 4366432

Please sign in to comment.