flexran-post-process
#!/usr/bin/perl
## -*- mode: perl; indent-tabs-mode: nil; perl-indent-level: 4 -*-
## vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=perl
## crucible get metric --source flexran --type {UL, DL} --period <> --begin <> --end <> --breakout type
## Debug: invoke "crucible console" and run this script by hand
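## Example hand-run from a run's result directory (option values below are illustrative;
## of the options parsed further down, only --log-test affects the output):
##   ./flexran-post-process --log-test="Test: 1" --test-file=timer.cfg
## The script expects begin.txt, end.txt, and l1_mlog_stats.txt in the current directory.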
use strict;
use warnings;
use JSON::XS;
use Data::Dumper;
use Getopt::Long;
BEGIN {
    if (!(exists $ENV{'TOOLBOX_HOME'} && -d "$ENV{'TOOLBOX_HOME'}/perl")) {
        print "This script requires libraries that are provided by the toolbox project.\n";
        print "Toolbox can be acquired from https://github.com/perftool-incubator/toolbox;\n";
        print "then set 'export TOOLBOX_HOME=/path/to/toolbox' so that it can be located.\n";
        exit 1;
    }
}
use lib "$ENV{'TOOLBOX_HOME'}/perl";
use toolbox::json;
use toolbox::metrics;
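# The toolbox libraries provide the helpers used below: open_read_text_file() and
# put_json_file() for file I/O, and log_sample()/finish_samples() for emitting metric samples.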
my $ignore;
my %times;
my $test_file_mode="";
my $log_test;
GetOptions("test-file=s" => \$test_file_mode,
           "fec-mode=s"  => \$ignore,
           "usr1=s"      => \$ignore,
           "usr2=s"      => \$ignore,
           "usr3=s"      => \$ignore,
           "usr4=s"      => \$ignore,
           "usr5=s"      => \$ignore,
           "log-test=s"  => \$log_test
          );
#
# Extract begin/end timestamps recorded by the flexran run.
#
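# begin.txt and end.txt are assumed to each contain a single epoch timestamp in seconds
# (possibly fractional); it is converted to integer milliseconds for the sample records.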
foreach my $i (qw(begin end)) {
    my $file = $i . ".txt";
    open(my $ts_fh, '<', $file) or die "Could not open " . $file;
    $times{$i} = int(<$ts_fh> * 1000);
    close($ts_fh);
}
my $primary_metric = 'GNB_DL_FEC_LINK-AVG';
my $metric_data_name;
my %names = ();
my $num_sample=0;
my %ul_min_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_UL_FEC_LINK-MIN');
my %ul_avg_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_UL_FEC_LINK-AVG');
my %ul_max_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_UL_FEC_LINK-MAX');
my %dl_min_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_DL_FEC_LINK-MIN');
my %dl_avg_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_DL_FEC_LINK-AVG');
my %dl_max_desc = ('source' => 'flexran', 'class' => 'count', 'type' => 'GNB_DL_FEC_LINK-MAX');
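# Metric descriptors: every sample logged below is tagged with one of these
# source/class/type triplets, keyed by direction (UL/DL) and statistic (MIN/AVG/MAX).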
my $desc_min_ref;
my $desc_avg_ref;
my $desc_max_ref;
my $result_file = "l1_mlog_stats.txt";
my $match = 0;
my %low_name = ('type' => 'low latency');
my %avg_name = ('type' => 'avg latency');
my %high_name = ('type' => 'high latency');
(my $rc, my $fh) = open_read_text_file($result_file);
if ($rc == 0 and defined $fh) {
    $num_sample = 0;
    # A run may execute multiple tests. For now, only index the FEC metrics of the
    # designated test, specified by $log_test.
    while (<$fh>) {
        # LATENCY_TASKNAME             MIN   HIST_LOW  AVG     HIST_HIGH  MAX
        # GNB_DL_FEC_LINK AVG (MU 0) : 0.00  28.00     29.47   32.00      30.00
        # GNB_UL_FEC_LINK AVG (MU 0) : 0.00  160.00    160.46  162.00     160.00
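        # Note: when the stats line begins with whitespace, split(/\s+/) below yields an
        # empty leading field; the value indices 7..9 (HIST_LOW, AVG, HIST_HIGH) assume
        # that layout.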
        # Skip until the log-test.
        if ( $match == 0 ) {
            if ( /$log_test/ ) {
                $match = 1;
                printf "Capture: %s\n", $_;
            }
            next;
        }
        if ( /GNB_DL_FEC_LINK *AVG/ || /GNB_UL_FEC_LINK *AVG/ ) {
            my @latencies = split(/\s+/, $_);
            if ( /GNB_UL_FEC_LINK *AVG/ ) {
                printf "UL log: %s\n", $_;
                $desc_min_ref = \%ul_min_desc;
                $desc_avg_ref = \%ul_avg_desc;
                $desc_max_ref = \%ul_max_desc;
            } else {
                $desc_min_ref = \%dl_min_desc;
                $desc_avg_ref = \%dl_avg_desc;
                $desc_max_ref = \%dl_max_desc;
                printf "DL log: %s\n", $_;
            }
            my %s_low  = ('begin' => $times{'begin'}, 'end' => $times{'end'}, 'value' => $latencies[7]);
            my %s_avg  = ('begin' => $times{'begin'}, 'end' => $times{'end'}, 'value' => $latencies[8]);
            my %s_high = ('begin' => $times{'begin'}, 'end' => $times{'end'}, 'value' => $latencies[9]);
            # Note: some tests report all-zero FEC metrics; log_sample() rejects a sample whose values are all 0.
            log_sample("flexran", $desc_min_ref, \%low_name, \%s_low);
            log_sample("flexran", $desc_avg_ref, \%avg_name, \%s_avg);
            log_sample("flexran", $desc_max_ref, \%high_name, \%s_high);
            $num_sample++;
        }
        if ( /Test:/ ) {
            # This is the start of the next test: stop indexing and skip the rest of the file.
            $match = 0;
            $log_test = "EINVAL";
        }
    } # while
    close($fh);
    printf "finishing_samples\n";
    $metric_data_name = finish_samples();
    if ( $num_sample > 0 ) {
        # Associate the metrics with a benchmark period (in this case "measurement").
        my %sample;
        my @periods;
        my %period = ('name' => 'measurement');
        $sample{'rickshaw-bench-metric'}{'schema'}{'version'} = "2021.04.12";
        my @metric_files = ( $metric_data_name );
        $period{'metric-files'} = \@metric_files;
        push(@periods, \%period);
        $sample{'periods'} = \@periods;
        $sample{'primary-period'} = 'measurement';
        $sample{'primary-metric'} = $primary_metric;
        $sample{'benchmark'} = 'flexran';
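        # Sketch of the resulting post-process-data.json (the metric-files entry is
        # whatever finish_samples() returned):
        #   {
        #     "rickshaw-bench-metric": { "schema": { "version": "2021.04.12" } },
        #     "periods": [ { "name": "measurement", "metric-files": [ "<metric-data-file>" ] } ],
        #     "primary-period": "measurement",
        #     "primary-metric": "GNB_DL_FEC_LINK-AVG",
        #     "benchmark": "flexran"
        #   }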
        $rc = put_json_file("post-process-data.json", \%sample);
        if ( $rc > 0 ) {
            printf "flexran-post-process(): Could not write file post-process-data.json\n";
            exit 1;
        }
    }
} else {
    printf "flexran-post-process(): open_read_text_file() failed with return code %d for file %s\n", $rc, $result_file;
    printf "Is the current directory that of a flexran server (which produces no result file)?\n";
}
# EOF