Merge branch 'main' into alternative-cpu-utilization-reporting
* main:
  Tcp dump (#919)
  Hash must be decoded to understand spaces [skip ci]
  Allowing Deeplinks to specific phases [skip ci] (#1016)
  Bump python from 3.13.0-slim-bookworm to 3.13.1-slim-bookworm in /docker (#1015)
  Bump pydantic from 2.10.2 to 2.10.3 (#1010)
  Bump fastapi[standard] from 0.115.5 to 0.115.6 (#1011)
  Bump aiohttp from 3.11.9 to 3.11.10 (#1013)
  Bump redis from 5.2.0 to 5.2.1 (#1014)
  Bump python from 3.12.7-slim-bookworm to 3.13.0-slim-bookworm in /docker (#949)
  Bump hiredis from 3.0.0 to 3.1.0 (#1012)
  Bump pylint from 3.3.1 to 3.3.2 (#1008)
  Bump pytest from 8.3.3 to 8.3.4 (#1007)
  Bump aiohttp from 3.11.7 to 3.11.9 (#1009)
  Added kill script for GMT
  Adding cachetools as requirement
  EE Update
Showing 13 changed files with 265 additions and 20 deletions.
Submodule ee updated from 6e26b4 to b16348
metric_providers/network/connections/tcpdump/system/README.md: 3 additions & 0 deletions

```markdown
# Information

See https://docs.green-coding.io/docs/measuring/metric-providers/network-connections-tcpdump-system/ for details.
```
metric_providers/network/connections/tcpdump/system/provider.py: 179 additions & 0 deletions

```python
import os
import re
from collections import defaultdict
import ipaddress
#import netifaces  # only required for host filtering in generate_stats_string()

from metric_providers.base import BaseMetricProvider
from lib.db import DB

class NetworkConnectionsTcpdumpSystemProvider(BaseMetricProvider):
    def __init__(self, *, split_ports=True, skip_check=False):
        super().__init__(
            metric_name='network_connections_tcpdump_system',
            metrics={},
            resolution=None,
            unit=None,
            current_dir=os.path.dirname(os.path.abspath(__file__)),
            metric_provider_executable='tcpdump.sh',
            skip_check=skip_check
        )
        self.split_ports = split_ports

    def read_metrics(self, run_id, containers=None):
        with open(self._filename, 'r', encoding='utf-8') as file:
            lines = file.readlines()

        stats = parse_tcpdump(lines, split_ports=self.split_ports)

        if rows := len(stats):
            DB().query("""
                UPDATE runs
                SET logs = COALESCE(logs, '') || %s -- append
                WHERE id = %s
                """, params=(generate_stats_string(stats), run_id))
            return rows

        return 0

    def get_stderr(self):
        stderr = super().get_stderr()

        if not stderr:
            return stderr

        # Drop up to two informational header lines that tcpdump writes to stderr, e.g.:
        # tcpdump: listening on eno2, link-type EN10MB (Ethernet), snapshot length 262144 bytes
        for marker in ('tcpdump: data link type', 'tcpdump: listening on'):
            newline_pos = stderr.find("\n")
            if newline_pos != -1 and marker in stderr[:newline_pos]:
                stderr = stderr[newline_pos + 1:]

        return stderr

def get_primary_interface():
    gateways = netifaces.gateways()
    if 'default' in gateways and netifaces.AF_INET in gateways['default']:
        return gateways['default'][netifaces.AF_INET][1]

    raise RuntimeError('Could not get primary network interface')

def get_ip_addresses(interface):
    addresses = []

    try:
        addrs = netifaces.ifaddresses(interface)

        if netifaces.AF_INET in addrs:
            addresses.append(addrs[netifaces.AF_INET][0]['addr'])

        if netifaces.AF_INET6 in addrs:
            # Get the first IPv6 address that is neither link-local (fe80:) nor unique-local (fd00:)
            for addr in addrs[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80:') and not addr['addr'].startswith('fd00:'):
                    addresses.append(addr['addr'])
                    break
    except RuntimeError as e:
        print(f"Error getting IP addresses: {e}")

    if not addresses:
        raise RuntimeError('Could not determine either IPv4 or IPv6 address')

    return addresses

def parse_tcpdump(lines, split_ports=False):
    stats = defaultdict(lambda: {'ports': defaultdict(lambda: {'packets': 0, 'bytes': 0}), 'total_bytes': 0})
    ip_pattern = r'(\S+) > (\S+):'
    #tcp_pattern = r'Flags \[(.+?)\]'

    for line in lines:
        ip_match = re.search(ip_pattern, line)
        #tcp_match = re.search(tcp_pattern, line)

        if ip_match:
            src, dst = ip_match.groups()
            src_ip, src_port = parse_ip_port(src)
            dst_ip, dst_port = parse_ip_port(dst)

            if src_ip and dst_ip:
                protocol = "UDP" if "UDP" in line else "TCP"

                if protocol == "UDP":
                    # For UDP, use the reported length
                    length_pattern = r'length:? (\d+)'
                    length_match = re.search(length_pattern, line)
                    if not length_match or not length_match.group(1):
                        raise RuntimeError(f"Could not find UDP packet length for line: {line}")
                    packet_length = int(length_match.group(1))

                else:
                    # For TCP, estimate the packet length (this is a simplification)
                    length_pattern = r'length (\d+)'
                    length_match = re.search(length_pattern, line)

                    if not length_match or not length_match.group(1):
                        if '.53 ' in line or '.53:' in line or '.5353 ' in line or '.5353:' in line: # try DNS / MDNS match
                            dns_packet_length = re.match(r'.*\((\d+)\)$', line)
                            if not dns_packet_length:
                                raise RuntimeError(f"Could not find TCP packet length for line: {line}")
                            packet_length = int(dns_packet_length[1])
                        else:
                            raise RuntimeError(f"No packet length was detected for line {line}")
                    else:
                        packet_length = 40 + int(length_match.group(1)) # Assuming 40 bytes for IP + TCP headers

                # Update source IP stats
                if split_ports:
                    stats[src_ip]['ports'][f"{src_port}/{protocol}"]['packets'] += 1
                    stats[src_ip]['ports'][f"{src_port}/{protocol}"]['bytes'] += packet_length
                else: # alternative without splitting by port
                    stats[src_ip]['ports'][f"{protocol}"]['packets'] += 1
                    stats[src_ip]['ports'][f"{protocol}"]['bytes'] += packet_length

                stats[src_ip]['total_bytes'] += packet_length

                # Update destination IP stats
                if split_ports:
                    stats[dst_ip]['ports'][f"{dst_port}/{protocol}"]['packets'] += 1
                    stats[dst_ip]['ports'][f"{dst_port}/{protocol}"]['bytes'] += packet_length
                else: # alternative without splitting by port
                    stats[dst_ip]['ports'][f"{protocol}"]['packets'] += 1
                    stats[dst_ip]['ports'][f"{protocol}"]['bytes'] += packet_length

                stats[dst_ip]['total_bytes'] += packet_length

    return stats

def parse_ip_port(address):
    try:
        if ']' in address: # IPv6
            ip, port = address.rsplit('.', 1)
            ip = ip.strip('[]')
        else: # IPv4
            ip, port = address.rsplit('.', 1)

        # Validate the IP address
        ipaddress.ip_address(ip)
        return ip, int(port)
    except ValueError:
        return None, None

def generate_stats_string(stats, filter_host=False):
    ip_addresses = []
    if filter_host:
        # Requires netifaces; re-enable the import at the top when using host filtering
        primary_interface = get_primary_interface()
        ip_addresses = get_ip_addresses(primary_interface)

    buffer = []
    for ip, data in stats.items():
        if filter_host and ip in ip_addresses:
            continue

        buffer.append(f"IP: {ip} (as sender or receiver, aggregated)")
        buffer.append(f"    Total transmitted data: {data['total_bytes']} bytes")
        buffer.append('    Ports:')
        for port, port_data in data['ports'].items():
            buffer.append(f"        {port}: {port_data['packets']} packets, {port_data['bytes']} bytes")
        buffer.append('\n')

    return '\n'.join(buffer)
```
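For orientation, here is a minimal, hedged sketch of what `parse_tcpdump` produces for two hypothetical capture lines. The line format is modeled on `tcpdump -tt -n` output, the addresses and lengths are invented, and the import assumes the repository root is on `sys.path`:

```python
from metric_providers.network.connections.tcpdump.system.provider import parse_tcpdump

# Hypothetical capture lines; real tcpdump output varies with version and flags
sample_lines = [
    "1733900000.123456 IP 192.168.1.10.443 > 192.168.1.20.51234: Flags [P.], seq 1:49, ack 1, win 501, length 48",
    "1733900000.223456 IP 192.168.1.10.53 > 192.168.1.20.40000: UDP, length 60",
]

stats = parse_tcpdump(sample_lines, split_ports=True)

# TCP: payload length 48 plus the assumed 40-byte IP+TCP header estimate
assert stats["192.168.1.10"]["ports"]["443/TCP"] == {'packets': 1, 'bytes': 88}
# UDP: the reported length is used directly
assert stats["192.168.1.10"]["ports"]["53/UDP"] == {'packets': 1, 'bytes': 60}
# Every packet is attributed to both endpoints, so this IP accumulates 88 + 60
assert stats["192.168.1.10"]["total_bytes"] == 148
```

Note that because both endpoints are credited with every packet, summing `total_bytes` over all IPs double-counts the traffic on the wire.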
metric_providers/network/connections/tcpdump/system/tcpdump.sh: 25 additions & 0 deletions

```bash
#!/bin/bash
set -euo pipefail

check_system=false
while getopts "c" o; do
    case "$o" in
        c)
            check_system=true
            ;;
    esac
done

if $check_system; then
    # Try to capture a single packet. Since there might be no network traffic
    # at all, the wait is also capped at 3 seconds; timeout then exits with
    # code 124, which still proves tcpdump could start.
    exit_code=0
    timeout 3 tcpdump -tt --micro -n -v -c 1 > /dev/null || exit_code=$?
    if [ "$exit_code" -eq 1 ]; then
        echo "tcpdump could not be started. Missing sudo permissions?"
        exit 1
    fi
    exit 0
fi

tcpdump -tt --micro -n -v
```
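How this script gets called is not part of the diff (it is wired up through `metric_provider_executable` in `BaseMetricProvider`), but a rough, hypothetical by-hand invocation of the check mode could look like this; the `sudo` prefix reflects that tcpdump normally needs root or CAP_NET_RAW:

```python
import subprocess

# Hypothetical manual run of the system check (-c); not the provider's actual call
result = subprocess.run(
    ["sudo", "./tcpdump.sh", "-c"],
    capture_output=True, text=True,
    timeout=10,  # the script itself caps the capture attempt at 3 seconds
    check=False,
)
if result.returncode != 0:
    raise RuntimeError(result.stdout.strip() or "tcpdump system check failed")
```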
Development requirements file: 2 additions & 2 deletions

```diff
@@ -1,5 +1,5 @@
 -r requirements.txt
-pydantic==2.10.2
-pylint==3.3.1
+pydantic==2.10.3
+pylint==3.3.2
 pytest-randomly==3.16.0
 pytest-playwright==0.6.2
```
Kill script for GMT: 12 additions & 0 deletions

```bash
#!/usr/bin/env bash

read -p "This will kill all processes known to be forked by GMT. It may also kill other similarly named processes and should only be used on dedicated measurement nodes. In case you are logged in remotely it will also kill the current terminal session, so you must log in again. Do you want to continue? (y/N) : " kill_gmt

if [[ "$kill_gmt" == "Y" || "$kill_gmt" == "y" ]] ; then
    pgrep python3 | xargs kill
    pgrep tinyproxy | xargs kill
    pgrep metric_providers | xargs kill
    pgrep tcpdump | xargs kill
    docker rm -f $(docker ps -aq) 2>/dev/null
    pgrep bash | xargs kill -9
fi
```
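One ordering detail worth noting: `pgrep bash | xargs kill -9` runs last because, as the prompt warns, it also takes down the invoking shell (including a remote SSH session), so every other cleanup step must have completed by then.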