diff --git a/.github/workflows/CI-production-testing.yml b/.github/workflows/CI-production-testing.yml
index 451be1889..1e5e8c6be 100644
--- a/.github/workflows/CI-production-testing.yml
+++ b/.github/workflows/CI-production-testing.yml
@@ -42,28 +42,146 @@ jobs:
run: |
python -m pip install --upgrade pip
grep -v 'black' install/requirements.txt | xargs pip3 install --no-cache-dir
+ pip install coverage
- name: Start redis server
run: redis-server --daemonize yes
- name: Run unit tests
- run: python3 -m pytest tests/ --ignore="tests/test_database.py" --ignore="tests/integration_tests" -n 7 -p no:warnings -vv -s
+ run: coverage run --source=./ -m pytest tests/ --ignore="tests/test_database.py" --ignore="tests/integration_tests" -n 7 -p no:warnings -vv -s
+
- name: Run database unit tests
- run: python3 -m pytest tests/test_database.py -p no:warnings -vv
+ run: |
+ coverage run --source=./ -m pytest tests/test_database.py -p no:warnings -vv
+ coverage report --include="slips_files/core/database/*"
+ coverage html --include="slips_files/core/database/*" -d coverage_reports/database
+
+ - name: Flowalerts test
+ run: |
+ coverage run --source=./ -m pytest tests/test_flowalerts.py -p no:warnings -vv
+ coverage report --include="modules/flowalerts/*"
+ coverage html --include="modules/flowalerts/*" -d coverage_reports/flowalerts
+
+ - name: Whitelist test
+ run: |
+ coverage run --source=./ -m pytest tests/test_whitelist.py -p no:warnings -vv
+ coverage report --include="slips_files/core/helpers/whitelist.py*"
+ coverage html --include="slips_files/core/helpers/whitelist.py*" -d coverage_reports/whitelist
+
+ - name: arp test
+ run: |
+ coverage run --source=./ -m pytest tests/test_arp.py -p no:warnings -vv
+ coverage report --include="modules/arp/*"
+ coverage html --include="modules/arp/*" -d coverage_reports/arp
+
+ - name: blocking test
+ run: |
+ coverage run --source=./ -m pytest tests/test_blocking.py -p no:warnings -vv
+ coverage report --include="modules/blocking/*"
+ coverage html --include="modules/blocking/*" -d coverage_reports/blocking
+
+ - name: flowhandler test
+ run: |
+ coverage run --source=./ -m pytest tests/test_flow_handler.py -p no:warnings -vv
+ coverage report --include="slips_files/core/helpers/flow_handler.py*"
+ coverage html --include="slips_files/core/helpers/flow_handler.py*" -d coverage_reports/flowhandler
+
+ - name: horizontal_portscans test
+ run: |
+ coverage run --source=./ -m pytest tests/test_horizontal_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/horizontal_portscan.py*"
+ coverage html --include="modules/network_discovery/horizontal_portscan.py*" -d coverage_reports/horizontal_portscan
+
+ - name: http_analyzer test
+ run: |
+ coverage run --source=./ -m pytest tests/test_http_analyzer.py -p no:warnings -vv
+ coverage report --include="modules/http_analyzer/http_analyzer.py*"
+ coverage html --include="modules/http_analyzer/http_analyzer.py*" -d coverage_reports/http_analyzer
+
+ - name: vertical_portscans test
+ run: |
+ coverage run --source=./ -m pytest tests/test_vertical_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/vertical_portscan.py*"
+ coverage html --include="modules/network_discovery/vertical_portscan.py*" -d coverage_reports/vertical_portscan
+
+ - name: virustotal test
+ run: |
+ coverage run --source=./ -m pytest tests/test_virustotal.py -p no:warnings -vv
+ coverage report --include="modules/virustotal/virustotal.py*"
+ coverage html --include="modules/virustotal/virustotal.py*" -d coverage_reports/virustotal
+
+ - name: updatemanager test
+ run: |
+ coverage run --source=./ -m pytest tests/test_update_file_manager.py -p no:warnings -vv
+ coverage report --include="modules/update_manager/update_manager.py*"
+ coverage html --include="modules/update_manager/update_manager.py*" -d coverage_reports/updatemanager
+
+ - name: threatintelligence test
+ run: |
+ coverage run --source=./ -m pytest tests/test_threat_intelligence.py -p no:warnings -vv
+ coverage report --include="modules/threat_intelligence/threat_intelligence.py*"
+ coverage html --include="modules/threat_intelligence/threat_intelligence.py*" -d coverage_reports/threat_intelligence
+
+ - name: slipsutils test
+ run: |
+ coverage run --source=./ -m pytest tests/test_slips_utils.py -p no:warnings -vv
+ coverage report --include="slips_files/common/slips_utils.py*"
+ coverage html --include="slips_files/common/slips_utils.py*" -d coverage_reports/slips_utils
+
+ - name: slips test
+ run: |
+ coverage run --source=./ -m pytest tests/test_slips.py -p no:warnings -vv
+ coverage report --include="slips.py*"
+ coverage html --include="slips.py*" -d coverage_reports/slips
+
+ - name: profiler test
+ run: |
+ coverage run --source=./ -m pytest tests/test_profiler.py -p no:warnings -vv
+ coverage report --include="slips_files/core/profiler.py*"
+ coverage html --include="slips_files/core/profiler.py*" -d coverage_reports/profiler
+
+ - name: leak detector test
+ run: |
+ coverage run --source=./ -m pytest tests/test_leak_detector.py -p no:warnings -vv
+ coverage report --include="modules/leak_detector/leak_detector.py*"
+ coverage html --include="modules/leak_detector/leak_detector.py*" -d coverage_reports/leak_detector
+
+ - name: ipinfo test
+ run: |
+ coverage run --source=./ -m pytest tests/test_ip_info.py -p no:warnings -vv
+ coverage report --include="modules/ip_info/ip_info.py*"
+ coverage html --include="modules/ip_info/ip_info.py*" -d coverage_reports/ip_info
+
+ - name: input test
+ run: |
+ coverage run --source=./ -m pytest tests/test_inputProc.py -p no:warnings -vv
+ coverage report --include="slips_files/core/input.py*"
+ coverage html --include="slips_files/core/input.py*" -d coverage_reports/input
- name: Clear redis cache
run: ./slips.py -cc
- name: Portscan tests
- run: python3 -m pytest -s tests/integration_tests/test_portscans.py -p no:warnings -vv
+ run: |
+ coverage run --source=./ -m pytest -s tests/integration_tests/test_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/*"
+ coverage html --include="modules/network_discovery/*" -d coverage_reports/network_discovery
- name: Integration tests
- run: python3 -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+ run: |
+ python3 -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+# coverage run --source=./ -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+# coverage report --include="dataset/*"
+# coverage html --include="dataset/*" -d coverage_reports/dataset
- - name: Config file tests
- run: python3 -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+ - name: Config file tests
+ run: |
+ python3 -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+# coverage run --source=./ -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+# coverage report --include="dataset/*"
+# coverage html --include="dataset/*" -d coverage_reports/dataset
- name: Upload Artifact
# run this job whether the above jobs failed or passed
@@ -73,3 +191,4 @@ jobs:
name: test_slips_locally-integration-tests-output
path: |
output/integration_tests
+ coverage_reports/
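The per-module steps above each call `coverage run`, `coverage report --include=...` and `coverage html -d ...` separately, so every test file gets its own HTML report under `coverage_reports/`. For local runs, a minimal sketch (not part of the workflow; paths mirror the steps above) of how the same data could be accumulated into one combined report using coverage.py's append support:

```bash
# Minimal local sketch, not taken from the workflow above: accumulate coverage
# data across several pytest invocations and build a single combined report.
pip install coverage

# -a/--append extends the same .coverage data file instead of overwriting it,
# which is coverage run's default behaviour.
coverage run -a --source=./ -m pytest tests/test_flowalerts.py -p no:warnings -vv
coverage run -a --source=./ -m pytest tests/test_whitelist.py -p no:warnings -vv

# One terminal summary and one HTML report for everything collected so far.
coverage report
coverage html -d coverage_reports/combined
```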
diff --git a/.github/workflows/CI-publishing.yml b/.github/workflows/CI-publishing.yml
index bd0de0772..6ed413aa2 100644
--- a/.github/workflows/CI-publishing.yml
+++ b/.github/workflows/CI-publishing.yml
@@ -90,6 +90,8 @@ jobs:
ref: 'master'
# Fetch all history for all tags and branches
fetch-depth: ''
+ submodules: true
+
- name: Login to DockerHub
uses: docker/login-action@v2
@@ -102,8 +104,7 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- # build p2p image for ubuntu from dockerfile
- - name: Build our p2p image from dockerfile
+ - name: Build and push p2p image using dockerfile
id: docker_build_p2p_for_slips
uses: docker/build-push-action@v3
with:
@@ -164,4 +165,4 @@ jobs:
context: ./
file: ./docker/dependency-image/Dockerfile
tags: stratosphereips/slips_dependencies:latest
- push: true
\ No newline at end of file
+ push: true
diff --git a/.github/workflows/CI-staging.yml b/.github/workflows/CI-staging.yml
index 497d95048..a1e68b2de 100644
--- a/.github/workflows/CI-staging.yml
+++ b/.github/workflows/CI-staging.yml
@@ -1,11 +1,9 @@
# This workflow will install Slips dependencies and run unit tests
-
name: CI-staging
on:
push:
branches:
- # features will be added to this branch using PRs, not need to re-run the tests on push
- '!develop'
- '!master'
pull_request:
@@ -15,46 +13,187 @@ on:
jobs:
- run_tests:
- # specify the host OS
- runs-on: ubuntu-latest
- # 2 hours timeout
- timeout-minutes: 7200
- # start a container using slips dependencies image
- container:
- image: stratosphereips/slips_dependencies:latest
+ test_slips_locally:
+ # runs the tests on a GH VM
+ runs-on: ubuntu-20.04
+ # 2 hours timeout
+ timeout-minutes: 7200
+
+
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: 'develop'
+ # Fetch all history for all tags and branches
+ fetch-depth: ''
+
+ - name: Install slips dependencies
+ run: sudo apt-get update --fix-missing && sudo apt-get -y --no-install-recommends install python3 redis-server python3-pip python3-certifi python3-dev build-essential file lsof net-tools iproute2 iptables python3-tzlocal nfdump tshark git whois golang nodejs notify-osd yara libnotify-bin
+
+ - name: Install Zeek
+ run: |
+ sudo echo 'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_20.04/ /' | sudo tee /etc/apt/sources.list.d/security:zeek.list
+ curl -fsSL https://download.opensuse.org/repositories/security:zeek/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/security_zeek.gpg > /dev/null
+ sudo apt update
+ sudo apt install -y --no-install-recommends zeek
+ sudo ln -s /opt/zeek/bin/zeek /usr/local/bin/bro
+
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.8"
+
+ - name: Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ # exclude black when installing slips dependencies due to dependency conflict with tensorflow
+ grep -v 'black' install/requirements.txt | xargs pip3 install --no-cache-dir
+ pip install coverage
+
+ - name: Start redis server
+ run: redis-server --daemonize yes
+
+ - name: Run unit tests
+ run: coverage run --source=./ -m pytest tests/ --ignore="tests/test_database.py" --ignore="tests/integration_tests" -n 7 -p no:warnings -vv -s
+
+
+ - name: Run database unit tests
+ run: |
+ coverage run --source=./ -m pytest tests/test_database.py -p no:warnings -vv
+ coverage report --include="slips_files/core/database/*"
+ coverage html --include="slips_files/core/database/*" -d coverage_reports/database
+
+ - name: Clear redis cache
+ run: ./slips.py -cc
+
+ - name: Portscan tests
+ run: |
+ coverage run --source=./ -m pytest -s tests/integration_tests/test_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/*"
+ coverage html --include="modules/network_discovery/*" -d coverage_reports/network_discovery
+
+ - name: Integration tests
+ run: |
+ python3 -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+# coverage run --source=./ -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+# coverage report --include="dataset/*"
+# coverage html --include="dataset/*" -d coverage_reports/dataset
+
+ - name: Flowalerts test
+ run: |
+ coverage run --source=./ -m pytest tests/test_flowalerts.py -p no:warnings -vv
+ coverage report --include="modules/flowalerts/*"
+ coverage html --include="modules/flowalerts/*" -d coverage_reports/flowalerts
+
+ - name: Whitelist test
+ run: |
+ coverage run --source=./ -m pytest tests/test_whitelist.py -p no:warnings -vv
+ coverage report --include="slips_files/core/helpers/whitelist.py*"
+ coverage html --include="slips_files/core/helpers/whitelist.py*" -d coverage_reports/whitelist
+
+ - name: arp test
+ run: |
+ coverage run --source=./ -m pytest tests/test_arp.py -p no:warnings -vv
+ coverage report --include="modules/arp/*"
+ coverage html --include="modules/arp/*" -d coverage_reports/arp
+
+ - name: blocking test
+ run: |
+ coverage run --source=./ -m pytest tests/test_blocking.py -p no:warnings -vv
+ coverage report --include="modules/blocking/*"
+ coverage html --include="modules/blocking/*" -d coverage_reports/blocking
+
+ - name: flowhandler test
+ run: |
+ coverage run --source=./ -m pytest tests/test_flow_handler.py -p no:warnings -vv
+ coverage report --include="slips_files/core/helpers/flow_handler.py*"
+ coverage html --include="slips_files/core/helpers/flow_handler.py*" -d coverage_reports/flowhandler
+
+ - name: horizontal_portscans test
+ run: |
+ coverage run --source=./ -m pytest tests/test_horizontal_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/horizontal_portscan.py*"
+ coverage html --include="modules/network_discovery/horizontal_portscan.py*" -d coverage_reports/horizontal_portscan
+
+ - name: http_analyzer test
+ run: |
+ coverage run --source=./ -m pytest tests/test_http_analyzer.py -p no:warnings -vv
+ coverage report --include="modules/http_analyzer/http_analyzer.py*"
+ coverage html --include="modules/http_analyzer/http_analyzer.py*" -d coverage_reports/http_analyzer
+
+ - name: vertical_portscans test
+ run: |
+ coverage run --source=./ -m pytest tests/test_vertical_portscans.py -p no:warnings -vv
+ coverage report --include="modules/network_discovery/vertical_portscan.py*"
+ coverage html --include="modules/network_discovery/vertical_portscan.py*" -d coverage_reports/vertical_portscan
- steps:
- - uses: actions/checkout@v2
+ - name: virustotal test
+ run: |
+ coverage run --source=./ -m pytest tests/test_virustotal.py -p no:warnings -vv
+ coverage report --include="modules/virustotal/virustotal.py*"
+ coverage html --include="modules/virustotal/virustotal.py*" -d coverage_reports/virustotal
- - name: Start redis server
- run: redis-server --daemonize yes
+ - name: updatemanager test
+ run: |
+ coverage run --source=./ -m pytest tests/test_update_file_manager.py -p no:warnings -vv
+ coverage report --include="modules/update_manager/update_manager.py*"
+ coverage html --include="modules/update_manager/update_manager.py*" -d coverage_reports/updatemanager
- - name: Run unit tests
- run: python3 -m pytest tests/ --ignore="tests/test_daemon.py" --ignore="tests/test_database.py" --ignore="tests/integration_tests" -n 7 -p no:warnings -vv -s
+ - name: threatintelligence test
+ run: |
+ coverage run --source=./ -m pytest tests/test_threat_intelligence.py -p no:warnings -vv
+ coverage report --include="modules/threat_intelligence/threat_intelligence.py*"
+ coverage html --include="modules/threat_intelligence/threat_intelligence.py*" -d coverage_reports/threat_intelligence
- - name: Run database unit tests
- run: python3 -m pytest tests/test_database.py -p no:warnings -vv
+ - name: slipsutils test
+ run: |
+ coverage run --source=./ -m pytest tests/test_slips_utils.py -p no:warnings -vv
+ coverage report --include="slips_files/common/slips_utils.py*"
+ coverage html --include="slips_files/common/slips_utils.py*" -d coverage_reports/slips_utils
+ - name: slips test
+ run: |
+ coverage run --source=./ -m pytest tests/test_slips.py -p no:warnings -vv
+ coverage report --include="slips.py*"
+ coverage html --include="slips.py*" -d coverage_reports/slips
- - name: Clear redis cache
- run: ./slips.py -cc
+ - name: profiler test
+ run: |
+ coverage run --source=./ -m pytest tests/test_profiler.py -p no:warnings -vv
+ coverage report --include="slips_files/core/profiler.py*"
+ coverage html --include="slips_files/core/profiler.py*" -d coverage_reports/profiler
- - name: Portscan tests
- run: python3 -m pytest -s tests/integration_tests/test_portscans.py -p no:warnings -vv
+ - name: leak detector test
+ run: |
+ coverage run --source=./ -m pytest tests/test_leak_detector.py -p no:warnings -vv
+ coverage report --include="modules/leak_detector/leak_detector.py*"
+ coverage html --include="modules/leak_detector/leak_detector.py*" -d coverage_reports/leak_detector
- - name: Integration tests
- run: python3 -m pytest -s tests/integration_tests/test_dataset.py -p no:warnings -vv
+ - name: ipinfo test
+ run: |
+ coverage run --source=./ -m pytest tests/test_ip_info.py -p no:warnings -vv
+ coverage report --include="modules/ip_info/ip_info.py*"
+ coverage html --include="modules/ip_info/ip_info.py*" -d coverage_reports/ip_info
- - name: Config file tests
- run: python3 -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+ - name: input test
+ run: |
+ coverage run --source=./ -m pytest tests/test_inputProc.py -p no:warnings -vv
+ coverage report --include="slips_files/core/input.py*"
+ coverage html --include="slips_files/core/input.py*" -d coverage_reports/input
+ - name: Config file tests
+ run: |
+ python3 -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+# coverage run --source=./ -m pytest -s tests/integration_tests/test_config_files.py -p no:warnings -vv
+# coverage report --include="dataset/*"
+# coverage html --include="dataset/*" -d coverage_reports/dataset
- - name: Upload Artifact
- # run this job whether the above jobs failed or passed
- if: success() || failure()
- uses: actions/upload-artifact@v3
- with:
- name: integration-tests-output
- path: |
- output/integration_tests
\ No newline at end of file
+ - name: Upload Artifact
+ # run this job whether the above jobs failed or passed
+ if: success() || failure()
+ uses: actions/upload-artifact@v3
+ with:
+ name: test_slips_locally-integration-tests-output
+ path: |
+ output/integration_tests
+ coverage_reports/
diff --git a/.gitmodules b/.gitmodules
index c4aa2806a..973d05aba 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,16 @@
[submodule "p2p4slips"]
path = p2p4slips
url = https://github.com/stratosphereips/p2p4slips
+ branch = master
[submodule "iris"]
path = iris
url = https://github.com/stratosphereips/iris
+ branch = main
[submodule "fides"]
path = fides
url = https://github.com/stratosphereips/fides
+ branch = master
[submodule "feel_project"]
path = feel_project
url = https://github.com/stratosphereips/feel_project
+ branch = main
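The new `branch = ...` lines make each submodule track a branch rather than only the pinned commit. A minimal sketch (standard git commands, not taken from the repo docs) of pulling the submodules at those branches after cloning:

```bash
# Sketch: clone with submodules, then move each submodule to the tip of the
# branch recorded in .gitmodules (master/main as configured above).
git clone --recurse-submodules https://github.com/stratosphereips/StratosphereLinuxIPS
cd StratosphereLinuxIPS
git submodule update --init --recursive --remote
```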
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 84ee0107b..e88b4b5f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+- 1.0.14 (May 2024)
+- Improve whitelists: better matching of ASNs, domains, and organizations.
+- Whitelist Microsoft, Apple, Twitter, Facebook and Google alerts by default to reduce false positives.
+- Better unit tests, thanks to @Sekhar-Kumar-Dash
+- Speed up portscan detections.
+- Fix the issue of overwriting the Redis config file on every run.
+- Add more info to metadata/info.txt for each run.
+
+
- 1.0.13 (April 2024)
- Whitelist alerts to all organizations by default to reduce false positives.
- Improve and compress Slips Docker images. thanks to @verovaleros
diff --git a/README.md b/README.md
index 67fd9fb53..84d5865c0 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-Slips v1.0.13
+Slips v1.0.14
@@ -31,6 +31,7 @@ Slips v1.0.13
- [Introduction](#introduction)
- [Usage](#usage)
- [GUI](#graphical-user-interface)
+- [Requirements](#requirements)
- [Installation](#installation)
- [Extended Usage](#extended-usage)
- [Configuration](#configuration)
@@ -71,7 +72,7 @@ The recommended way to use Slips is on Docker.
#### Linux
```
-docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips:latest
+docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips:latest
```
```
@@ -85,7 +86,7 @@ cat output_dir/alerts.log
#### Macos M1
```
-docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips_macos_m1:latest
+docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips_macos_m1:latest
```
```
@@ -100,7 +101,7 @@ cat output_dir/alerts.log
#### Macos Intel
```
-docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips:latest
+docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips:latest
```
```
@@ -146,6 +147,10 @@ For more info about the Kalipso interface, check the docs: https://stratospherel
---
+# Requirements
+
+Slips requires Python 3.8+ and at least 4 GB of RAM to run smoothly.
+
# Installation
@@ -318,6 +323,13 @@ We are grateful for the generous support and funding provided by the following o
- NlNet Foundation, https://nlnet.nl/
+
+This project is funded through [NGI0 Entrust](https://nlnet.nl/entrust), a fund established by [NLnet](https://nlnet.nl) with financial support from the European Commission's [Next Generation Internet](https://ngi.eu) program. Learn more at the [NLnet project page](https://nlnet.nl/project/Iris-P2P).
+
+[](https://nlnet.nl)
+[](https://nlnet.nl/entrust)
+
+
- Artificial Intelligence Centre at the Czech Technical University in Prague, https://www.aic.fel.cvut.cz/
- Avast, https://www.avast.com/
- CESNET, https://www.cesnet.cz/
diff --git a/VERSION b/VERSION
index 2ac9634d3..5b09c67c2 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.0.13
+1.0.14
diff --git a/config/slips.conf b/config/slips.conf
index 7b3785dc7..7c8efdf77 100644
--- a/config/slips.conf
+++ b/config/slips.conf
@@ -43,17 +43,21 @@ time_window_width = 3600
-# This option determines whether to analyze only what goes OUT of the local network or also what is coming IN the local network
+# This option determines whether to analyze only traffic going out
+# of the current local network (out mode) or also traffic coming into the local network (all mode)
# Options: out, all
# In the 'out' configuration, SLIPS focuses on analyzing outbound traffic
# originating from the internal local IPs.
-# It creates profiles for local IPs and public external IPs, but only analyzes the outgoing traffic from the private IPs
+# It creates profiles for local IPs and public external IPs, but only analyzes
+# the outgoing traffic from the private IPs (i.e., the local network)
# to public destinations.
-# Any inbound traffic or attacks from external IPs are not processed.
+# Any inbound traffic or attacks from external IPs to your local
+# network are not processed in this mode.
# In the 'all' configuration, Slips creates profiles for both private and public IPs,
# and analyzes traffic in both directions, inbound and outbound.
-# It processes traffic originating from private IP addresses, as well as external public IP addresses.
+# It processes traffic originating from private IP addresses,
+# as well as external public IP addresses.
# This mode provides comprehensive network monitoring, allowing you to detect
# outgoing as well as incoming attacks and connections.
#analysis_direction = all
@@ -167,7 +171,7 @@ client_ips = []
# May lead to false negatives
# - 3.1: The start of the Optimal range, has more false positives but more accurate.
# - 3.86: The end of the Optimal range, has less false positives but less accurate.
-evidence_detection_threshold = 3.46
+evidence_detection_threshold = 0.2
# Slips can show a popup/notification with every alert. Only yes or no
@@ -303,7 +307,7 @@ pastebin_download_threshold = 700
# available options [slack,stix] without quotes
#export_to = [stix]
#export_to = [slack]
-export_to = []
+export_to = [slack]
# We'll use this channel to send alerts
slack_channel_name = proj_slips_alerting_module
diff --git a/docs/P2P.md b/docs/P2P.md
index c54cdfb14..f37e46c54 100644
--- a/docs/P2P.md
+++ b/docs/P2P.md
@@ -1,4 +1,4 @@
-# P2P
+# P2P
The P2P module makes Slips be a peer in a peer to peer network of computers in the local network. The peers are only in the local network and they communicate using multicast packets. The P2P module is a highly complex system of data sharing, reports on malicious computers, asking about IoC to the peers and a complex trust model that is designed to resiste adversarial peers in the network. Adversarial peers are malicious peers that lie about the data being shared (like saying that a computer is maliciuos when is not, or that an attacker is benign).
@@ -27,12 +27,12 @@ docker pull stratosphereips/slips_p2p
docker run -it --rm --net=host --cap-add=NET_ADMIN stratosphereips/slips_p2p
```
-For the p2p to be able to listen on the network interfaces
+For the p2p to be able to listen on the network interfaces
and receive packets you should use ```--cap-add=NET_ADMIN```
## Installation:
-1. download and install go:
+1. download and install go:
```
apt install golang
@@ -42,14 +42,17 @@ or by hand
```
curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz --output go.tar.gz
-rm -rf /usr/local/go && tar -C /usr/local -xzf go.tar.gz
+rm -rf /usr/local/go && tar -C /usr/local -xzf go.tar.gz
export PATH=$PATH:/usr/local/go/bin
```
2. build the pigeon:
-- if you installed slips with the submodules using
-```git clone --recurse-submodules --remote-submodules https://github.com/stratosphereips/StratosphereLinuxIPS ```
+- if you installed slips with the submodules using
+```
+git clone --recurse-submodules --remote-submodules https://github.com/stratosphereips/StratosphereLinuxIPS -j4
+```
+
then you should only build the pigeon using:
```cd p2p4slips && go build```
- If you installed Slips without the submodules then you should download and build the pigeon using:
@@ -62,11 +65,11 @@ The p2p binary should now be in ```p2p4slips/``` dir and slips will be able to f
***NOTE***
-If you installed the p2p4slips submodule anywhere other than slips main directory, remember to add it to PATH
+If you installed the p2p4slips submodule anywhere other than slips main directory, remember to add it to PATH
by using the following commands:
```
-echo "export PATH=$PATH:/path/to/StratosphereLinuxIPS/p2p4slips/" >> ~/.bashrc
+echo "export PATH=$PATH:/path/to/StratosphereLinuxIPS/p2p4slips/" >> ~/.bashrc
source ~/.bashrc
```
@@ -74,7 +77,7 @@ source ~/.bashrc
The P2P module is disabled by default in Slips.
-To enable it, change ```use_p2p=no``` to ```use_p2p=yes``` in ```config/slips.conf```
+To enable it, change ```use_p2p=no``` to ```use_p2p=yes``` in ```config/slips.conf```
P2P is only available when running slips in you local network using an interface. (with -i )
@@ -88,7 +91,7 @@ You don't have to do anything in particular for the P2P module to work, just ena
4- Send blame reports to the whole network about attackers
5- Receive blame reports on attackers from the network, balanced by the trust model
-
+
6- Keep a trust score on each peer, which varies in time based on the interactions and quality of data shared
## Project sections
@@ -105,17 +108,17 @@ following code is related to Dovecot:
## Dovecot experiments
Experiments are not essential to the module, and the whole project runs just fine without them. They are useful for
-development of new trust models and modelling behavior of the P2P network.
+development of new trust models and modelling behavior of the P2P network.
To use the experiments, clone
the https://github.com/stratosphereips/p2p4slips-experiments repository into
`modules/p2ptrust/testing/experiments`.
The experiments run independently (outside of Slips) and start all processes that are needed, including relevant parts
-of Slips.
-The code needs to be placed inside the module, so that necessary dependencies are accessible.
+of Slips.
+The code needs to be placed inside the module, so that necessary dependencies are accessible.
This is not the
-best design choice, but it was the simplest quick solution.
+best design choice, but it was the simplest quick solution.
## How it works:
@@ -127,7 +130,7 @@ If slips finds that an IP is malicious given enough evidence, it blocks it and t
### Receiving Blames
-When slips receives a blame report from the network,
+When slips receives a blame report from the network,
which means some other slips instance in th network set an evidence about
an IP and is letting other peers know about it.
@@ -138,9 +141,9 @@ when deciding to block the attacker's IP.
### Asking the network about an IP
-Whenever slips sees a new IP, it asks other peers about it, and waits 3 seconds for them to reply.
+Whenever slips sees a new IP, it asks other peers about it, and waits 3 seconds for them to reply.
-The network then replies with a score and confidence for the IP. The higher the score the more malicious this IP is.
+The network then replies with a score and confidence for the IP. The higher the score the more malicious this IP is.
Once we get the score of the IP, we store it in the database,
and we alert if the score of this IP is more than 0 (threat level=info).
@@ -164,7 +167,7 @@ Slips rotates the p2p.log every 1 day by default, and keeps the logs of 1 past d
## Limitations
-For now, slips only supports requests and blames about IPs.
+For now, slips only supports requests and blames about IPs.
Domains, URLs, or hashes are not supported, but can easily be added in the future.
@@ -173,4 +176,3 @@ Domains, URLs, or hashes are not supported, but can easily be added in the futur
Slips only shares scores and confidence (numbers) generated by slips about IPs to the network,
no private information is shared.
-
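Most of the P2P.md changes above are whitespace cleanups plus the multi-line clone command. As the doc says, the module needs `use_p2p=yes` in `config/slips.conf` and a live interface capture; a minimal sketch of that run (the interface name is an example, not a default):

```bash
# Sketch: after setting use_p2p=yes in config/slips.conf, run Slips live on an
# interface so the P2P module can join the local peer network.
# 'eno1' is an example interface; pick yours from `ip a`.
./slips.py -i eno1 -o output_dir
```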
diff --git a/docs/contributing.md b/docs/contributing.md
index bbbcd4677..7e68d3791 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -1,6 +1,6 @@
-# Contributing
+# Contributing
-All contributions are welcomed, thank you for taking the time to contribute to this project!
+All contributions are welcomed, thank you for taking the time to contribute to this project!
These are a set of guidelines for contributing to the development of Slips [1].
## How can you contribute?
@@ -18,7 +18,7 @@ The following git branches in the Slips repository are permanent:
## Naming Git branches for Pull Requests
-To keep the Git history clean and facilitate the revision of contributions we
+To keep the Git history clean and facilitate the revision of contributions we
ask all branches to follow concise namings. These are the branch-naming patterns
to follow when contributing to Slips:
@@ -49,22 +49,33 @@ Pull Requests:
Here's a very simple beginner-level steps on how to create your PR in Slips
-1. Clone the Slips repo
-2. In your clone, checkout origin/develop: ```git checkout origin develop```
-3. Install slips pre-commit hooks ```pre-commit install```
-4. Generate a baseline for detecting secrets before they're committed ```detect-secrets scan --exclude-files ".*dataset/.*|(?x)(^config/local_ti_files/own_malicious_JA3.csv$|.*test.*|.*\.md$)" > .secrets.baseline```
-3. Create your own branch off develop using your name and the feature name: ```git checkout -b _ develop```
-4. Change the code, add the feature or fix the bug, etc. then commit with a descriptive msg ```git commit -m "descriptive msg here" ```
-5. Test your code: this is a very important step. you shouldn't open a PR with code that is not working: ```./tests/run_all_tests.sh```
-6. If some tests didn't pass, it means you need to fix something in your branch.
-7. Push to your own repo: ```git push -u origin _```
-8. Open a PR in Slips, remember to set the base branch as develop.
-9. List your changes in the PR description
-
-Some IDEs like [PyCharm](https://www.jetbrains.com/help/pycharm/work-with-github-pull-requests.html) and [vscode](https://levelup.gitconnected.com/how-to-create-a-pull-request-on-github-using-vs-code-f03db28308c4) have the option
-to open a PR from within the IDE.
+1. Fork the Slips repo
+2. Clone the forked repo
+3. In your clone, checkout origin/develop: ```git checkout origin develop```
+4. Install slips pre-commit hooks ```pre-commit install```
+5. Generate a baseline for detecting secrets before they're committed ```detect-secrets scan --exclude-files ".*dataset/.*|(?x)(^config/local_ti_files/own_malicious_JA3.csv$|.*test.*|.*\.md$)" > .secrets.baseline```
+6. Create your own branch off develop using your name and the feature name: ```git checkout -b _ develop```
+7. Change the code, add the feature or fix the bug, etc. then commit with a descriptive msg ```git commit -m "descriptive msg here" ```
+8. Test your code: this is a very important step. You shouldn't open a PR with code that is not working: ```./tests/run_all_tests.sh```
+9. If some tests didn't pass, it means you need to fix something in your branch.
+10. Push to your own repo: ```git push -u origin _```
+11. Open a PR in Slips, remember to set the base branch as develop.
+12. List your changes in the PR description
+
+
+## Rejected PRs
+
+We will not review PRs in the following cases:
+
+* Code that's not tested. A screenshot of the passing tests is required for each PR.
+* PRs without steps to reproduce your proposed changes.
+* Asking for a step-by-step guide on how to solve the problem. It is OK to ask us for clarification after putting some effort into reading the code and the docs, but asking exactly how you should do X shows that you didn't look at the code.
+
+
+Some IDEs like [PyCharm](https://www.jetbrains.com/help/pycharm/work-with-github-pull-requests.html) and [vscode](https://levelup.gitconnected.com/how-to-create-a-pull-request-on-github-using-vs-code-f03db28308c4) have the option
+to open a PR from within the IDE.
That's it, now you have a ready-to-merge PR!
***
-[1] These contributions guidelines are inspired by the project [Snoopy](https://raw.githubusercontent.com/a2o/snoopy/master/.github/CONTRIBUTING.md)
\ No newline at end of file
+[1] These contributions guidelines are inspired by the project [Snoopy](https://raw.githubusercontent.com/a2o/snoopy/master/.github/CONTRIBUTING.md)
diff --git a/docs/images/slips.gif b/docs/images/slips.gif
index f9b30afe3..962187fa5 100644
Binary files a/docs/images/slips.gif and b/docs/images/slips.gif differ
diff --git a/docs/installation.md b/docs/installation.md
index 5e9cd5392..e0833e349 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,6 +1,6 @@
# Installation
-There are two ways to install and run Slips: inside a Docker or in your own computer. We suggest to install and to run Slips inside a Docker since all dependencies are already installed in there. However, current version of docker with Slips does not allow to capture the traffic from the computer's interface. We will describe both ways of installation anyway.
+There are two ways to install and run Slips: inside a Docker or in your own computer. We suggest to install and to run Slips inside a Docker since all dependencies are already installed in there. However, current version of docker with Slips does not allow to capture the traffic from the computer's interface. We will describe both ways of installation anyway.
@@ -14,7 +14,7 @@ There are two ways to install and run Slips: inside a Docker or in your own comp
* On MacOS M1 host
* [Without P2P support](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#for-macos-m1)
* On MacOS Intel processor
- * [Without P2P support](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#for-macos-intel-processors)
+ * [Without P2P support](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#for-macos-intel-processors)
* [With P2P support](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#for-p2p-support-on-macos-intel)
* [Docker-compose](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#running-slips-using-docker-compose)
* [Dockerfile](https://stratospherelinuxips.readthedocs.io/en/develop/installation.html#building-slips-from-the-dockerfile)
@@ -25,6 +25,10 @@ There are two ways to install and run Slips: inside a Docker or in your own comp
+## Requirements
+
+Slips requires Python 3.8+ and at least 4 GB of RAM to run smoothly.
+
## Slips in Docker
Slips can be run inside a Docker. Either using our docker image with from DockerHub (recommended)
@@ -45,29 +49,29 @@ For more advanced users, you can:
1. First, choose the correct image for your architecture
-#### For linux
+#### For linux
###### Analyse your own traffic
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
- - Please change the name of the interface for your own.
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
+ - Please change the name of the interface for your own.
- Check the alerts slips generated
- ```tail -f output/eno1*/alerts.log ```
-###### Analyze your PCAP file
+###### Analyze your PCAP file
- Prepare a dataset directory
- `mkdir dataset`
- `cp myfile.pcap dataset`
- Run Slips
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -f dataset/myfile.pcap`
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -f dataset/myfile.pcap`
- Check the alerts slips generated
- ```tail -f output/myfile*/alerts.log ```
#### For MacOS M1
-###### Analyse your own traffic
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
- - Please change the name of the interface for your own.
+###### Analyse your own traffic
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
+ - Please change the name of the interface for your own.
- Check the alerts slips generated
- ```tail -f output/eno1*/alerts.log ```
@@ -78,36 +82,36 @@ Docker with P2P is not supported for MacOS M1.
#### For MacOS Intel processors
-###### Analyse your own traffic
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
- - Please change the name of the interface for your own.
+###### Analyse your own traffic
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
+ - Please change the name of the interface for your own.
- Check the alerts slips generated
- ```tail -f output/eno1*/alerts.log ```
-
-###### Analyze your PCAP file
+
+###### Analyze your PCAP file
- Prepare a dataset directory
- `mkdir dataset`
- `cp myfile.pcap dataset`
- Run Slips
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -f dataset/myfile.pcap`
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -f dataset/myfile.pcap`
- Check the alerts slips generated
- ```tail -f output/myfile*/alerts.log ```
-#### For P2P support on Linux
+#### For P2P support on Linux
###### To analyze your own traffic with p2p
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips_p2p:latest /StratosphereLinuxIPS/slips.py -i eno1 -o output_dir `
- - Please change the name of the interface for your own.
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips_p2p:latest /StratosphereLinuxIPS/slips.py -i eno1 -o output_dir `
+ - Please change the name of the interface for your own.
- Check evidence
```tail -f output_dir/alerts.log ```
#### For P2P support on MacOS Intel
-###### Analyze your own traffic
- - `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips_p2p:latest /StratosphereLinuxIPS/slips.py -i eno1 -o output_dir `
- - Please change the name of the interface for your own.
+###### Analyze your own traffic
+ - `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips_p2p:latest /StratosphereLinuxIPS/slips.py -i eno1 -o output_dir `
+ - Please change the name of the interface for your own.
- Check evidence
```tail -f output_dir/alerts.log ```
@@ -141,11 +145,11 @@ If you were running slips directly from the docker without cloning the repo, you
2. Get into the docker with `docker exec -it slips /bin/bash`, and then modifying the configuration file in `config/slips.conf` to add the disabled modules
3. Run Slips from inside the docker
`./slips.py -i enp7s0`
-1. You can
- 1. Clone the Slips repo (clone the same version as the docker you are downloading),
+1. You can
+ 1. Clone the Slips repo (clone the same version as the docker you are downloading),
2. Modify your local `config/slips.conf`
3. Run the docker command above but by mounting the volume of the config.
- `docker run --rm -it -p 55000:55000 --net=host --cap-add=NET_ADMIN -v $(pwd)/config:/StratosphereLinuxIPS/config/ -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
+ `docker run --rm -it -p 55000:55000 --cpu-shares "700" --memory="8g" --memory-swap="8g" --net=host --cap-add=NET_ADMIN -v $(pwd)/config:/StratosphereLinuxIPS/config/ -v $(pwd)/output:/StratosphereLinuxIPS/output -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset --name slips stratosphereips/slips:latest /StratosphereLinuxIPS/slips.py -i eno1`
---
### Run Slips sharing files between the host and the container
@@ -155,13 +159,13 @@ The following instructions will guide you on how to run a Slips docker container
```bash
# create a directory to load pcaps in your host computer
mkdir ~/dataset
-
+
# copy the pcap to analyze to the newly created folder
cp /myfile.pcap ~/dataset
-
+
# create a new Slips container mapping the folder in the host to a folder in the container
docker run -it --rm --net=host --name slips -v $(pwd)/dataset:/StratosphereLinuxIPS/dataset stratosphereips/slips:latest
-
+
# run Slips on the pcap file mapped to the container
./slips.py -f dataset/myfile.pcap
```
@@ -175,7 +179,7 @@ Change eno1 in the command below to your own interface
```bash
# run a new Slips container with the option to interact with the network stack of the host
docker run -it --rm --net=host --cap-add=NET_ADMIN --name slips stratosphereips/slips:latest
-
+
# run Slips on the host interface `eno1` with active blocking `-p`
./slips.py -i eno1 -p
```
@@ -186,7 +190,7 @@ Change eno1 in the command below to your own interface
Change enp1s0 to your current interface in docker/docker-compose.yml and start slips using
-
+
docker compose -f docker/docker-compose.yml up
Now everything inside your host's ```config``` and ```dataset``` directories is
@@ -215,27 +219,29 @@ First, you need to check which image is suitable for your architecture.
-Before building the docker locally from the Dockerfile, first you should clone Slips repo or download the code directly:
+Before building the docker locally from the Dockerfile, first you should clone Slips repo or download the code directly:
git clone https://github.com/stratosphereips/StratosphereLinuxIPS.git
-If you cloned Slips in '~/code/StratosphereLinuxIPS', then you can build the Docker image with:
+If you cloned Slips in '~/StratosphereLinuxIPS', then you can build the Docker image with:
**NOTE: replace ubuntu-image with the image that fits your archiecture**
+**NOTE: you have to be in the main Slips directory to build this.**
- cd ~/code/StratosphereLinuxIPS/docker/ubunutu-image
- docker build --no-cache -t slips -f Dockerfile .
- docker run -it --rm --net=host -v ~/code/StratosphereLinuxIPS/dataset:/StratosphereLinuxIPS/dataset slips
+
+ cd ~/StratosphereLinuxIPS
+ docker build --no-cache -t slips -f docker/ubunutu-image/Dockerfile .
+ docker run -it --rm --net=host slips
./slips.py -c config/slips.conf -f dataset/test3-mixed.binetflow
If you don't have Internet connection from inside your Docker image while building, you may have another set of networks defined in your Docker. For that try:
- docker build --network=host --no-cache -t slips -f Dockerfile .
-
+ docker build --network=host --no-cache -t slips -f docker/ubunutu-image/Dockerfile .
+
You can also put your own files in the /dataset/ folder and analyze them with Slips:
- cp some-pcap-file.pcap ~/code/StratosphereLinuxIPS/dataset
- docker run -it --rm --net=host -v ../dataset/:/StratosphereLinuxIPS/dataset slips
+ cp some-pcap-file.pcap ~/StratosphereLinuxIPS/dataset
+ docker run -it --rm --net=host -v ~/StratosphereLinuxIPS/dataset/:/StratosphereLinuxIPS/dataset slips
./slips.py -f dataset/some-pcap-file.pcap
@@ -249,13 +255,13 @@ To fix this you can disable all machine learning based modules when running Slip
## Installing Slips natively
-Slips is dependent on three major elements:
+Slips is dependent on three major elements:
Python 3.8
Zeek
Redis database 7.0.4
-To install these elements we will use APT package manager. After that, we will install python packages required for Slips to run and its modules to work. Also, Slips' interface Kalipso depend on Node.JS and several npm packages.
+To install these elements we will use APT package manager. After that, we will install python packages required for Slips to run and its modules to work. Also, Slips' interface Kalipso depend on Node.JS and several npm packages.
@@ -276,12 +282,12 @@ You can install it using install.sh
Update the repository of packages so you see the latest versions:
apt-get update
-
+
Install the required packages (-y to install without asking for approval):
apt-get -y install tshark iproute2 python3.8 python3-tzlocal net-tools python3-dev build-essential python3-certifi curl git gnupg ca-certificates redis wget python3-minimal python3-redis python3-pip python3-watchdog nodejs redis-server npm lsof file iptables nfdump zeek whois yara
apt install -y --no-install-recommends nodejs
-
+
Even though we just installed pip3, the package installer for Python (3.8), we need to upgrade it to its latest version:
python3 -m pip install --upgrade pip
@@ -311,7 +317,7 @@ Finally, we will update the package manager repositories and install zeek
apt-get update
apt-get -y install zeek
-
+
To make sure that zeek can be found in the system we will add its link to a known path:
ln -s /opt/zeek/bin/zeek /usr/local/bin
@@ -319,16 +325,16 @@ To make sure that zeek can be found in the system we will add its link to a know
#### Running Slips for the First Time
-Be aware that the first time you run Slips it will start updating
+Be aware that the first time you run Slips it will start updating
all the databases and threat intelligence files in the background.
-However, it will give you as many detections as possible _while_ updating.
+However, it will give you as many detections as possible _while_ updating.
You may have more detections if you rerun Slips after the updates.
-Slips behaves like this, so you don't have to wait for the updates to
+Slips behaves like this, so you don't have to wait for the updates to
finish to have some detections. however, you can change that in the config file by setting ```wait_for_TI_to_finish``` to yes.
-Depending on the remote sites, downloading and updating the DB may take up to 4 minutes.
-Slips stores this information in a cache Redis database,
+Depending on the remote sites, downloading and updating the DB may take up to 4 minutes.
+Slips stores this information in a cache Redis database,
which is kept in memory when Slips stops. Next time Slips runs, it will read from this database.
The information in the DB is updated periodically according to the configuration file (usually one day).
@@ -353,8 +359,8 @@ then choosing 1.
## Installing Slips on a Raspberry PI
-Slips on RPI is currently in beta and is actively under development.
-While it is functional, please be aware that there may be occasional bugs or changes in functionality as we work to
+Slips on RPI is currently in beta and is actively under development.
+While it is functional, please be aware that there may be occasional bugs or changes in functionality as we work to
improve and refine this feature. Your feedback and contributions are highly valuable during this stage!
@@ -368,4 +374,3 @@ Packages for Raspbian 11:
Packages for Raspbian 10:
[https://download.opensuse.org/repositories/security:/zeek/Raspbian_10/armhf/zeek_4.2.1-0_armhf.deb](https://download.opensuse.org/repositories/security:/zeek/Raspbian_10/armhf/zeek_4.2.1-0_armhf.deb)
-
diff --git a/install/requirements.txt b/install/requirements.txt
index 1b4b96dbd..39e7e65a5 100644
--- a/install/requirements.txt
+++ b/install/requirements.txt
@@ -45,3 +45,4 @@ aid_hash
black
ruff
pre-commit
+coverage
diff --git a/managers/metadata_manager.py b/managers/metadata_manager.py
index 36e549555..329668dd0 100644
--- a/managers/metadata_manager.py
+++ b/managers/metadata_manager.py
@@ -66,7 +66,8 @@ def store_host_ip(self):
def add_metadata(self):
"""
- Create a metadata dir output/metadata/ that has a copy of slips.conf, whitelist.conf, current commit and date
+ Create a metadata dir output/metadata/ that has a copy of
+ slips.conf, whitelist.conf, current commit and date
"""
if not self.enable_metadata:
return
@@ -90,12 +91,14 @@ def add_metadata(self):
now = utils.convert_format(now, utils.alerts_format)
self.info_path = os.path.join(metadata_dir, "info.txt")
+ cmd = " ".join(sys.argv)
with open(self.info_path, "w") as f:
f.write(
f"Slips version: {self.main.version}\n"
f"File: {self.main.input_information}\n"
f"Branch: {self.main.db.get_branch()}\n"
f"Commit: {self.main.db.get_commit()}\n"
+ f"Command: {cmd}\n"
f"Slips start date: {now}\n"
)
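With the added `cmd = " ".join(sys.argv)`, metadata/info.txt in each run's output directory now also records the exact command Slips was started with. A hypothetical example of the resulting file (every value below is illustrative, not taken from a real run):

```bash
# Hypothetical run, only to show the new "Command:" field written by add_metadata().
cat output/myfile.pcap_2024-05-01/metadata/info.txt
# Slips version: 1.0.14
# File: dataset/myfile.pcap
# Branch: develop
# Commit: 0123abcd
# Command: ./slips.py -f dataset/myfile.pcap
# Slips start date: 2024/05/01 12:00:00
```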
diff --git a/modules/exporting_alerts/exporting_alerts.py b/modules/exporting_alerts/exporting_alerts.py
index 90f47e1f0..96fd33f4c 100644
--- a/modules/exporting_alerts/exporting_alerts.py
+++ b/modules/exporting_alerts/exporting_alerts.py
@@ -1,16 +1,9 @@
-from slips_files.common.parsers.config_parser import ConfigParser
+import json
+
+from modules.exporting_alerts.slack_exporter import SlackExporter
+from modules.exporting_alerts.stix_exporter import StixExporter
from slips_files.common.slips_utils import utils
from slips_files.common.abstracts.module import IModule
-from slack import WebClient
-from slack.errors import SlackApiError
-import os
-import json
-from stix2 import Indicator, Bundle
-from cabby import create_client
-import time
-import threading
-import sys
-import datetime
class ExportingAlerts(IModule):
@@ -25,349 +18,63 @@ class ExportingAlerts(IModule):
authors = ["Alya Gomaa"]
def init(self):
- self.port = None
+ self.slack = SlackExporter(self.logger, self.db)
+ self.stix = StixExporter(self.logger, self.db)
self.c1 = self.db.subscribe("export_evidence")
self.channels = {"export_evidence": self.c1}
- self.read_configuration()
- # This bundle should be created once and we should
- # append all indicators to it
- self.is_bundle_created = False
- # To avoid duplicates in STIX_data.json
- self.added_ips = set()
- self.is_running_on_interface = (
- "-i" in sys.argv or self.db.is_growing_zeek_dir()
- )
- self.export_to_taxii_thread = threading.Thread(
- target=self.schedule_sending_to_taxii_server, daemon=True
- )
-
- def read_configuration(self):
- """Read the configuration file for what we need"""
- conf = ConfigParser()
- # Available options ['slack','stix']
- self.export_to = conf.export_to()
-
- if "slack" in self.export_to:
- self.slack_token_filepath = conf.slack_token_filepath()
- self.slack_channel_name = conf.slack_channel_name()
- self.sensor_name = conf.sensor_name()
-
- if self.should_export_to_stix():
- self.TAXII_server = conf.taxii_server()
- self.port = conf.taxii_port()
- self.use_https = conf.use_https()
- self.discovery_path = conf.discovery_path()
- self.inbox_path = conf.inbox_path()
- # push_delay is only used when slips is running using -i
- self.push_delay = conf.push_delay()
- self.collection_name = conf.collection_name()
- self.taxii_username = conf.taxii_username()
- self.taxii_password = conf.taxii_password()
- self.jwt_auth_path = conf.jwt_auth_path()
- # push delay exists -> create thread that waits
- # push delay doesnt exist -> running using file not interface
- # -> only push to taxii server once before
- # stopping
-
- def get_slack_token(self):
- if not hasattr(self, "slack_token_filepath"):
- return False
-
- try:
- with open(self.slack_token_filepath, "r") as f:
- self.BOT_TOKEN = f.read()
- if len(self.BOT_TOKEN) < 5:
- del self.BOT_TOKEN
- raise NameError
- except (FileNotFoundError, NameError):
- return False
-
- def ip_exists_in_stix_file(self, ip):
- """Searches for ip in STIX_data.json to avoid exporting duplicates"""
- return ip in self.added_ips
-
- def send_to_slack(self, msg_to_send: str) -> bool:
- # Msgs sent in this channel will be exported to slack
- # Token to login to your slack bot. it should be
- # set in slack_bot_token_secret
- if self.BOT_TOKEN == "":
- # The file is empty
- self.print(
- f"Can't find SLACK_BOT_TOKEN "
- f"in {self.slack_token_filepath}.",
- 0,
- 2,
- )
- return False
-
- slack_client = WebClient(token=self.BOT_TOKEN)
- try:
- slack_client.chat_postMessage(
- # Channel name is set in slips.conf
- channel=self.slack_channel_name,
- # Sensor name is set in slips.conf
- text=f"{self.sensor_name}: {msg_to_send}",
- )
- return True
-
- except SlackApiError as e:
- # You will get a SlackApiError if "ok" is False
- assert e.response[
- "error"
- ], "Problem while exporting to slack." # str like
- # 'invalid_auth', 'channel_not_found'
- return False
-
- def push_to_taxii_server(self):
- """
- Use Inbox Service (TAXII Service to Support Producer-initiated
- pushes of cyber threat information) to publish
- our STIX_data.json file
- """
- # Create a cabby client
- client = create_client(
- self.TAXII_server,
- use_https=self.use_https,
- port=self.port,
- discovery_path=self.discovery_path,
- )
- # jwt_auth_url is optional
- if self.jwt_auth_path != "":
- client.set_auth(
- username=self.taxii_username,
- password=self.taxii_password,
- # URL used to obtain JWT token
- jwt_auth_url=self.jwt_auth_path,
- )
- else:
- # User didn't provide jwt_auth_path in slips.conf
- client.set_auth(
- username=self.taxii_username,
- password=self.taxii_password,
- )
-
- # Check the available services to make sure inbox service is there
- services = client.discover_services()
- # Check if inbox is there
- for service in services:
- if "inbox" in service.type.lower():
- break
- else:
- # Comes here if it cant find inbox in services
- self.print(
- "Server doesn't have inbox available. "
- "Exporting STIX_data.json is cancelled.",
- 0,
- 2,
- )
- return False
-
- # Get the data that we want to send
- with open("STIX_data.json") as stix_file:
- stix_data = stix_file.read()
- # Make sure we don't push empty files
- if len(stix_data) > 0:
- binding = "urn:stix.mitre.org:json:2.1"
- # URI is the path to the inbox service we want to
- # use in the taxii server
- client.push(
- stix_data,
- binding,
- collection_names=[self.collection_name],
- uri=self.inbox_path,
- )
- self.print(
- f"Successfully exported to TAXII server: "
- f"{self.TAXII_server}.",
- 1,
- 0,
- )
- return True
-
- def export_to_stix(self, msg_to_send: tuple) -> bool:
- """
- Function to export evidence to a STIX_data.json file in the cwd.
- It keeps appending the given indicator to STIX_data.json until they're
- sent to the
- taxii server
- msg_to_send is a tuple: (evidence_type, attacker_direction,attacker,
- description)
- evidence_type: e.g PortScan, ThreatIntelligence etc
- attacker_direction: e.g dip sip dport sport
- attacker: ip or port OR ip:port:proto
- description: e.g 'New horizontal port scan detected to port 23.
- Not Estab TCP from IP: ip-address. Tot pkts sent all IPs: 9'
- """
- # self.print(f"Exporting STIX data to {self.TAXII_server} every
- # {self.push_delay} seconds.")
- # ---------------- set name attribute ----------------
- evidence_type, attacker_direction, attacker, description = (
- msg_to_send[0],
- msg_to_send[1],
- msg_to_send[2],
- msg_to_send[3],
- )
- # In case of ssh connection, evidence_type is set to
- # SSHSuccessful-by-ip (special case) , ip here is variable
- # So we change that to be able to access it in the below dict
- if "SSHSuccessful" in evidence_type:
- evidence_type = "SSHSuccessful"
- # This dict contains each type and the way we should describe
- # it in STIX name attribute
-
- # Get the right description to use in stix
- name = evidence_type
-
- # ---------------- set pattern attribute ----------------
- if "port" in attacker_direction:
- # attacker is a port probably coming from a
- # portscan we need the ip instead
- attacker = description[
- description.index("IP: ") + 4 : description.index(" Tot") - 1
- ]
- elif "tcp" in attacker:
- # for example 127.0.0.1:443:tcp
- # Get the ip
- attacker = attacker.split(":")[0]
- ioc_type = utils.detect_data_type(attacker)
- if ioc_type == "ip":
- pattern = f"[ip-addr:value = '{attacker}']"
- elif ioc_type == "domain":
- pattern = f"[domain-name:value = '{attacker}']"
- elif ioc_type == "url":
- pattern = f"[url:value = '{attacker}']"
- else:
- self.print(f"Can't set pattern for STIX. {attacker}", 0, 3)
- return False
- # Required Indicator Properties: type, spec_version, id, created,
- # modified , all are set automatically
- # Valid_from, created and modified attribute will
- # be set to the current time
- # ID will be generated randomly
- # ref https://docs.oasis-open.org/cti/stix/v2.1/os/stix-v2.1-os.html#_6khi84u7y58g
- indicator = Indicator(
- name=name, pattern=pattern, pattern_type="stix"
- ) # the pattern language that the indicator pattern is expressed in.
- # Create and Populate Bundle.
- # All our indicators will be inside bundle['objects'].
- bundle = Bundle()
- if not self.is_bundle_created:
- bundle = Bundle(indicator)
- # Clear everything in the existing STIX_data.json
- # if it's not empty
- open("STIX_data.json", "w").close()
- # Write the bundle.
- with open("STIX_data.json", "w") as stix_file:
- stix_file.write(str(bundle))
- self.is_bundle_created = True
- elif not self.ip_exists_in_stix_file(attacker):
- # Bundle is already created just append to it
- # r+ to delete last 4 chars
- with open("STIX_data.json", "r+") as stix_file:
- # delete the last 4 characters in the file ']\n}\n' so we
- # can append to the objects array and add them back later
- stix_file.seek(0, os.SEEK_END)
- stix_file.seek(stix_file.tell() - 4, 0)
- stix_file.truncate()
-
- # Append mode to add the new indicator to the objects array
- with open("STIX_data.json", "a") as stix_file:
- # Append the indicator in the objects array
- stix_file.write(f",{str(indicator)}" + "]\n}\n")
-
- # Set of unique ips added to stix_data.json to avoid duplicates
- self.added_ips.add(attacker)
- self.print("Indicator added to STIX_data.json", 2, 0)
- return True
-
- def schedule_sending_to_taxii_server(self):
- """
- Responsible for publishing STIX_data.json to the taxii server every
- self.push_delay seconds when running on an interface only
- """
- while True:
- # on an interface, we use the push delay from slips.conf
- # on files, we push once when slips is stopping
- time.sleep(self.push_delay)
- # Sometimes the time's up and we need to send to
- # server again but there's no
- # new alerts in stix_data.json yet
- if os.path.exists("STIX_data.json"):
- self.push_to_taxii_server()
- # Delete stix_data.json file so we don't send duplicates
- os.remove("STIX_data.json")
- self.is_bundle_created = False
- else:
- self.print(
- f"{self.push_delay} seconds passed, "
- f"no new alerts in STIX_data.json.",
- 2,
- 0,
- )
-
- def should_export_to_stix(self) -> bool:
- return "stix" in self.export_to
-
- def should_export_to_slack(self) -> bool:
- return "slack" in self.export_to
def shutdown_gracefully(self):
- # We need to publish to taxii server before stopping
- if self.should_export_to_stix():
- self.push_to_taxii_server()
-
- if self.should_export_to_slack() and hasattr(self, "BOT_TOKEN"):
- date_time = datetime.datetime.now()
- date_time = utils.convert_format(date_time, utils.alerts_format)
- self.send_to_slack(
- f"{date_time}: Slips finished on sensor: {self.sensor_name}."
- )
+ self.slack.shutdown_gracefully()
+ self.stix.shutdown_gracefully()
def pre_main(self):
utils.drop_root_privs()
- if self.should_export_to_slack():
- if not self.get_slack_token():
- self.print(
- f"Please add slack bot token to "
- f"{self.slack_token_filepath}. Exporting to Slack "
- f"aborted..",
- 0,
- 1,
- )
- if hasattr(self, "BOT_TOKEN"):
- date_time = datetime.datetime.now()
- date_time = utils.convert_format(
- date_time, utils.alerts_format
- )
- self.send_to_slack(
- f"{date_time}: Slips started on sensor:"
- f" {self.sensor_name}."
- )
+ export_to_slack = self.slack.should_export()
+ export_to_stix = self.stix.should_export()
- if self.is_running_on_interface and self.should_export_to_stix():
+ if export_to_slack:
+ self.slack.send_init_msg()
+
+ if export_to_stix:
# This thread is responsible for waiting n seconds before
# each push to the stix server
# it starts the timer when the first alert happens
- self.export_to_taxii_thread.start()
+ self.stix.start_exporting_thread()
+
+        if not (export_to_slack or export_to_stix):
+ return 1
+
+ def remove_sensitive_info(self, evidence: dict) -> str:
+ """
+        removes the leaked location coordinates from the evidence
+ description before exporting
+ returns the description without sensitive info
+ """
+ if "NETWORK_GPS_LOCATION_LEAKED" not in evidence["evidence_type"]:
+ return evidence["description"]
+
+ description = evidence["description"]
+ return description[: description.index("Leaked location")]
def main(self):
+ # a msg is sent here for each evidence that was part of an alert
if msg := self.get_msg("export_evidence"):
evidence = json.loads(msg["data"])
- description: str = evidence["description"]
-
- if self.should_export_to_slack() and hasattr(self, "BOT_TOKEN"):
+ description = self.remove_sensitive_info(evidence)
+ if self.slack.should_export():
srcip = evidence["profile"]["ip"]
msg_to_send = f"Src IP {srcip} Detected {description}"
- self.send_to_slack(msg_to_send)
+ self.slack.export(msg_to_send)
- if self.should_export_to_stix():
+ if self.stix.should_export():
msg_to_send = (
evidence["evidence_type"],
- evidence["attacker"]["direction"],
evidence["attacker"]["value"],
- description,
)
- exported_to_stix = self.export_to_stix(msg_to_send)
- if not exported_to_stix:
- self.print("Problem in export_to_STIX()", 0, 3)
+ added_to_stix: bool = self.stix.add_to_stix_file(msg_to_send)
+ if added_to_stix:
+ # now export to taxii
+ self.stix.export()
+ else:
+ self.print("Problem in add_to_stix_file()", 0, 3)
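
For context, the description trimming that remove_sensitive_info() introduces above can be sketched on its own. The sample evidence below is hypothetical; only the NETWORK_GPS_LOCATION_LEAKED type and the "Leaked location" marker come from the code:

    def remove_sensitive_info(evidence: dict) -> str:
        # anything before the "Leaked location" marker is safe to export
        if "NETWORK_GPS_LOCATION_LEAKED" not in evidence["evidence_type"]:
            return evidence["description"]
        description = evidence["description"]
        return description[: description.index("Leaked location")]

    evidence = {
        "evidence_type": "NETWORK_GPS_LOCATION_LEAKED",
        "description": "Detected GPS location leaked by 192.168.1.5. Leaked location: 50.08,14.42",
    }
    print(remove_sensitive_info(evidence))
    # -> "Detected GPS location leaked by 192.168.1.5. "
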
diff --git a/modules/exporting_alerts/slack_exporter.py b/modules/exporting_alerts/slack_exporter.py
new file mode 100644
index 000000000..e571c7549
--- /dev/null
+++ b/modules/exporting_alerts/slack_exporter.py
@@ -0,0 +1,104 @@
+from slack import WebClient
+from slack.errors import SlackApiError
+import datetime
+
+from slips_files.common.slips_utils import utils
+from slips_files.common.abstracts.exporter import IExporter
+from slips_files.common.parsers.config_parser import ConfigParser
+
+
+class SlackExporter(IExporter):
+ def init(self):
+ self.configs_read: bool = self.read_configuration()
+ if self.should_export():
+ self.print("Exporting to Slack.")
+
+ @property
+ def name(self):
+ return "SlackExporter"
+
+ def read_configuration(self) -> bool:
+ """reurns true if all necessary configs are present and read"""
+ conf = ConfigParser()
+
+ # Available options ['slack','stix']
+ self.export_to = conf.export_to()
+ if "slack" not in self.export_to:
+ return False
+
+ slack_token_filepath = conf.slack_token_filepath()
+ try:
+ self.token: str = self.read_slack_token(slack_token_filepath)
+ except (FileNotFoundError, NameError):
+ self.print(
+ f"Please add slack bot token to "
+ f"{slack_token_filepath}. Exporting to Slack "
+ f"aborted..",
+ 0,
+ 1,
+ )
+ return False
+
+ self.slack_channel_name = conf.slack_channel_name()
+ self.sensor_name = conf.sensor_name()
+ return True
+
+ def get_human_readable_datetime(self) -> str:
+ date_time = datetime.datetime.now()
+ return utils.convert_format(date_time, utils.alerts_format)
+
+ def send_init_msg(self):
+ self.export(
+ f"{self.get_human_readable_datetime()}: "
+ f"Slips started on sensor: {self.sensor_name}."
+ )
+
+ def send_stop_msg(self):
+ self.export(
+ f"{self.get_human_readable_datetime()}: "
+ f"Slips stopped on sensor: {self.sensor_name}."
+ )
+
+ def read_slack_token(self, filepath) -> str:
+ """
+ reads slack_token_filepath.
+ returns the token as a str
+ """
+ with open(filepath) as f:
+ token = f.read()
+
+ if len(token) < 5:
+ self.print(f"invalid slack bot token in {filepath}.", 0, 2)
+ raise NameError
+ return token
+
+ def export(self, msg_to_send: str) -> bool:
+ """exports evidence/alerts to Slack"""
+ slack_client = WebClient(token=self.token)
+ try:
+ slack_client.chat_postMessage(
+ # Channel name is set in slips.conf
+ channel=self.slack_channel_name,
+ # Sensor name is set in slips.conf
+ text=f"{self.sensor_name}: {msg_to_send}",
+ )
+ return True
+
+ except SlackApiError as e:
+ # You will get a SlackApiError if "ok" is False
+ assert e.response[
+ "error"
+ ], "Problem while exporting to slack." # str like
+ # 'invalid_auth', 'channel_not_found'
+ return False
+
+ def shutdown_gracefully(self):
+ """Exits gracefully"""
+ if not self.should_export():
+ return
+ self.print("Done exporting to Slack.")
+ self.send_stop_msg()
+
+ def should_export(self) -> bool:
+ """Determines whether to export or not"""
+ return self.configs_read
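
The new SlackExporter wraps the same legacy slack WebClient call the old module used. A condensed, standalone sketch of that flow follows; the token file path, channel and sensor name are placeholders for values Slips reads from slips.conf:

    from slack import WebClient
    from slack.errors import SlackApiError

    def send_to_slack(token_file: str, channel: str, sensor: str, msg: str) -> bool:
        # read the bot token the same way read_slack_token() does
        with open(token_file) as f:
            token = f.read()
        if len(token) < 5:
            return False
        try:
            WebClient(token=token).chat_postMessage(
                channel=channel, text=f"{sensor}: {msg}"
            )
            return True
        except SlackApiError:
            return False

    # send_to_slack("slack_bot_token_secret", "proj_slips", "sensor1", "test alert")
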
diff --git a/modules/exporting_alerts/stix_exporter.py b/modules/exporting_alerts/stix_exporter.py
new file mode 100644
index 000000000..0c0a24990
--- /dev/null
+++ b/modules/exporting_alerts/stix_exporter.py
@@ -0,0 +1,261 @@
+from stix2 import Indicator, Bundle
+from cabby import create_client
+import time
+import threading
+import sys
+import os
+
+from slips_files.common.abstracts.exporter import IExporter
+from slips_files.common.parsers.config_parser import ConfigParser
+from slips_files.common.slips_utils import utils
+
+
+class StixExporter(IExporter):
+ def init(self):
+ self.port = None
+ self.is_running_on_interface = (
+ "-i" in sys.argv or self.db.is_growing_zeek_dir()
+ )
+ self.stix_filename = "STIX_data.json"
+ self.configs_read: bool = self.read_configuration()
+ if self.should_export():
+ self.print(
+ f"Exporting to Stix & TAXII very "
+ f"{self.push_delay} seconds."
+ )
+ # This bundle should be created once and we should
+ # append all indicators to it
+ self.is_bundle_created = False
+ # To avoid duplicates in STIX_data.json
+ self.added_ips = set()
+ self.export_to_taxii_thread = threading.Thread(
+ target=self.schedule_sending_to_taxii_server, daemon=True
+ )
+
+ def start_exporting_thread(self):
+ # This thread is responsible for waiting n seconds before
+ # each push to the stix server
+ # it starts the timer when the first alert happens
+ self.export_to_taxii_thread.start()
+
+ @property
+ def name(self):
+ return "StixExporter"
+
+ def create_client(self):
+ client = create_client(
+ self.TAXII_server,
+ use_https=self.use_https,
+ port=self.port,
+ discovery_path=self.discovery_path,
+ )
+
+ if self.jwt_auth_path != "":
+ client.set_auth(
+ username=self.taxii_username,
+ password=self.taxii_password,
+ # URL used to obtain JWT token
+ jwt_auth_url=self.jwt_auth_path,
+ )
+ else:
+ # User didn't provide jwt_auth_path in slips.conf
+ client.set_auth(
+ username=self.taxii_username,
+ password=self.taxii_password,
+ )
+ return client
+
+ def inbox_service_exists_in_taxii_server(self, services):
+ """
+ Checks if inbox service is available in the taxii server
+ """
+ for service in services:
+ if "inbox" in service.type.lower():
+ return True
+
+ self.print(
+ "Server doesn't have inbox available. "
+ "Exporting STIX_data.json is cancelled.",
+ 0,
+ 2,
+ )
+ return False
+
+ def read_stix_file(self) -> str:
+ with open(self.stix_filename) as stix_file:
+ stix_data = stix_file.read()
+ return stix_data
+
+ def export(self) -> bool:
+ """
+ Exports evidence/alerts to the TAXII server
+ Uses Inbox Service (TAXII Service to Support Producer-initiated
+ pushes of cyber threat information) to publish
+ our STIX_data.json file
+ """
+        if not self.should_export():
+ return False
+
+ client = self.create_client()
+
+ # Check the available services to make sure inbox service is there
+ services = client.discover_services()
+ if not self.inbox_service_exists_in_taxii_server(services):
+ return False
+
+ stix_data: str = self.read_stix_file()
+
+ # Make sure we don't push empty files
+ if len(stix_data) == 0:
+ return False
+
+ binding = "urn:stix.mitre.org:json:2.1"
+ # URI is the path to the inbox service we want to
+ # use in the taxii server
+ client.push(
+ stix_data,
+ binding,
+ collection_names=[self.collection_name],
+ uri=self.inbox_path,
+ )
+ self.print(
+ f"Successfully exported to TAXII server: " f"{self.TAXII_server}.",
+ 1,
+ 0,
+ )
+ return True
+
+ def shutdown_gracefully(self):
+ """Exits gracefully"""
+ # We need to publish to taxii server before stopping
+ if self.should_export():
+ self.export()
+
+ def should_export(self) -> bool:
+ """Determines whether to export or not"""
+ return self.is_running_on_interface and "stix" in self.export_to
+
+ def read_configuration(self) -> bool:
+ """Reads configuration"""
+ conf = ConfigParser()
+ # Available options ['slack','stix']
+ self.export_to = conf.export_to()
+
+ if "stix" not in self.export_to:
+ return False
+
+ self.TAXII_server = conf.taxii_server()
+ self.port = conf.taxii_port()
+ self.use_https = conf.use_https()
+ self.discovery_path = conf.discovery_path()
+ self.inbox_path = conf.inbox_path()
+ # push_delay is only used when slips is running using -i
+ self.push_delay = conf.push_delay()
+ self.collection_name = conf.collection_name()
+ self.taxii_username = conf.taxii_username()
+ self.taxii_password = conf.taxii_password()
+ self.jwt_auth_path = conf.jwt_auth_path()
+ # push delay exists -> create a thread that waits
+ # push delay doesn't exist -> running using file not interface
+ # -> only push to taxii server once before
+ # stopping
+ return True
+
+ def ip_exists_in_stix_file(self, ip):
+ """Searches for ip in STIX_data.json to avoid exporting duplicates"""
+ return ip in self.added_ips
+
+ def get_ioc_pattern(self, ioc_type: str, attacker) -> str:
+ patterns_map = {
+ "ip": f"[ip-addr:value = '{attacker}']",
+ "domain": f"[domain-name:value = '{attacker}']",
+ "url": f"[url:value = '{attacker}']",
+ }
+        if ioc_type not in patterns_map:
+ self.print(f"Can't set pattern for STIX. {attacker}", 0, 3)
+ return False
+ return patterns_map[ioc_type]
+
+ def add_to_stix_file(self, to_add: tuple) -> bool:
+ """
+        Exports the given evidence to a STIX_data.json file in the cwd.
+        Keeps appending the given indicator to STIX_data.json until
+        it's sent to the TAXII server.
+        to_add is a tuple: (evidence_type, attacker)
+        evidence_type: e.g. PortScan, ThreatIntelligence, etc.
+        attacker: IP of the attacker
+ """
+ evidence_type, attacker = (
+ to_add[0],
+ to_add[1],
+ )
+ # Get the right description to use in stix
+ name = evidence_type
+ ioc_type = utils.detect_data_type(attacker)
+ pattern: str = self.get_ioc_pattern(ioc_type, attacker)
+ # Required Indicator Properties: type, spec_version, id, created,
+ # modified , all are set automatically
+ # Valid_from, created and modified attribute will
+ # be set to the current time
+ # ID will be generated randomly
+ # ref https://docs.oasis-open.org/cti/stix/v2.1/os/stix-v2.1-os.html#_6khi84u7y58g
+ indicator = Indicator(
+ name=name, pattern=pattern, pattern_type="stix"
+ ) # the pattern language that the indicator pattern is expressed in.
+ # Create and Populate Bundle.
+ # All our indicators will be inside bundle['objects'].
+ bundle = Bundle()
+ if not self.is_bundle_created:
+ bundle = Bundle(indicator)
+ # Clear everything in the existing STIX_data.json
+ # if it's not empty
+ open(self.stix_filename, "w").close()
+ # Write the bundle.
+ with open(self.stix_filename, "w") as stix_file:
+ stix_file.write(str(bundle))
+ self.is_bundle_created = True
+ elif not self.ip_exists_in_stix_file(attacker):
+ # Bundle is already created just append to it
+ # r+ to delete last 4 chars
+ with open(self.stix_filename, "r+") as stix_file:
+ # delete the last 4 characters in the file ']\n}\n' so we
+ # can append to the objects array and add them back later
+ stix_file.seek(0, os.SEEK_END)
+ stix_file.seek(stix_file.tell() - 4, 0)
+ stix_file.truncate()
+
+ # Append mode to add the new indicator to the objects array
+ with open(self.stix_filename, "a") as stix_file:
+ # Append the indicator in the objects array
+ stix_file.write(f",{str(indicator)}" + "]\n}\n")
+
+ # Set of unique ips added to stix_data.json to avoid duplicates
+ self.added_ips.add(attacker)
+ self.print("Indicator added to STIX_data.json", 2, 0)
+ return True
+
+ def schedule_sending_to_taxii_server(self):
+ """
+ Responsible for publishing STIX_data.json to the taxii server every
+ self.push_delay seconds when running on an interface only
+ """
+ while True:
+ # on an interface, we use the push delay from slips.conf
+ # on files, we push once when slips is stopping
+ time.sleep(self.push_delay)
+ # Sometimes the time's up and we need to send to
+ # server again but there's no
+ # new alerts in stix_data.json yet
+ if os.path.exists(self.stix_filename):
+ self.export()
+ # Delete stix_data.json file so we don't send duplicates
+ os.remove(self.stix_filename)
+ self.is_bundle_created = False
+ else:
+ self.print(
+ f"{self.push_delay} seconds passed, "
+ f"no new alerts in STIX_data.json.",
+ 2,
+ 0,
+ )
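
One detail worth spelling out is how add_to_stix_file() grows the bundle without re-parsing it: the closing "]\n}\n" of STIX_data.json is truncated and ",<indicator>]\n}\n" is appended in its place. A standalone sketch of that mechanic, using plain-JSON stand-ins for the indicators (the ids are made up):

    import json
    import os

    fname = "bundle_demo.json"
    with open(fname, "w") as f:
        # the first indicator creates the bundle (is_bundle_created == False)
        f.write('{\n"objects": [\n{"id": "indicator--1"}]\n}\n')

    with open(fname, "r+") as f:
        # drop the trailing ']\n}\n' (4 chars) so the objects array is open again
        f.seek(0, os.SEEK_END)
        f.seek(f.tell() - 4, 0)
        f.truncate()

    with open(fname, "a") as f:
        # append the next indicator and close the array and the object again
        f.write(',{"id": "indicator--2"}' + "]\n}\n")

    with open(fname) as f:
        print(json.load(f)["objects"])  # both indicators are present
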
diff --git a/modules/flowalerts/flowalerts.py b/modules/flowalerts/flowalerts.py
index 611655199..08ef387b5 100644
--- a/modules/flowalerts/flowalerts.py
+++ b/modules/flowalerts/flowalerts.py
@@ -1,6 +1,5 @@
import contextlib
-from slips_files.common.imports import *
-
+import multiprocessing
import json
import threading
import ipaddress
@@ -11,11 +10,13 @@
import math
import time
-from slips_files.common.imports import *
+
+from slips_files.common.parsers.config_parser import ConfigParser
+from slips_files.common.slips_utils import utils
+from slips_files.common.abstracts.module import IModule
from .timer_thread import TimerThread
from .set_evidence import SetEvidnceHelper
from slips_files.core.helpers.whitelist import Whitelist
-from slips_files.common.slips_utils import utils
from typing import List, Tuple, Dict
@@ -180,7 +181,7 @@ def check_long_connection(
# Do not check the duration of the flow
return
- if type(dur) == str:
+ if isinstance(dur, str):
dur = float(dur)
# If duration is above threshold, we should set an evidence
@@ -571,13 +572,13 @@ def is_well_known_org(self, ip):
ip_data = self.db.get_ip_info(ip)
try:
SNI = ip_data["SNI"]
- if type(SNI) == list:
+ if isinstance(SNI, list):
# SNI is a list of dicts, each dict contains the
# 'server_name' and 'port'
SNI = SNI[0]
if SNI in (None, ""):
SNI = False
- elif type(SNI) == dict:
+ elif isinstance(SNI, dict):
SNI = SNI.get("server_name", False)
except (KeyError, TypeError):
# No SNI data for this ip
@@ -973,7 +974,7 @@ def check_successful_ssh(
uid, timestamp, profileid, twid, auth_success
)
- def detect_incompatible_CN(
+ def detect_incompatible_cn(
self, daddr, server_name, issuer, profileid, twid, uid, timestamp
):
"""
@@ -992,11 +993,11 @@ def detect_incompatible_CN(
# to use it to set evidence later
found_org_in_cn = org
- # check that the domain belongs to that same org
+ # check that the ip belongs to that same org
if self.whitelist.is_ip_in_org(daddr, org):
return False
- # check that the ip belongs to that same org
+ # check that the domain belongs to that same org
if server_name and self.whitelist.is_domain_in_org(
server_name, org
):
@@ -1975,7 +1976,7 @@ def main(self):
saddr, daddr, ja3, ja3s, twid, uid, timestamp
)
- self.detect_incompatible_CN(
+ self.detect_incompatible_cn(
daddr, server_name, issuer, profileid, twid, uid, timestamp
)
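
A side note on the isinstance() changes in this file: type(x) == str only matches the exact type, while isinstance() also accepts subclasses and is the idiomatic check. A small illustration:

    class Duration(str):
        pass

    dur = Duration("120.5")
    print(type(dur) == str)      # False: exact-type comparison rejects the subclass
    print(isinstance(dur, str))  # True: isinstance accepts str and its subclasses
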
diff --git a/modules/network_discovery/horizontal_portscan.py b/modules/network_discovery/horizontal_portscan.py
index 3bafb42f3..8cfe0520a 100644
--- a/modules/network_discovery/horizontal_portscan.py
+++ b/modules/network_discovery/horizontal_portscan.py
@@ -1,5 +1,7 @@
import ipaddress
+import validators
+
from slips_files.common.slips_utils import utils
from slips_files.core.evidence_structure.evidence import (
Evidence,
@@ -19,61 +21,16 @@
class HorizontalPortscan:
def __init__(self, db):
self.db = db
- # We need to know that after a detection, if we receive another flow
- # that does not modify the count for the detection, we are not
- # re-detecting again only because the threshold was overcomed last time.
- self.cached_tw_thresholds = {}
- # the separator used to separate the IP and the word profile
- self.fieldseparator = self.db.get_field_separator()
-
- # The minimum amount of ips to scan horizontal scan
- self.port_scan_minimum_dips = 5
- self.pending_horizontal_ps_evidence = {}
- # we should alert once we find 1 horizontal ps evidence then combine the rest of evidence every x seconds
- # format is { scanned_port: True/False , ...}
- self.alerted_once_horizontal_ps = {}
-
- def combine_evidence(self):
- """
- Combines all the evidence in pending_horizontal_ps_evidence into 1 evidence and calls set_evidence
- this function is called every 3 pending ev
- """
- for key, evidence_list in self.pending_horizontal_ps_evidence.items():
- # each key here is {profileid}-{twid}-{state}-{protocol}-{dport}
- # each value here is a list of evidence that should be combined
- profileid, twid, state, protocol, dport = key.split("-")
- final_evidence_uids = []
- final_pkts_sent = 0
- # combine all evidence that share the above key
- for evidence in evidence_list:
- # each evidence is a tuple of (timestamp, pkts_sent, uids, amount_of_dips)
- # in the final evidence, we'll be using the ts of the last evidence
- timestamp, pkts_sent, evidence_uids, amount_of_dips = evidence
- # since we're combining evidence, we want the uids of the final evidence
- # to be the sum of all the evidence we combined
- final_evidence_uids += evidence_uids
- final_pkts_sent += pkts_sent
-
- evidence = {
- "protocol": protocol,
- "profileid": profileid,
- "twid": twid,
- "uids": final_evidence_uids,
- "dport": dport,
- "pkts_sent": final_pkts_sent,
- "timestamp": timestamp,
- "state": state,
- "amount_of_dips": amount_of_dips,
- }
-
- self.set_evidence_horizontal_portscan(evidence)
- # reset the dict since we already combined the evidence
- self.pending_horizontal_ps_evidence = {}
+        # to keep track of the max dstips reported per timewindow
+        self.cached_thresholds_per_tw = {}
+        # The minimum amount of scanned dstips to trigger an evidence.
+        # The reporting threshold then grows by 15 with each evidence
+        # and is tracked per timewindow
+ self.minimum_dstips_to_set_evidence = 5
def get_resolved_ips(self, dstips: dict) -> list:
"""
- returns the list of dstips that have dns resolution, we will discard them when checking for
- horizontal portscans
+ returns the list of dstips that have dns resolution, we will
+ discard them when checking for horizontal portscans
"""
dstips_to_discard = []
# Remove dstips that have DNS resolution already
@@ -101,16 +58,19 @@ def get_not_estab_dst_ports(
totalflows: total flows seen by the profileid
totalpkt: total packets seen by the profileid
totalbytes: total bytes sent by the profileid
- stime: timestamp of the first flow seen from this profileid -> this dstip
+ stime: timestamp of the first flow seen from this
+ profileid -> this dstip
uid: list of uids where the given profileid was
contacting the dst_ip on this dstport
- dstports: dst ports seen in all flows where the given profileid was srcip
+ dstports: dst ports seen in all flows where the given
+ profileid was srcip
{
: < int spkts sent to this port>
}
}
"""
- # Get the list of dports that we connected as client using TCP not established
+ # Get the list of dports that we connected as client
+ # using TCP not established
direction = "Dst"
role = "Client"
type_data = "Ports"
@@ -119,10 +79,11 @@ def get_not_estab_dst_ports(
)
return dports
- def get_cache_key(self, profileid: str, twid: str, dport):
+ def get_twid_identifier(self, profileid: str, twid: str, dport) -> str:
if not dport:
return False
- return f"{profileid}:{twid}:dport:{dport}:HorizontalPortscan"
+
+ return f"{profileid}:{twid}:dport:{dport}"
def get_packets_sent(self, dstips: dict) -> int:
"""
@@ -147,28 +108,54 @@ def get_packets_sent(self, dstips: dict) -> int:
pkts_sent += int(dstips[dstip]["spkts"])
return pkts_sent
+ def are_dstips_greater_or_eq_minimum_dstips(self, dstips) -> bool:
+ return dstips >= self.minimum_dstips_to_set_evidence
+
+ @staticmethod
+ def are_ips_greater_or_eq_last_evidence(
+ dstips: int, ips_reported_last_evidence: int
+ ) -> bool:
+ """
+        Makes sure the amount of dstips reported in
+        each evidence is higher than the previous one +15,
+        so the first alert will always report 5 dstips,
+        and then 20+, 35+, etc.
+
+ :param dstips: dstips to report in the current evidence
+ :param ips_reported_last_evidence: the amount of
+ ips reported in the last evidence in the current
+ evidence's timewindow
+ """
+        # the goal is to never get an evidence that's 1 or 2 IPs
+        # more than the previous one, so we don't have so many
+        # portscan evidence
+ if ips_reported_last_evidence == 0:
+ # first portscan evidence in this threshold, no past evidence
+ # to compare with
+ return True
+
+ return dstips >= ips_reported_last_evidence + 15
+
+ def should_set_evidence(self, dstips: int, twid_threshold: int) -> bool:
+ more_than_min = self.are_dstips_greater_or_eq_minimum_dstips(dstips)
+ exceeded_twid_threshold = self.are_ips_greater_or_eq_last_evidence(
+ dstips, twid_threshold
+ )
+ return more_than_min and exceeded_twid_threshold
+
def check_if_enough_dstips_to_trigger_an_evidence(
- self, cache_key: str, amount_of_dips: int
+ self, twid_identifier: str, amount_of_dips: int
) -> bool:
"""
checks if the scanned dst ips are enough to trigger and
evidence
- we make sure the amount of scammed dst ips reported each
- evidence
- is higher than the previous one +5
+ to make sure the amount of scanned dst ips reported each
+ evidence is higher than the previous one +15
"""
- prev_amount_dips = self.cached_tw_thresholds.get(cache_key, 0)
+ twid_threshold = self.cached_thresholds_per_tw.get(twid_identifier, 0)
- # so the first alert will always report 5 dstips,
- # and then 10+,15+,20+ etc
- # the goal is to never get an evidence that's 1 or 2 ports
- # more than the previous one so we dont
- # have so many portscan evidence
- if (
- amount_of_dips >= self.port_scan_minimum_dips
- and prev_amount_dips + 5 <= amount_of_dips
- ):
- self.cached_tw_thresholds[cache_key] = amount_of_dips
+ if self.should_set_evidence(amount_of_dips, twid_threshold):
+ self.cached_thresholds_per_tw[twid_identifier] = amount_of_dips
return True
return False
@@ -183,21 +170,76 @@ def get_uids(self, dstips: dict):
uids.append(uid)
return uids
- def check(self, profileid: str, twid: str):
- saddr = profileid.split(self.fieldseparator)[1]
- try:
+ def set_evidence_horizontal_portscan(self, evidence: dict):
+ threat_level = ThreatLevel.HIGH
+ confidence = utils.calculate_confidence(evidence["pkts_sent"])
+ srcip = evidence["profileid"].split("_")[-1]
+
+ attacker = Attacker(
+ direction=Direction.SRC, attacker_type=IoCType.IP, value=srcip
+ )
+ portproto = f'{evidence["dport"]}/{evidence["protocol"]}'
+ port_info = self.db.get_port_info(portproto) or ""
+ description = (
+ f"Horizontal port scan to port {port_info} {portproto}. "
+ f'From {srcip} to {evidence["amount_of_dips"]} '
+ f"unique destination IPs. "
+ f'Total packets sent: {evidence["pkts_sent"]}. '
+ f"Confidence: {confidence}. by Slips"
+ )
+
+ evidence = Evidence(
+ evidence_type=EvidenceType.HORIZONTAL_PORT_SCAN,
+ attacker=attacker,
+ threat_level=threat_level,
+ confidence=confidence,
+ description=description,
+ profile=ProfileID(ip=srcip),
+ timewindow=TimeWindow(
+ number=int(evidence["twid"].replace("timewindow", ""))
+ ),
+ uid=evidence["uids"],
+ timestamp=evidence["timestamp"],
+ category=IDEACategory.RECON_SCANNING,
+ conn_count=evidence["pkts_sent"],
+ proto=Proto(evidence["protocol"].lower()),
+ source_target_tag=Tag.RECON,
+ port=evidence["dport"],
+ )
+
+ self.db.set_evidence(evidence)
+
+ @staticmethod
+ def is_valid_saddr(profileid: str):
+ """
+ to avoid reporting port scans on the
+ broadcast or multicast addresses or invalid values
+ """
+ saddr = profileid.split("_")[1]
+ if saddr == "255.255.255.255":
+ return False
+
+ if validators.ipv4(saddr) or validators.ipv6(saddr):
saddr_obj = ipaddress.ip_address(saddr)
- if saddr == "255.255.255.255" or saddr_obj.is_multicast:
- # don't report port scans on the
- # broadcast or multicast addresses
- return False
- except ValueError:
- # it's a mac
- pass
+ return not saddr_obj.is_multicast
+
+ if validators.mac_address(saddr):
+ return True
+
+ return False
+
+ @staticmethod
+ def is_valid_twid(twid: str) -> bool:
+        return bool(twid) and "timewindow" in twid
+
+ def check(self, profileid: str, twid: str):
+ if not self.is_valid_saddr(profileid) or not self.is_valid_twid(twid):
+ return False
# if you're portscaning a port that is open it's gonna be established
# the amount of open ports we find is gonna be so small
- # theoretically this is incorrect bc we'll be ignoring established evidence,
+ # theoretically this is incorrect bc we'll be ignoring
+ # established evidence,
# but usually open ports are very few compared to the whole range
# so, practically this is correct to avoid FP
state = "Not Established"
@@ -215,13 +257,15 @@ def check(self, profileid: str, twid: str):
for ip in self.get_resolved_ips(dstips):
dstips.pop(ip)
- cache_key: str = self.get_cache_key(profileid, twid, dport)
- if not cache_key:
+ twid_identifier: str = self.get_twid_identifier(
+ profileid, twid, dport
+ )
+ if not twid_identifier:
continue
amount_of_dips = len(dstips)
if self.check_if_enough_dstips_to_trigger_an_evidence(
- cache_key, amount_of_dips
+ twid_identifier, amount_of_dips
):
evidence = {
"protocol": protocol,
@@ -235,90 +279,4 @@ def check(self, profileid: str, twid: str):
"amount_of_dips": amount_of_dips,
}
- self.decide_if_time_to_set_evidence_or_combine(
- evidence, cache_key
- )
-
- def decide_if_time_to_set_evidence_or_combine(
- self, evidence: dict, cache_key: str
- ) -> bool:
- """
- sets the evidence immediately if it was the
- first portscan evidence in this tw
- or combines past 3
- evidence and then sets an evidence.
- :return: True if evidence was set/combined,
- False if evidence was queued for combining later
- """
-
- if not self.alerted_once_horizontal_ps.get(cache_key, False):
- self.alerted_once_horizontal_ps[cache_key] = True
- self.set_evidence_horizontal_portscan(evidence)
- # from now on, we will be combining the next horizontal
- # ps evidence targeting this
- # dport
- return True
-
- # we will be combining further alerts to avoid alerting many times every portscan
- evidence_details = (
- evidence["timestamp"],
- evidence["pkts_sent"],
- evidence["uids"],
- evidence["amount_of_dips"],
- )
- # for all the combined alerts, the following params should be equal
- key = (
- f'{evidence["profileid"]}-{evidence["twid"]}-'
- f'{evidence["state"]}-{evidence["protocol"]}-'
- f'{evidence["dport"]}'
- )
-
- try:
- self.pending_horizontal_ps_evidence[key].append(evidence_details)
- except KeyError:
- # first time seeing this key
- self.pending_horizontal_ps_evidence[key] = [evidence_details]
-
- # combine evidence every 3 new portscans to the same dport
- if len(self.pending_horizontal_ps_evidence[key]) == 3:
- self.combine_evidence()
- return True
- return False
-
- def set_evidence_horizontal_portscan(self, evidence: dict):
- threat_level = ThreatLevel.HIGH
- confidence = utils.calculate_confidence(evidence["pkts_sent"])
- srcip = evidence["profileid"].split("_")[-1]
-
- attacker = Attacker(
- direction=Direction.SRC, attacker_type=IoCType.IP, value=srcip
- )
- portproto = f'{evidence["dport"]}/{evidence["protocol"]}'
- port_info = self.db.get_port_info(portproto) or ""
- description = (
- f"Horizontal port scan to port {port_info} {portproto}. "
- f'From {srcip} to {evidence["amount_of_dips"]} unique destination IPs. '
- f'Total packets sent: {evidence["pkts_sent"]}. '
- f"Confidence: {confidence}. by Slips"
- )
-
- evidence = Evidence(
- evidence_type=EvidenceType.HORIZONTAL_PORT_SCAN,
- attacker=attacker,
- threat_level=threat_level,
- confidence=confidence,
- description=description,
- profile=ProfileID(ip=srcip),
- timewindow=TimeWindow(
- number=int(evidence["twid"].replace("timewindow", ""))
- ),
- uid=evidence["uids"],
- timestamp=evidence["timestamp"],
- category=IDEACategory.RECON_SCANNING,
- conn_count=evidence["pkts_sent"],
- proto=Proto(evidence["protocol"].lower()),
- source_target_tag=Tag.RECON,
- port=evidence["dport"],
- )
-
- self.db.set_evidence(evidence)
+ self.set_evidence_horizontal_portscan(evidence)
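
Restating the new throttling rule in numbers: the first horizontal portscan evidence in a timewindow needs at least 5 scanned destination IPs, and each following evidence for the same profile/timewindow/dport needs at least 15 more than the last reported amount. A condensed sketch of the same check:

    MIN_DSTIPS = 5  # self.minimum_dstips_to_set_evidence

    def should_set_evidence(dstips: int, reported_last_evidence: int) -> bool:
        if reported_last_evidence == 0:
            # first evidence in this timewindow for this dport
            return dstips >= MIN_DSTIPS
        return dstips >= MIN_DSTIPS and dstips >= reported_last_evidence + 15

    threshold = 0
    for seen in (3, 5, 9, 20, 30, 36):
        if should_set_evidence(seen, threshold):
            print(f"evidence set at {seen} dstips")
            threshold = seen
    # evidence is set at 5, 20 and 36 dstips; 3, 9 and 30 are throttled
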
diff --git a/modules/network_discovery/network_discovery.py b/modules/network_discovery/network_discovery.py
index 6642b5cc6..2dd24cac0 100644
--- a/modules/network_discovery/network_discovery.py
+++ b/modules/network_discovery/network_discovery.py
@@ -1,7 +1,8 @@
import json
from typing import List
-from slips_files.common.imports import *
+from slips_files.common.slips_utils import utils
+from slips_files.common.abstracts.module import IModule
from modules.network_discovery.horizontal_portscan import HorizontalPortscan
from modules.network_discovery.vertical_portscan import VerticalPortscan
from slips_files.core.evidence_structure.evidence import (
@@ -56,11 +57,6 @@ def init(self):
# slips sets dhcp scan evidence
self.minimum_requested_addrs = 4
- def shutdown_gracefully(self):
- # alert about all the pending evidence before this module stops
- self.horizontal_ps.combine_evidence()
- self.vertical_ps.combine_evidence()
-
def check_icmp_sweep(
self,
msg: str,
@@ -309,6 +305,7 @@ def set_evidence_dhcp_scan(
self, timestamp, profileid, twid, uids, number_of_requested_addrs
):
srcip = profileid.split("_")[-1]
+ confidence = 0.8
description = (
f"Performing a DHCP scan by requesting "
f"{number_of_requested_addrs} different IP addresses. "
@@ -321,7 +318,7 @@ def set_evidence_dhcp_scan(
direction=Direction.SRC, attacker_type=IoCType.IP, value=srcip
),
threat_level=ThreatLevel.MEDIUM,
- confidence=0.8,
+ confidence=confidence,
description=description,
profile=ProfileID(ip=srcip),
timewindow=TimeWindow(number=twid_number),
diff --git a/modules/network_discovery/vertical_portscan.py b/modules/network_discovery/vertical_portscan.py
index f05d4bbb1..36e5c09d7 100644
--- a/modules/network_discovery/vertical_portscan.py
+++ b/modules/network_discovery/vertical_portscan.py
@@ -46,68 +46,11 @@ class VerticalPortscan:
def __init__(self, db):
self.db = db
- # We need to know that after a detection, if we receive another flow
- # that does not modify the count for the detection, we don't
- # re-detect again
- self.cached_tw_thresholds = {}
- # Get from the database the separator used to
- # separate the IP and the word profile
- self.fieldseparator = self.db.get_field_separator()
- # The minimum amount of ports to scan in vertical scan
- self.port_scan_minimum_dports = 5
- # list of tuples, each tuple is the args to setevidence
- self.pending_vertical_ps_evidence = {}
- # we should alert once we find 1 vertical ps evidence then
- # combine the rest of evidence every x seconds
- # the value of this dict will be true after
- # the first portscan alert to th ekey ip
- # format is {ip: True/False , ...}
- self.alerted_once_vertical_ps = {}
-
- def combine_evidence(self):
- """
- combines all evidence in self.pending_vertical_ps_evidence into 1
- evidence and empties the dict afterwards
- """
- for key, evidence_list in self.pending_vertical_ps_evidence.items():
- # each key here is {profileid}-{twid}-{state}-{protocol}-{dport}
- # each value here is a list of evidence that should be combined
- profileid, twid, _, protocol, dstip = key.split("-")
- final_evidence_uids = []
- final_pkts_sent = 0
-
- # combine all evidence that share the above key
- for evidence in evidence_list:
- # each evidence is a tuple of
- # (timestamp, pkts_sent, uids, amount_of_dips)
- # in the final evidence, we'll be
- # using the ts of the last evidence
- (
- timestamp,
- pkts_sent,
- evidence_uids,
- amount_of_dports,
- ) = evidence
- # since we're combining evidence,
- # we want the uids of the final evidence
- # to be the sum of all the evidence we combined
- final_evidence_uids += evidence_uids
- final_pkts_sent += pkts_sent
-
- evidence = {
- "timestamp": timestamp,
- "pkts_sent": final_pkts_sent,
- "protocol": protocol,
- "profileid": profileid,
- "twid": twid,
- "uid": final_evidence_uids,
- "amount_of_dports": amount_of_dports,
- "dstip": dstip,
- }
-
- self.set_evidence_vertical_portscan(evidence)
- # reset the dict since we already combined
- self.pending_vertical_ps_evidence = {}
+ # to keep track of the max dports reported per timewindow
+ self.cached_thresholds_per_tw = {}
+        # The minimum amount of scanned dports to trigger an evidence.
+        # The reporting threshold then grows by 15 with each evidence
+        # and is tracked per timewindow
+ self.minimum_dports_to_set_evidence = 5
def set_evidence_vertical_portscan(self, evidence: dict):
"""Sets the vertical portscan evidence in the db"""
@@ -151,76 +94,62 @@ def set_evidence_vertical_portscan(self, evidence: dict):
self.db.set_evidence(evidence)
- def decide_if_time_to_set_evidence_or_combine(
- self, evidence: dict, cache_key: str
+ def are_dports_greater_or_eq_minimum_dports(self, dports: int) -> bool:
+ return dports >= self.minimum_dports_to_set_evidence
+
+ @staticmethod
+ def are_dports_greater_or_eq_last_evidence(
+ dports: int, ports_reported_last_evidence: int
) -> bool:
"""
- sets the evidence immediately if it was the
- first portscan evidence in this tw
- or combines past 3
- evidence and then sets an evidence.
- :return: True if evidence was set/combined,
- False if evidence was queued for combining later
+        To make sure the amount of dports reported in
+        each evidence is higher than the previous one +15,
+        so the first alert will always report 5
+        dports, and then 20+, 35+, etc.
+
+ :param dports: dports to report in the current evidence
+ :param ports_reported_last_evidence: the amount of
+ ports reported in the last evidence in the current
+ evidence's timewindow
"""
- if not self.alerted_once_vertical_ps.get(cache_key, False):
- # now from now on, we will be combining the next vertical
- # ps evidence targetting this dport
- self.alerted_once_vertical_ps[cache_key] = True
- self.set_evidence_vertical_portscan(evidence)
+ if ports_reported_last_evidence == 0:
+ # first portscan evidence in this threshold, no past evidence
+ # to compare with
return True
+ return dports >= ports_reported_last_evidence + 15
- # we will be combining further alerts to avoid alerting
- # many times every portscan
- evidence_to_combine = (
- evidence["timestamp"],
- evidence["pkts_sent"],
- evidence["uid"],
- evidence["amount_of_dports"],
- )
+ def should_set_evidence(self, dports: int, twid_threshold: int) -> bool:
+ """
+        Makes sure the given dports are more than the minimum number of
+        dports we should alert on, and that it is more than the dports of
+        the last evidence
- # for all the combined alerts, the following params should be equal
- key = (
- f'{evidence["profileid"]}-'
- f'{evidence["twid"]}-'
- f'{evidence["state"]}-'
- f'{evidence["protocol"]}-'
- f'{evidence["dstip"]}'
+ The goal is to never get an evidence that's
+        1 or 2 ports more than the previous one so we don't
+ have so many portscan evidence
+ """
+ more_than_min = self.are_dports_greater_or_eq_minimum_dports(dports)
+ exceeded_twid_threshold = self.are_dports_greater_or_eq_last_evidence(
+ dports, twid_threshold
)
- try:
- self.pending_vertical_ps_evidence[key].append(evidence_to_combine)
- except KeyError:
- # first time seeing this key
- self.pending_vertical_ps_evidence[key] = [evidence_to_combine]
-
- # combine evidence every x new portscans to the same ip
- if len(self.pending_vertical_ps_evidence[key]) == 3:
- self.combine_evidence()
- return True
-
- return False
+ return more_than_min and exceeded_twid_threshold
def check_if_enough_dports_to_trigger_an_evidence(
- self, cache_key: str, amount_of_dports: int
+ self, twid_identifier: str, amount_of_dports: int
) -> bool:
"""
checks if the scanned sports are enough to trigger and evidence
- we make sure the amount of dports reported each evidence
- is higher than the previous one +5
+ to make sure the amount of dports reported each evidence
+ is higher than the previous one +15
"""
- prev_amount_dports: int = self.cached_tw_thresholds.get(cache_key, 0)
- # we make sure the amount of dports reported
- # each evidence is higher than the previous one +5
- # so the first alert will always report 5
- # dport, and then 10+,15+,20+ etc
- # the goal is to never get an evidence that's
- # 1 or 2 ports more than the previous one so we dont
- # have so many portscan evidence
- if (
- amount_of_dports >= self.port_scan_minimum_dports
- and prev_amount_dports + 5 <= amount_of_dports
- ):
- # Store in our local cache how many dips were there:
- self.cached_tw_thresholds[cache_key] = amount_of_dports
+ twid_threshold: int = self.cached_thresholds_per_tw.get(
+ twid_identifier, 0
+ )
+
+ if self.should_set_evidence(amount_of_dports, twid_threshold):
+            # keep track of the max reported dports
+            # in the last evidence in this twid
+ self.cached_thresholds_per_tw[twid_identifier] = amount_of_dports
return True
return False
@@ -228,7 +157,8 @@ def get_not_established_dst_ips(
self, protocol: str, state: str, profileid: str, twid: str
) -> dict:
"""
- Get the list of dstips that we tried to connect to (not established flows)
+ Get the list of dstips that we tried to connect to
+ (not established flows)
these unknowns are the info this function retrieves
profileid -> unknown_dstip:unknown_dstports
@@ -239,10 +169,12 @@ def get_not_established_dst_ips(
totalflows: total flows seen by the profileid
totalpkt: total packets seen by the profileid
totalbytes: total bytes sent by the profileid
- stime: timestamp of the first flow seen from this profileid -> this dstip
+ stime: timestamp of the first flow seen from
+ this profileid -> this dstip
uid: list of uids where the given profileid was
contacting the dst_ip on this dstport
- dstports: dst ports seen in all flows where the given profileid was srcip
+ dstports: dst ports seen in all flows where the given
+ profileid was srcip
{
: < int spkts sent to this port>
}
@@ -257,12 +189,14 @@ def get_not_established_dst_ips(
)
return dstips
- def get_cache_key(self, profileid: str, twid: str, dstip: str):
+ def get_twid_identifier(
+ self, profileid: str, twid: str, dstip: str
+ ) -> str:
"""
- returns the key that identifies this vertical portscan in thhe
+ returns the key that identifies this vertical portscan in the
given tw
"""
- return f"{profileid}:{twid}:dstip:{dstip}:VerticalPortscan"
+ return f"{profileid}:{twid}:dstip:{dstip}"
def check(self, profileid, twid):
"""
@@ -270,9 +204,10 @@ def check(self, profileid, twid):
"""
# if you're portscaning a port that is open it's gonna be established
# the amount of open ports we find is gonna be so small
- # theoretically this is incorrect bc we'll be ignoring established evidence,
- # but usually open ports are very few compared to the whole range
- # so, practically this is correct to avoid FP
+ # theoretically this is incorrect bc we'll be ignoring
+ # established connections, but usually open ports are very few
+ # compared to the whole range. so, practically this is correct to
+ # avoid FP
state = "Not Established"
for protocol in ("TCP", "UDP"):
@@ -280,17 +215,20 @@ def check(self, profileid, twid):
protocol, state, profileid, twid
)
- # For each dstip, see if the amount of ports connections is over the threshold
+ # For each dstip, see if the amount of ports
+ # connections is over the threshold
for dstip in dstips.keys():
- dstports: dict = dstips[dstip]["dstports"]
+ dst_ports: dict = dstips[dstip]["dstports"]
# Get the total amount of pkts sent to all
# ports on the same host
- pkts_sent = sum(dstports[dport] for dport in dstports)
- amount_of_dports = len(dstports)
+ pkts_sent = sum(dst_ports[dport] for dport in dst_ports)
+ amount_of_dports = len(dst_ports)
- cache_key = self.get_cache_key(profileid, twid, dstip)
+ twid_identifier: str = self.get_twid_identifier(
+ profileid, twid, dstip
+ )
if self.check_if_enough_dports_to_trigger_an_evidence(
- cache_key, amount_of_dports
+ twid_identifier, amount_of_dports
):
evidence_details = {
"timestamp": dstips[dstip]["stime"],
@@ -304,6 +242,4 @@ def check(self, profileid, twid):
"state": state,
}
- self.decide_if_time_to_set_evidence_or_combine(
- evidence_details, cache_key
- )
+ self.set_evidence_vertical_portscan(evidence_details)
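
The same escalation rule applies here, but note what the new cache key buys: thresholds are tracked per profile, timewindow and dstip, so escalating against one target (or in one timewindow) no longer suppresses evidence for another. The profile and IP values below are made up:

    def get_twid_identifier(profileid: str, twid: str, dstip: str) -> str:
        return f"{profileid}:{twid}:dstip:{dstip}"

    cached_thresholds_per_tw = {
        get_twid_identifier("profile_10.0.0.1", "timewindow1", "8.8.8.8"): 20,
    }
    # a different timewindow (or a different dstip) starts from 0 again
    key = get_twid_identifier("profile_10.0.0.1", "timewindow2", "8.8.8.8")
    print(cached_thresholds_per_tw.get(key, 0))  # 0
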
diff --git a/slips_files/common/abstracts/exporter.py b/slips_files/common/abstracts/exporter.py
new file mode 100644
index 000000000..7f494b250
--- /dev/null
+++ b/slips_files/common/abstracts/exporter.py
@@ -0,0 +1,80 @@
+"""
+An interface for modules that export evidence somewhere, whether to Slack,
+Warden, etc.
+"""
+
+from abc import ABC, abstractmethod
+
+from slips_files.common.abstracts.observer import IObservable
+from slips_files.core.database.database_manager import DBManager
+from slips_files.core.output import Output
+
+
+class IExporter(IObservable, ABC):
+ def __init__(self, logger: Output, db: DBManager, **kwargs):
+ IObservable.__init__(self)
+ self.logger = logger
+ self.add_observer(self.logger)
+ self.db = db
+ self.init(**kwargs)
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ pass
+
+ def print(self, text, verbose=1, debug=0, log_to_logfiles_only=False):
+ """
+ Function to use to print text using the outputqueue of slips.
+ Slips then decides how, when and where to print this text
+ by taking all the processes into account
+ :param verbose:
+ 0 - don't print
+ 1 - basic operation/proof of work
+ 2 - log I/O operations and filenames
+ 3 - log database/profile/timewindow changes
+ :param debug:
+ 0 - don't print
+ 1 - print exceptions
+ 2 - unsupported and unhandled types (cases that may cause errors)
+            3 - red warnings that need examination - developer warnings
+ :param text: text to print. Can include format
+ like 'Test {}'.format('here')
+ :param log_to_logfiles_only: logs to slips.log only, not to cli
+ """
+ self.notify_observers(
+ {
+ "from": self.name,
+ "txt": str(text),
+ "verbose": verbose,
+ "debug": debug,
+ "log_to_logfiles_only": log_to_logfiles_only,
+ }
+ )
+
+ @abstractmethod
+ def init(self):
+ """
+ handles the initialization of exporters
+ the goal of this is to have one common __init__() for all
+        exporters, which is the one in this file, and a different init() per
+        exporter.
+        this init will have access to all keyword args passed when
+        initializing the exporter
+ """
+
+ @abstractmethod
+ def export(self, *args, **kwargs):
+ """exports evidence/alerts to the destination"""
+
+ @abstractmethod
+ def shutdown_gracefully(self):
+ """Exits gracefully"""
+
+ @abstractmethod
+ def should_export(self) -> bool:
+ """Determines whether to export or not"""
+
+ @abstractmethod
+ def read_configuration(self):
+ """Reads configuration"""
diff --git a/slips_files/common/abstracts/module.py b/slips_files/common/abstracts/module.py
index 0f3f18225..e08c280d6 100644
--- a/slips_files/common/abstracts/module.py
+++ b/slips_files/common/abstracts/module.py
@@ -38,13 +38,28 @@ def __init__(
self.add_observer(self.logger)
self.init(**kwargs)
+ @property
+ @abstractmethod
+ def name(self):
+ pass
+
+ @property
+ @abstractmethod
+ def description(self):
+ pass
+
+ @property
+ @abstractmethod
+ def authors(self):
+ pass
+
@abstractmethod
def init(self, **kwargs):
"""
- all the code that was in the __init__ of all modules, is
- now in this method
+ handles the initialization of modules
the goal of this is to have one common __init__() for all
- modules, which is the one in this file
+ modules, which is the one in this file, and a different init() per
+ module
this init will have access to all keyword args passes when
initializing the module
"""
@@ -112,7 +127,6 @@ def pre_main(self):
This function is for initializations that are
executed once before the main loop
"""
- pass
def get_msg(self, channel_name):
message = self.db.get_message(self.channels[channel_name])
diff --git a/slips_files/core/database/database_manager.py b/slips_files/core/database/database_manager.py
index ed01bfc60..4df193742 100644
--- a/slips_files/core/database/database_manager.py
+++ b/slips_files/core/database/database_manager.py
@@ -629,7 +629,7 @@ def getProfiles(self, *args, **kwargs):
return self.rdb.getProfiles(*args, **kwargs)
def getTWsfromProfile(self, *args, **kwargs):
- return self.rdb.getTWsfromProfile(*args, **kwargs)
+ return self.rdb.get_tws_from_profile(*args, **kwargs)
def get_number_of_tws_in_profile(self, *args, **kwargs):
return self.rdb.get_number_of_tws_in_profile(*args, **kwargs)
@@ -658,9 +658,6 @@ def get_first_twid_for_profile(self, *args, **kwargs):
def get_tw_of_ts(self, *args, **kwargs):
return self.rdb.get_tw_of_ts(*args, **kwargs)
- def add_new_older_tw(self, *args, **kwargs):
- return self.rdb.add_new_older_tw(*args, **kwargs)
-
def add_new_tw(self, *args, **kwargs):
return self.rdb.add_new_tw(*args, **kwargs)
diff --git a/slips_files/core/database/redis_db/database.py b/slips_files/core/database/redis_db/database.py
index 78ab04fbe..62629d15f 100644
--- a/slips_files/core/database/redis_db/database.py
+++ b/slips_files/core/database/redis_db/database.py
@@ -21,15 +21,12 @@
class RedisDB(IoCHandler, AlertHandler, ProfileHandler, IObservable):
- """Main redis db class."""
-
- # this db should be a singelton per port. meaning no 2 instances should be created for the same port at the same
- # time
+    # this db is a singleton per port, meaning no 2 instances
+ # should be created for the same port at the same time
_obj = None
_port = None
# Stores instances per port
_instances = {}
-
supported_channels = {
"tw_modified",
"evidence_added",
@@ -97,7 +94,8 @@ class RedisDB(IoCHandler, AlertHandler, ProfileHandler, IObservable):
# try to reconnect to redis this amount of times in case of connection
# errors before terminating
max_retries = 150
- # to keep track of connection retries. once it reaches max_retries, slips will terminate
+ # to keep track of connection retries. once it reaches max_retries,
+ # slips will terminate
connection_retry = 0
def __new__(
@@ -105,7 +103,8 @@ def __new__(
):
"""
treat the db as a singelton per port
- meaning every port will have exactly 1 single obj of this db at any given time
+ meaning every port will have exactly 1 single obj of this db
+ at any given time
"""
cls.redis_port = redis_port
cls.flush_db = flush_db
@@ -139,8 +138,8 @@ def __init__(
@classmethod
def _set_redis_options(cls):
"""
- Sets the default slips options,
- when using a different port we override it with -p
+ Updates the default slips options based on the -s param,
+ writes the new configs to cls._conf_file
"""
cls._options = {
"daemonize": "yes",
@@ -149,22 +148,25 @@ def _set_redis_options(cls):
"appendonly": "no",
}
- if "-s" in sys.argv:
- # Will save the DB if both the given number of seconds and the given
- # number of write operations against the DB occurred.
- # In the example below the behaviour will be to save:
- # after 30 sec if at least 500 keys changed
- # AOF persistence logs every write operation received by the server,
- # that will be played again at server startup
- # saved the db to /dump.rdb
- cls._options.update(
- {
- "save": "30 500",
- "appendonly": "yes",
- "dir": os.getcwd(),
- "dbfilename": "dump.rdb",
- }
- )
+ if "-s" not in sys.argv:
+ return
+
+ # Will save the DB if both the given number of seconds
+ # and the given number of write operations against the DB
+ # occurred.
+ # In the example below the behaviour will be to save:
+ # after 30 sec if at least 500 keys changed
+ # AOF persistence logs every write operation received by
+ # the server, that will be played again at server startup
+        # save the db to dump.rdb in the cwd
+ cls._options.update(
+ {
+ "save": "30 500",
+ "appendonly": "yes",
+ "dir": os.getcwd(),
+ "dbfilename": "dump.rdb",
+ }
+ )
with open(cls._conf_file, "w") as f:
for option, val in cls._options.items():
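
For reference, a standalone sketch of what the early-return rewrite of _set_redis_options() produces when -s is passed: the defaults plus RDB/AOF persistence, written one option per line. The conf filename here is a placeholder (Slips writes to cls._conf_file), the defaults dict is abbreviated to the entries visible above, and the "option value" line format is an assumption based on the redis conf syntax:

    import os

    options = {
        "daemonize": "yes",
        "appendonly": "no",
    }
    save_the_db = True  # i.e. "-s" was passed on the command line
    if save_the_db:
        options.update(
            {
                "save": "30 500",     # dump the db after 30s if >= 500 keys changed
                "appendonly": "yes",  # AOF: log every write, replayed at startup
                "dir": os.getcwd(),
                "dbfilename": "dump.rdb",
            }
        )

    with open("redis_sketch.conf", "w") as f:
        for option, val in options.items():
            f.write(f"{option} {val}\n")  # redis conf format: "option value"
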
diff --git a/slips_files/core/database/redis_db/profile_handler.py b/slips_files/core/database/redis_db/profile_handler.py
index b598dfeca..da355a298 100644
--- a/slips_files/core/database/redis_db/profile_handler.py
+++ b/slips_files/core/database/redis_db/profile_handler.py
@@ -7,7 +7,6 @@
from typing import (
Tuple,
Union,
- Dict,
Optional,
List,
Set,
@@ -91,31 +90,36 @@ def set_dhcp_flow(self, profileid, twid, requested_addr, uid):
def get_timewindow(self, flowtime, profileid):
"""
This function returns the TW in the database where the flow belongs.
- If the TW is not there, we create as many tw as necessary in the future
- or past until we get the correct TW for this flow.
- - We use this function to avoid retrieving all the data from the DB
- for the complete profile.
- We use a separate table for the TW per profile.
- -- Returns the time window id
- THIS IS NOT WORKING:
- - The empty tws in the middle are not being created!!!
- - The Dtp ips are stored in the first tw
+ Returns the time window id
+ DISCLAIMER:
+
+ if the given flowtime is == the starttime of a tw, it will
+ belong to that tw
+ if it is == the end of a tw, it will belong to the next one
+ for example,
+        a flow with ts = 2 belongs to tw2
+ a flow with ts = 4 belongs to tw3
+
+ tw1 tw2 tw3 tw4
+ 0 ──────┬─────┬──────┬──────
+ │ │ │
+ 2 4 6
+
"""
# If the option for only-one-tw was selected, we should
# create the TW at least 100 years before the flowtime,
# to cover for 'flows in the past'. Which means we should
# cover for any flow that is coming later with time before the
# first flow
+ flowtime = float(flowtime)
if self.width == 9999999999:
# Seconds in 1 year = 31536000
tw_start = float(flowtime - (31536000 * 100))
tw_number: int = 1
else:
starttime_of_first_tw: str = self.r.hget("analysis", "file_start")
-
if starttime_of_first_tw:
starttime_of_first_tw = float(starttime_of_first_tw)
- flowtime = float(flowtime)
tw_number: int = (
floor((flowtime - starttime_of_first_tw) / self.width) + 1
)
@@ -130,7 +134,6 @@ def get_timewindow(self, flowtime, profileid):
tw_id: str = f"timewindow{tw_number}"
- # Add this TW, of this profile, to the DB
self.add_new_tw(profileid, tw_id, tw_start)
return tw_id
@@ -1085,7 +1088,7 @@ def getProfiles(self):
profiles = self.r.smembers("profiles")
return profiles if profiles != set() else {}
- def getTWsfromProfile(self, profileid):
+ def get_tws_from_profile(self, profileid):
"""
Receives a profile id and returns the list of all the TW in that profile
Returns a list of tuples (twid, ts) or an empty list
@@ -1101,7 +1104,7 @@ def get_number_of_tws_in_profile(self, profileid) -> int:
Receives a profile id and returns the number of all the
TWs in that profile
"""
- return len(self.getTWsfromProfile(profileid)) if profileid else 0
+ return len(self.get_tws_from_profile(profileid)) if profileid else 0
def get_srcips_from_profile_tw(self, profileid, twid):
"""
@@ -1209,37 +1212,6 @@ def get_tw_of_ts(self, profileid, time) -> Optional[Tuple[str, float]]:
return data
- def add_new_older_tw(
- self, profileid: str, tw_start_time: float, tw_number: int
- ):
- """
- Creates or adds a new timewindow that is OLDER than the
- first we have
- :param tw_start_time: start time of timewindow to add
- :param tw_number: number of timewindow to add
- Returns the id of the timewindow just created
- """
- try:
- twid: str = f"timewindow{tw_number}"
- timewindows: Dict[str, float] = {twid: tw_start_time}
- self.r.zadd(f"tws{profileid}", timewindows)
-
- self.print(
- f"Created and added to DB the new older "
- f"TW with id {twid}. Time: {tw_start_time} ",
- 0,
- 4,
- )
-
- # The creation of a TW now does not imply that it was modified.
- # You need to put data to mark is at modified
- return twid
- except redis.exceptions.ResponseError as e:
- self.print("error in addNewOlderTW in database.py", 0, 1)
- self.print(type(e), 0, 1)
- self.print(e, 0, 1)
- self.print(traceback.format_exc(), 0, 1)
-
def add_new_tw(self, profileid, timewindow: str, startoftw: float):
"""
Creates or adds a new timewindow to the list of tw for the
diff --git a/slips_files/core/database/sqlite_db/database.py b/slips_files/core/database/sqlite_db/database.py
index ef24946e3..4792ea67c 100644
--- a/slips_files/core/database/sqlite_db/database.py
+++ b/slips_files/core/database/sqlite_db/database.py
@@ -324,7 +324,8 @@ def add_alert(self, alert: dict):
# 'alerts': 'alert_id TEXT PRIMARY KEY, alert_time TEXT, ip_alerted TEXT,
# timewindow TEXT, tw_start TEXT, tw_end TEXT, label TEXT'
self.execute(
- "INSERT OR REPLACE INTO alerts (alert_id, ip_alerted, timewindow, tw_start, tw_end, label, alert_time) "
+ "INSERT OR REPLACE INTO alerts "
+ "(alert_id, ip_alerted, timewindow, tw_start, tw_end, label, alert_time) "
"VALUES (?, ?, ?, ?, ?, ?, ?);",
(
alert["alert_ID"],
diff --git a/slips_files/core/evidencehandler.py b/slips_files/core/evidencehandler.py
index 3c27f8db8..17a1c8926 100644
--- a/slips_files/core/evidencehandler.py
+++ b/slips_files/core/evidencehandler.py
@@ -559,22 +559,21 @@ def is_blocking_module_enabled(self) -> bool:
self.is_running_on_interface() and "-p" not in sys.argv
) or custom_flows
- def handle_new_alert(self, alert_ID: str, tw_evidence: dict):
+ def handle_new_alert(self, alert_id: str, tw_evidence: dict):
"""
saves alert details in the db and informs exporting modules about it
"""
- profile, srcip, twid, _ = alert_ID.split("_")
+ profile, srcip, twid, _ = alert_id.split("_")
profileid = f"{profile}_{srcip}"
self.db.set_evidence_causing_alert(
- profileid, twid, alert_ID, self.IDs_causing_an_alert
+ profileid, twid, alert_id, self.IDs_causing_an_alert
)
        # when an alert is generated, we should set the threat level of the
        # attacker's profile to 1 (critical) and confidence to 1
# so that it gets reported to other peers with these numbers
self.db.update_threat_level(profileid, "critical", 1)
-
alert_details = {
- "alert_ID": alert_ID,
+ "alert_ID": alert_id,
"profileid": profileid,
"twid": twid,
}
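For reference, the unpacking above implies alert ids of the form profile_<srcip>_<twid>_<id>. The value below is hypothetical and only illustrates the split:

    alert_id = "profile_192.168.1.5_timewindow3_0aa4b7d2"  # made-up id
    profile, srcip, twid, _ = alert_id.split("_")
    profileid = f"{profile}_{srcip}"

    assert profileid == "profile_192.168.1.5"
    assert twid == "timewindow3"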
diff --git a/slips_files/core/helpers/flow_handler.py b/slips_files/core/helpers/flow_handler.py
index 089fd22ba..b4f97cc0a 100644
--- a/slips_files/core/helpers/flow_handler.py
+++ b/slips_files/core/helpers/flow_handler.py
@@ -5,6 +5,7 @@
import os
from dataclasses import asdict
from typing import Tuple
+
from slips_files.core.flows.suricata import SuricataFile
from slips_files.core.flows.zeek import DHCP
from slips_files.common.slips_utils import utils
@@ -126,15 +127,18 @@ def make_sure_theres_a_uid(self):
def handle_conn(self):
role = "Client"
daddr_as_obj = ipaddress.ip_address(self.flow.daddr)
- # this identified the tuple, it's a combination of daddr, dport and proto
- # this is legacy code and refactoring it will break many things, so i wont:D
+        # this identifies the tuple; it's a combination
+        # of daddr, dport and proto
+        # this is legacy code and refactoring it will
+        # break many things, so i won't :D
tupleid = f"{daddr_as_obj}-{self.flow.dport}-{self.flow.proto}"
# Compute the symbol for this flow, for this TW, for this profile.
        # The symbol is based on the 'letters' of the original Stratosphere IPS tool
symbol: Tuple = self.symbol.compute(self.flow, self.twid, "OutTuples")
- # Change symbol for its internal data. Symbol is a tuple and is confusing if we ever change the API
+ # Change symbol for its internal data. Symbol is a tuple and is
+ # confusing if we ever change the API
# Add the out tuple
self.db.add_tuple(
self.profileid, self.twid, tupleid, symbol, role, self.flow
diff --git a/tests/test_flow_handler.py b/tests/test_flow_handler.py
index 8de524b55..e26a51e31 100644
--- a/tests/test_flow_handler.py
+++ b/tests/test_flow_handler.py
@@ -1,11 +1,17 @@
from tests.module_factory import ModuleFactory
import pytest
+from unittest.mock import Mock, call
+from slips_files.core.helpers.flow_handler import FlowHandler
+from slips_files.core.flows.zeek import DHCP
+import json
+from dataclasses import asdict
+
def test_is_supported_flow_not_ts(flow, mock_db):
flow.starttime = None
flow_handler = ModuleFactory().create_flow_handler_obj(flow, mock_db)
- assert flow_handler.is_supported_flow() == False
+ assert flow_handler.is_supported_flow() is False
@pytest.mark.parametrize(
@@ -23,3 +29,332 @@ def test_is_supported_flow_without_ts(
flow.type_ = flow_type
flow_handler = ModuleFactory().create_flow_handler_obj(flow, mock_db)
assert flow_handler.is_supported_flow() == expected_val
+
+
+# testing handle_dns
+def test_handle_dns():
+ mock_db = Mock()
+ flow = Mock()
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.twid = "timewindow_id"
+ flow_handler.profileid = "profile_id"
+ flow_handler.handle_dns()
+
+ mock_db.add_out_dns.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_ftp
+def test_handle_ftp():
+ mock_db = Mock()
+ flow = Mock()
+    flow.used_port = 21  # FTP control connections typically use port 21
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_ftp()
+
+ mock_db.set_ftp_port.assert_called_with(21)
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_http
+def test_handle_http():
+ mock_db = Mock()
+ flow = Mock()
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_http()
+
+ mock_db.add_out_http.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_ssl
+def test_handle_ssl(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_ssl()
+
+ mock_db.add_out_ssl.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_ssh
+def test_handle_ssh(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_ssh()
+
+ mock_db.add_out_ssh.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_weird
+def test_handle_weird(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_weird()
+
+ expected_payload = {
+ "profileid": flow_handler.profileid,
+ "twid": flow_handler.twid,
+ "flow": asdict(flow),
+ }
+ mock_db.publish.assert_called_with(
+ "new_weird", json.dumps(expected_payload)
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_tunnel
+def test_handle_tunnel(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow_handler.handle_tunnel()
+
+ expected_payload = {
+ "profileid": flow_handler.profileid,
+ "twid": flow_handler.twid,
+ "flow": asdict(flow),
+ }
+ mock_db.publish.assert_called_with(
+ "new_tunnel", json.dumps(expected_payload)
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_conn
+def test_handle_conn(flow, mock_db, mocker):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow.daddr = "192.168.1.1"
+ flow.dport = 80
+ flow.proto = "tcp"
+
+ mock_symbol = mocker.Mock()
+ mock_symbol.compute.return_value = ("A", "B", "C")
+ flow_handler.symbol = mock_symbol
+
+ flow_handler.handle_conn()
+
+ mock_db.add_tuple.assert_called_with(
+ flow_handler.profileid,
+ flow_handler.twid,
+ "192.168.1.1-80-tcp",
+ ("A", "B", "C"),
+ "Client",
+ flow,
+ )
+ mock_db.add_ips.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow, "Client"
+ )
+ mock_db.add_port.assert_has_calls(
+ [
+ call(
+ flow_handler.profileid,
+ flow_handler.twid,
+ flow,
+ "Client",
+ "Dst",
+ ),
+ call(
+ flow_handler.profileid,
+ flow_handler.twid,
+ flow,
+ "Client",
+ "Src",
+ ),
+ ]
+ )
+ mock_db.add_flow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+ mock_db.add_mac_addr_to_profile.assert_called_with(
+ flow_handler.profileid, flow.smac
+ )
+ if not flow_handler.running_non_stop:
+ flow_handler.publisher.new_MAC.assert_has_calls(
+ [call(flow.smac, flow.saddr), call(flow.dmac, flow.daddr)]
+ )
+
+
+# testing handle_files
+def test_handle_files(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+
+ flow_handler.handle_files()
+
+ expected_payload = {
+ "flow": asdict(flow),
+ "type": "zeek",
+ "profileid": flow_handler.profileid,
+ "twid": flow_handler.twid,
+ }
+ mock_db.publish.assert_called_with(
+ "new_downloaded_file", json.dumps(expected_payload)
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_arp
+def test_handle_arp(flow, mock_db, mocker):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow.dmac = "aa:bb:cc:dd:ee:ff"
+ flow.smac = "ff:ee:dd:cc:bb:aa"
+ flow.daddr = "192.168.1.1"
+ flow.saddr = "192.168.1.2"
+
+ mock_publisher = mocker.Mock()
+ flow_handler.publisher = mock_publisher
+
+ flow_handler.handle_arp()
+
+ expected_payload = {
+ "flow": asdict(flow),
+ "profileid": flow_handler.profileid,
+ "twid": flow_handler.twid,
+ }
+ mock_db.publish.assert_called_with("new_arp", json.dumps(expected_payload))
+ mock_db.add_mac_addr_to_profile.assert_called_with(
+ flow_handler.profileid, flow.smac
+ )
+ mock_publisher.new_MAC.assert_has_calls(
+ [call(flow.dmac, flow.daddr), call(flow.smac, flow.saddr)]
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_smtp
+def test_handle_smtp(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+
+ flow_handler.handle_smtp()
+
+ expected_payload = {
+ "flow": asdict(flow),
+ "profileid": flow_handler.profileid,
+ "twid": flow_handler.twid,
+ }
+ mock_db.publish.assert_called_with(
+ "new_smtp", json.dumps(expected_payload)
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_software
+def test_handle_software(flow, mock_db, mocker):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+
+ mock_publisher = mocker.Mock()
+ flow_handler.publisher = mock_publisher
+
+ flow_handler.handle_software()
+
+ mock_db.add_software_to_profile.assert_called_with(
+ flow_handler.profileid, flow
+ )
+ mock_publisher.new_software.assert_called_with(
+ flow_handler.profileid, flow
+ )
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_notice
+def test_handle_notice(flow, mock_db):
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+ flow.note = "Gateway_addr_identified: 192.168.1.1"
+ flow.msg = "Gateway_addr_identified: 192.168.1.1"
+
+ flow_handler.handle_notice()
+
+ mock_db.add_out_notice.assert_called_with(
+ flow_handler.profileid, flow_handler.twid, flow
+ )
+ mock_db.set_default_gateway.assert_called_with("IP", "192.168.1.1")
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
+
+
+# testing handle_dhcp
+def test_handle_dhcp(mock_db, mocker):
+ flow = DHCP(
+ starttime=1234567890,
+ uids=["uid1", "uid2", "uid3"],
+ smac="aa:bb:cc:dd:ee:ff",
+ saddr="192.168.1.1",
+ server_addr="192.168.1.1",
+ daddr="192.168.1.2",
+ client_addr="192.168.1.3",
+ host_name="test-host",
+ requested_addr="192.168.1.4",
+ )
+ flow_handler = FlowHandler(mock_db, None, flow)
+ flow_handler.profileid = "profile_id"
+ flow_handler.twid = "timewindow_id"
+
+ mock_publisher = mocker.Mock()
+ flow_handler.publisher = mock_publisher
+
+ flow_handler.handle_dhcp()
+
+ mock_publisher.new_MAC.assert_called_with(flow.smac, flow.saddr)
+ mock_db.add_mac_addr_to_profile.assert_called_with(
+ flow_handler.profileid, flow.smac
+ )
+ mock_db.store_dhcp_server.assert_called_with("192.168.1.1")
+ mock_db.mark_profile_as_dhcp.assert_called_with(flow_handler.profileid)
+ mock_publisher.new_dhcp.assert_called_with(flow_handler.profileid, flow)
+
+ for uid in flow.uids:
+ flow.uid = uid
+ mock_db.add_altflow.assert_called_with(
+ flow, flow_handler.profileid, flow_handler.twid, "benign"
+ )
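Several of the tests above rely on unittest.mock's call/assert_has_calls pair. A minimal standalone example of that pattern (names are made up):

    from unittest.mock import Mock, call

    db = Mock()
    db.add_port("profile", "tw", "flow", "Client", "Dst")
    db.add_port("profile", "tw", "flow", "Client", "Src")

    # passes: both calls were made, in this order
    db.add_port.assert_has_calls(
        [
            call("profile", "tw", "flow", "Client", "Dst"),
            call("profile", "tw", "flow", "Client", "Src"),
        ]
    )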
diff --git a/tests/test_horizontal_portscans.py b/tests/test_horizontal_portscans.py
index 5f721118b..c4e4fe4e6 100644
--- a/tests/test_horizontal_portscans.py
+++ b/tests/test_horizontal_portscans.py
@@ -31,7 +31,8 @@ def enough_dstips_to_reach_the_threshold(mock_db):
# get a random list of ints(ports) that are below the threshold
# Generate a random number between 0 and threshold
amount_of_dstips: int = random.randint(
- module.port_scan_minimum_dips, module.port_scan_minimum_dips + 100
+ module.minimum_dstips_to_set_evidence,
+ module.minimum_dstips_to_set_evidence + 100,
)
dport = 5555
res = {dport: {"dstips": {"8.8.8.8": {"dstports": random_ports}}}}
@@ -49,9 +50,9 @@ def enough_dstips_to_reach_the_threshold(mock_db):
[
(0, 5, True),
(5, 6, False),
- (5, 8, False),
- (5, 15, True),
- (15, 20, True),
+ (5, 15, False),
+ (15, 29, False),
+ (15, 30, True),
],
)
def test_check_if_enough_dstips_to_trigger_an_evidence(
@@ -69,8 +70,8 @@ def test_check_if_enough_dstips_to_trigger_an_evidence(
horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- key: str = horizontal_ps.get_cache_key(profileid, timewindow, dport)
- horizontal_ps.cached_tw_thresholds[key] = prev_amount_of_dstips
+ key: str = horizontal_ps.get_twid_identifier(profileid, timewindow, dport)
+ horizontal_ps.cached_thresholds_per_tw[key] = prev_amount_of_dstips
enough: bool = horizontal_ps.check_if_enough_dstips_to_trigger_an_evidence(
key, cur_amount_of_dstips
@@ -88,7 +89,7 @@ def test_check_if_enough_dstips_to_trigger_an_evidence_no_cache(mock_db):
timewindow = "timewindow0"
dport = 5555
- key = horizontal_ps.get_cache_key(profileid, timewindow, dport)
+ key = horizontal_ps.get_twid_identifier(profileid, timewindow, dport)
cur_amount_of_dstips = 10
enough = horizontal_ps.check_if_enough_dstips_to_trigger_an_evidence(
@@ -105,7 +106,7 @@ def test_check_if_enough_dstips_to_trigger_an_evidence_less_than_minimum(
timewindow = "timewindow0"
dport = 5555
- key = horizontal_ps.get_cache_key(profileid, timewindow, dport)
+ key = horizontal_ps.get_twid_identifier(profileid, timewindow, dport)
cur_amount_of_dstips = 3
enough = horizontal_ps.check_if_enough_dstips_to_trigger_an_evidence(
@@ -123,7 +124,7 @@ def not_enough_dstips_to_reach_the_threshold(mock_db):
# get a random list of ints(ports) that are below the threshold
# Generate a random number between 0 and threshold
amount_of_dstips: int = random.randint(
- 0, module.port_scan_minimum_dips - 1
+ 0, module.minimum_dstips_to_set_evidence - 1
)
dport = 5555
res = {dport: {"dstips": {"8.8.8.8": {"dstports": random_ports}}}}
@@ -141,8 +142,8 @@ def test_check_if_enough_dstips_to_trigger_an_evidence_equal_min_dips(mock_db):
profileid = "profile_1.1.1.1"
timewindow = "timewindow0"
dport = 80
- key = horizontal_ps.get_cache_key(profileid, timewindow, dport)
- amount_of_dips = horizontal_ps.port_scan_minimum_dips
+ key = horizontal_ps.get_twid_identifier(profileid, timewindow, dport)
+ amount_of_dips = horizontal_ps.minimum_dstips_to_set_evidence
enough = horizontal_ps.check_if_enough_dstips_to_trigger_an_evidence(
key, amount_of_dips
)
@@ -171,7 +172,7 @@ def test_check_if_enough_dstips_to_trigger_an_evidence_min_dstips_threshold(
dports: dict = get_test_conns(mock_db)
mock_db.get_data_from_profile_tw.return_value = dports
- cache_key = horizontal_ps.get_cache_key(profileid, timewindow, dport)
+ cache_key = horizontal_ps.get_twid_identifier(profileid, timewindow, dport)
amount_of_dips = len(dports[dport]["dstips"])
assert (
@@ -309,14 +310,14 @@ def test_get_packets_sent_invalid_values(mock_db):
horizontal_ps.get_packets_sent(dstips)
-def test_get_cache_key():
+def test_get_twid_identifier():
horizontal_ps = HorizontalPortscan(MagicMock())
profileid = "profile_1.1.1.1"
twid = "timewindow0"
dport = 80
- cache_key = horizontal_ps.get_cache_key(profileid, twid, dport)
- expected_key = f"{profileid}:{twid}:dport:{dport}:HorizontalPortscan"
+ cache_key = horizontal_ps.get_twid_identifier(profileid, twid, dport)
+ expected_key = f"{profileid}:{twid}:dport:{dport}"
assert cache_key == expected_key
@@ -326,7 +327,7 @@ def test_get_cache_key_empty_dport():
twid = "timewindow0"
dport = ""
- cache_key = horizontal_ps.get_cache_key(profileid, twid, dport)
+ cache_key = horizontal_ps.get_twid_identifier(profileid, twid, dport)
assert cache_key is False
@@ -336,7 +337,7 @@ def test_get_cache_key_none_dport(mock_db):
twid = "timewindow0"
dport = None
- cache_key = horizontal_ps.get_cache_key(profileid, twid, dport)
+ cache_key = horizontal_ps.get_twid_identifier(profileid, twid, dport)
assert cache_key is False
@@ -354,36 +355,6 @@ def test_check_broadcast_or_multicast_address(
mock_get_not_estab_dst_ports.assert_not_called()
-def test_decide_if_time_to_set_evidence_or_combine_empty_alerted(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.alerted_once_horizontal_ps = {}
- evidence = {
- "protocol": "TCP",
- "profileid": "profile_1.1.1.1",
- "twid": "timewindow0",
- "uids": ["uid1", "uid2"],
- "dport": 80,
- "pkts_sent": 100,
- "timestamp": "1234.56",
- "state": "Not Established",
- "amount_of_dips": 10,
- }
-
- mock_db.get_port_info.return_value = "HTTP"
- mock_db.set_evidence.return_value = None
-
- cache_key = horizontal_ps.get_cache_key(
- evidence["profileid"], evidence["twid"], evidence["dport"]
- )
- result = horizontal_ps.decide_if_time_to_set_evidence_or_combine(
- evidence, cache_key
- )
-
- assert result is True
- assert horizontal_ps.alerted_once_horizontal_ps[cache_key] is True
- mock_db.set_evidence.assert_called_once()
-
-
def test_set_evidence_horizontal_portscan_empty_port_info(mock_db):
horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
evidence = {
@@ -497,173 +468,20 @@ def test_set_evidence_horizontal_portscan_empty_uids(mock_db):
@pytest.mark.parametrize(
- "number_of_pending_evidence, expected_return_val",
+ "ip, expected_val",
[
- (0, True),
- (1, False),
- (2, False),
- (3, True),
- (6, True),
+ ("224.0.0.1", False),
+ ("255.255.255.255", False),
+ ("invalid", False),
+ ("1.1.1.1", True),
],
)
-def test_combine_evidence(
- number_of_pending_evidence, expected_return_val: bool, mock_db
-):
- """
- first evidence will be alerted, the rest will be combined
- """
- profileid = "profile_1.1.1.1"
- timewindow = "timewindow0"
- dstip = "8.8.8.8"
- dport = 5555
-
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- key: str = horizontal_ps.get_cache_key(profileid, timewindow, dstip)
-
- for evidence_ctr in range(number_of_pending_evidence + 1):
- # this will add 2 evidence to the pending evidence list
- evidence = {
- "protocol": "TCP",
- "profileid": profileid,
- "twid": timewindow,
- "uids": [],
- "uid": [],
- "dport": dport,
- "pkts_sent": 5,
- "timestamp": "1234.54",
- "stime": "1234.54",
- "state": "Not Established",
- "amount_of_dips": 70,
- }
- # in the first iteration, enough_to_combine is gonna be True bc
- # it's the first evidence ever
- # next 2 should be false
-
- enough_to_combine: bool = (
- horizontal_ps.decide_if_time_to_set_evidence_or_combine(
- evidence, key
- )
- )
-
- if evidence_ctr == 0:
- continue
-
- assert enough_to_combine == expected_return_val
-
-
-def test_combine_evidence_different_keys(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {
- "profile_1.1.1.1-timewindow0-Not Established-TCP-80": [
- (1, 10, ["uid1"], 5)
- ],
- "profile_2.2.2.2-timewindow1-Not Established-UDP-53": [
- (2, 20, ["uid2", "uid3"], 10)
- ],
- "profile_3.3.3.3-timewindow2-Not Established-TCP-443": [
- (3, 30, ["uid4"], 15),
- (4, 40, ["uid5"], 20),
- (5, 50, ["uid6"], 25),
- ],
- }
- mock_db.get_port_info.return_value = "HTTP"
- mock_db.set_evidence.return_value = None
-
- horizontal_ps.combine_evidence()
-
- assert mock_db.set_evidence.call_count == 3
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_combine_evidence_empty_pending_evidence(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {}
- mock_db.get_port_info.return_value = "HTTP"
- mock_db.set_evidence.return_value = None
-
- horizontal_ps.combine_evidence()
-
- assert mock_db.set_evidence.call_count == 0
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_combine_evidence_single_pending_evidence(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {
- "profile_1.1.1.1-timewindow0-Not Established-TCP-80": [
- (1, 10, ["uid1"], 5)
- ]
- }
- mock_db.get_port_info.return_value = "HTTP"
- mock_db.set_evidence.return_value = None
-
- horizontal_ps.combine_evidence()
-
- assert mock_db.set_evidence.call_count == 1
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_combine_evidence_no_pending_evidence(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {}
- mock_db.get_port_info.return_value = "HTTP"
- mock_db.set_evidence.return_value = None
-
- horizontal_ps.combine_evidence()
-
- assert mock_db.set_evidence.call_count == 0
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_combine_evidence_multiple_keys(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {
- "profile_1.1.1.1-timewindow0-Not Established-TCP-80": [
- (1, 10, ["uid1"], 5),
- (2, 20, ["uid2"], 10),
- (3, 30, ["uid3"], 15),
- ],
- "profile_2.2.2.2-timewindow1-Not Established-UDP-53": [
- (4, 40, ["uid4"], 20),
- (5, 50, ["uid5"], 25),
- (6, 60, ["uid6"], 30),
- ],
- }
- mock_db.get_port_info.side_effect = ["HTTP", "DNS"]
- mock_db.set_evidence.return_value = None
- horizontal_ps.combine_evidence()
- assert mock_db.set_evidence.call_count == 2
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_combine_evidence_empty_port_info(mock_db):
- horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- horizontal_ps.pending_horizontal_ps_evidence = {
- "profile_1.1.1.1-timewindow0-Not Established-TCP-80": [
- (1, 10, ["uid1"], 5),
- (2, 20, ["uid2"], 10),
- (3, 30, ["uid3"], 15),
- ]
- }
- mock_db.get_port_info.return_value = ""
- mock_db.set_evidence.return_value = None
- horizontal_ps.combine_evidence()
- assert mock_db.set_evidence.call_count == 1
- assert horizontal_ps.pending_horizontal_ps_evidence == {}
-
-
-def test_check_multicast_address(mock_db):
+def test_is_valid_saddr(mock_db, ip, expected_val):
mock_db.get_field_separator.return_value = "_"
horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- profileid = "profile_224.0.0.1"
- twid = "timewindow0"
-
- with patch.object(
- horizontal_ps, "get_not_estab_dst_ports"
- ) as mock_get_not_estab_dst_ports:
- horizontal_ps.check(profileid, twid)
- mock_get_not_estab_dst_ports.assert_not_called()
+ profileid = f"profile_{ip}"
+ assert horizontal_ps.is_valid_saddr(profileid) == expected_val
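The cases above imply that is_valid_saddr rejects multicast, broadcast and unparsable source addresses. A minimal sketch of that kind of check, assuming the profile id ends with the ip (this is only an assumption, not the module's actual implementation):

    import ipaddress

    def is_valid_saddr_sketch(profileid: str) -> bool:
        saddr = profileid.split("_")[-1]
        try:
            ip = ipaddress.ip_address(saddr)
        except ValueError:
            return False  # e.g. "invalid"
        return not (ip.is_multicast or saddr == "255.255.255.255")

    assert is_valid_saddr_sketch("profile_1.1.1.1") is True
    assert is_valid_saddr_sketch("profile_224.0.0.1") is False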
def test_get_resolved_ips(mock_db):
@@ -732,9 +550,7 @@ def test_check_invalid_profileid(mock_db):
horizontal_ps.check(profileid, twid)
-def test_check_invalid_twid(mock_db):
+def test_is_valid_twid(mock_db):
horizontal_ps = ModuleFactory().create_horizontal_portscan_obj(mock_db)
- profileid = "profile_1.1.1.1"
twid = ""
- with pytest.raises(Exception):
- horizontal_ps.check(profileid, twid)
+ assert not horizontal_ps.is_valid_twid(twid)
diff --git a/tests/test_profiler.py b/tests/test_profiler.py
index 66824f4ba..cdd025cc9 100644
--- a/tests/test_profiler.py
+++ b/tests/test_profiler.py
@@ -1,5 +1,4 @@
"""Unit test for slips_files/core/performance_profiler.py"""
-
from unittest.mock import Mock
from tests.module_factory import ModuleFactory
@@ -9,6 +8,9 @@
import json
from slips_files.core.profiler import SUPPORTED_INPUT_TYPES, SEPARATORS
from slips_files.core.flows.zeek import Conn
+import ipaddress
+from unittest.mock import Mock, patch
+import queue
@pytest.mark.parametrize(
@@ -221,3 +223,364 @@ def test_get_rev_profile_no_daddr(flow, mock_db):
profiler.flow.daddr = None
profiler.daddr_as_obj = None
assert profiler.get_rev_profile() == (False, False)
+
+def test_get_rev_profile_existing_profileid(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Conn(
+ '1.0',
+ '1234',
+ '192.168.1.1',
+ '8.8.8.8',
+ 5,
+ 'TCP',
+ 'dhcp',
+        80, 88,
+        20, 20,
+        20, 20,
+        '', '',
+        'Established', ''
+ )
+ mock_db.get_profileid_from_ip.return_value = "existing_profile"
+ mock_db.get_timewindow.return_value = "existing_timewindow"
+ assert profiler.get_rev_profile() == ("existing_profile", "existing_timewindow")
+
+
+def test_get_rev_profile_no_timewindow(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Conn(
+ '1.0',
+ '1234',
+ '192.168.1.1',
+ '8.8.8.8',
+ 5,
+ 'TCP',
+ 'dhcp',
+ 80, 88,
+ 20, 20,
+ 20, 20,
+ '', '',
+ 'Established', ''
+ )
+ mock_db.get_profileid_from_ip.return_value = "profile_8.8.8.8"
+ mock_db.get_timewindow.return_value = None
+
+ profile_id, tw_id = profiler.get_rev_profile()
+ assert profile_id == "profile_8.8.8.8"
+ assert tw_id is None
+
+def test_define_separator_direct_support(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ sample_flow = {'data': 'some_data'}
+ input_type = 'nfdump'
+
+ separator = profiler.define_separator(sample_flow, input_type)
+ assert separator == 'nfdump'
+
+
+@pytest.mark.parametrize('client_ips, expected_private_ips', [
+ (['192.168.1.1', '10.0.0.1'], ['192.168.1.1', '10.0.0.1']),
+ (['8.8.8.8', '1.1.1.1'], []),
+ (['192.168.1.1', '8.8.8.8'], ['192.168.1.1']),
+])
+def test_get_private_client_ips(client_ips, expected_private_ips, mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.client_ips = client_ips
+ with patch('slips_files.core.profiler.utils.is_private_ip') as mock_is_private_ip:
+ def is_private_ip(ip):
+ ip_obj = ipaddress.ip_address(ip)
+            return ip_obj.is_private
+
+ mock_is_private_ip.side_effect = is_private_ip
+
+ private_ips = profiler.get_private_client_ips()
+ assert set(private_ips) == set(expected_private_ips)
+
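For reference, the expectations above match how the standard ipaddress module classifies the RFC1918 ranges:

    import ipaddress

    assert ipaddress.ip_address("192.168.1.1").is_private
    assert ipaddress.ip_address("10.0.0.1").is_private
    assert not ipaddress.ip_address("8.8.8.8").is_private
    assert not ipaddress.ip_address("1.1.1.1").is_private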
+def test_convert_starttime_to_epoch(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.starttime = "2023-04-04 12:00:00"
+
+ with patch('slips_files.core.profiler.utils.convert_format') as mock_convert_format:
+ mock_convert_format.return_value = 1680604800
+
+ profiler.convert_starttime_to_epoch()
+
+ mock_convert_format.assert_called_once_with("2023-04-04 12:00:00", "unixtimestamp")
+ assert profiler.flow.starttime == 1680604800
+
+def test_convert_starttime_to_epoch_invalid_format(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.starttime = "not a real time"
+ monkeypatch.setattr('slips_files.core.profiler.utils.convert_format', Mock(side_effect=ValueError))
+ profiler.convert_starttime_to_epoch()
+ assert profiler.flow.starttime == "not a real time"
+
+def test_should_set_localnet(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+
+ profiler.flow = Mock()
+ profiler.flow.saddr = "192.168.1.1"
+ profiler.is_localnet_set = False
+ assert profiler.should_set_localnet() is True
+
+ profiler.is_localnet_set = True
+ assert profiler.should_set_localnet() is False
+
+ profiler.is_localnet_set = False
+ profiler.flow.saddr = "8.8.8.8"
+ assert profiler.should_set_localnet() is False
+
+def test_should_set_localnet_already_set(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.is_localnet_set = True
+ result = profiler.should_set_localnet()
+ assert result is False
+
+def test_check_for_stop_msg(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ monkeypatch.setattr(profiler, "shutdown_gracefully", Mock())
+ monkeypatch.setattr(profiler, "is_done_processing", Mock())
+ assert profiler.check_for_stop_msg("stop") is True
+ profiler.shutdown_gracefully.assert_called_once()
+ profiler.is_done_processing.assert_called_once()
+
+ assert profiler.check_for_stop_msg("not_stop") is False
+
+def test_pre_main(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+
+ with monkeypatch.context() as m:
+ mock_drop_root_privs = Mock()
+ m.setattr("slips_files.core.profiler.utils.drop_root_privs", mock_drop_root_privs)
+ profiler.pre_main()
+
+ mock_drop_root_privs.assert_called_once()
+
+def test_main(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.profiler_queue = Mock(spec=queue.Queue)
+ profiler.profiler_queue.get.side_effect = ["stop"]
+ profiler.check_for_stop_msg = Mock(return_value=True)
+
+ profiler.main()
+
+ profiler.check_for_stop_msg.assert_called()
+
+
+def mock_print(*args, **kwargs):
+ pass
+
+def test_is_done_processing(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.done_processing = Mock()
+ profiler.is_profiler_done_event = Mock()
+
+ monkeypatch.setattr(profiler, "print", mock_print)
+
+ profiler.is_done_processing()
+
+ profiler.done_processing.release.assert_called_once()
+ profiler.is_profiler_done_event.set.assert_called_once()
+
+@patch("slips_files.core.profiler.Profiler.add_flow_to_profile")
+@patch("slips_files.core.profiler.Profiler.handle_setting_local_net")
+def test_main_flow_processing(mock_handle_setting_local_net, mock_add_flow_to_profile, mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.profiler_queue = Mock(spec=queue.Queue)
+ profiler.profiler_queue.get.side_effect = [
+ {"line": "sample_line", "input_type": "zeek", "total_flows": 100},
+ "stop"
+ ]
+ profiler.check_for_stop_msg = Mock(side_effect=[False, True])
+ monkeypatch.setattr(profiler, "define_separator", Mock(return_value="zeek"))
+ profiler.input = None
+ monkeypatch.setattr(profiler, "input", Mock())
+ profiler.input.process_line = Mock(return_value="sample_flow")
+
+ profiler.main()
+
+ mock_add_flow_to_profile.assert_called_once()
+ mock_handle_setting_local_net.assert_called_once()
+
+
+
+@patch("slips_files.core.profiler.ConfigParser")
+def test_read_configuration(mock_config_parser, mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ mock_conf = mock_config_parser.return_value
+
+ mock_conf.whitelist_path.return_value = "path/to/whitelist"
+ mock_conf.ts_format.return_value = "unixtimestamp"
+ mock_conf.analysis_direction.return_value = "all"
+ mock_conf.label.return_value = "malicious"
+ mock_conf.get_tw_width_as_float.return_value = 1.0
+ mock_conf.client_ips.return_value = ["192.168.1.1", "10.0.0.1"]
+
+ profiler.read_configuration()
+
+ assert profiler.whitelist_path == "path/to/whitelist"
+ assert profiler.timeformat == "unixtimestamp"
+ assert profiler.analysis_direction == "all"
+ assert profiler.label == "malicious"
+ assert profiler.width == 1.0
+ assert profiler.client_ips == ["192.168.1.1", "10.0.0.1"]
+
+
+
+def test_add_flow_to_profile_unsupported_flow(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.type_ = "unsupported"
+ profiler.flow_parser = Mock()
+ profiler.flow_parser.is_supported_flow.return_value = False
+
+ result = profiler.add_flow_to_profile()
+ assert result is False
+
+@patch("slips_files.core.profiler.FlowHandler")
+def test_store_features_going_out(mock_flow_handler, mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.type_ = "conn"
+ profiler.flow_parser = mock_flow_handler.return_value
+ profiler.profileid = "profile_test"
+ profiler.twid = "twid_test"
+
+ profiler.store_features_going_out()
+
+ profiler.flow_parser.handle_conn.assert_called_once()
+
+def test_store_features_going_in_non_conn_flow(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock(type_='dns', saddr='192.168.1.1', dport=53, proto='UDP')
+ profiler.saddr_as_obj = ipaddress.ip_address('192.168.1.1')
+ profileid = 'profile_test_dns'
+ twid = 'tw_test_dns'
+ profiler.store_features_going_in(profileid, twid)
+ mock_db.add_tuple.assert_not_called()
+ mock_db.add_ips.assert_not_called()
+ mock_db.add_port.assert_not_called()
+ mock_db.add_flow.assert_not_called()
+ mock_db.mark_profile_tw_as_modified.assert_not_called()
+
+def test_store_features_going_out_unsupported_type(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.type_ = "unsupported_type"
+ profiler.flow_parser = Mock()
+ result = profiler.store_features_going_out()
+ profiler.flow_parser.handle_conn.assert_not_called()
+ assert result is False
+
+def test_handle_in_flows_valid_daddr(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock(type_='conn', daddr='8.8.8.8')
+ profiler.get_rev_profile = Mock(return_value=('rev_profile', 'rev_twid'))
+ profiler.store_features_going_in = Mock()
+
+ profiler.handle_in_flows()
+
+ profiler.get_rev_profile.assert_called_once()
+ profiler.store_features_going_in.assert_called_once_with('rev_profile', 'rev_twid')
+
+
+def test_shutdown_gracefully(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.rec_lines = 100
+ monkeypatch.setattr(profiler, "print", Mock())
+ profiler.shutdown_gracefully()
+ profiler.print.assert_called_with("Stopping. Total lines read: 100", log_to_logfiles_only=True)
+
+def test_init_pbar(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.notify_observers = Mock()
+ total_flows = 500
+
+ profiler.init_pbar(total_flows)
+
+ profiler.notify_observers.assert_called_once_with({
+ 'bar': 'init',
+ 'bar_info': {
+ 'input_type': profiler.input_type,
+ 'total_flows': total_flows
+ }
+ })
+ assert profiler.supported_pbar is True
+
+def test_get_local_net_from_flow(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.saddr = '10.0.0.1'
+ profiler.client_ips = []
+ local_net = profiler.get_local_net()
+
+ assert local_net == '10.0.0.0/8'
+
+
+
+@pytest.mark.parametrize('client_ips, expected_cidr', [
+ (['192.168.1.1'], '192.168.0.0/16'),
+ (['172.16.0.1'], '172.16.0.0/12'),
+ ([], '192.168.0.0/16')
+])
+def test_get_local_net(client_ips, expected_cidr, mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.client_ips = client_ips
+ profiler.flow = Mock()
+ profiler.flow.saddr = '192.168.1.1'
+
+ local_net = profiler.get_local_net()
+ assert local_net == expected_cidr
+
+
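A minimal sketch of the mapping these cases expect, returning the RFC1918 supernet that a private address falls into (an assumption about the behaviour, not the module's actual code):

    import ipaddress

    RFC1918 = [
        ipaddress.ip_network("10.0.0.0/8"),
        ipaddress.ip_network("172.16.0.0/12"),
        ipaddress.ip_network("192.168.0.0/16"),
    ]

    def guess_local_net(ip: str) -> str:
        addr = ipaddress.ip_address(ip)
        for net in RFC1918:
            if addr in net:
                return str(net)
        raise ValueError(f"{ip} is not an RFC1918 address")

    assert guess_local_net("10.0.0.1") == "10.0.0.0/8"
    assert guess_local_net("172.16.0.1") == "172.16.0.0/12"
    assert guess_local_net("192.168.1.1") == "192.168.0.0/16"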
+def test_handle_setting_local_net_when_already_set(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.is_localnet_set = True
+ profiler.handle_setting_local_net()
+ mock_db.set_local_network.assert_not_called()
+
+def test_handle_setting_local_net(mock_db, monkeypatch):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.flow = Mock()
+ profiler.flow.saddr = "192.168.1.1"
+
+ monkeypatch.setattr(profiler, "should_set_localnet", Mock(return_value=True))
+
+ monkeypatch.setattr(profiler, "get_local_net", Mock(return_value="192.168.1.0/24"))
+
+ profiler.handle_setting_local_net()
+
+ profiler.db.set_local_network.assert_called_once_with("192.168.1.0/24")
+
+def test_notify_observers_no_observers(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ test_msg = {'action': 'test'}
+ try:
+ profiler.notify_observers(test_msg)
+ except Exception as e:
+ pytest.fail(f"Unexpected error occurred: {e}")
+
+def test_notify_observers(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ observer_mock = Mock()
+ profiler.observers.append(observer_mock)
+ test_msg = {'test': 'message'}
+ profiler.notify_observers(test_msg)
+ observer_mock.update.assert_called_once_with(test_msg)
+
+def test_notify_observers_with_correct_message(mock_db):
+ observer_mock = Mock()
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.observers.append(observer_mock)
+ test_msg = {'action': 'test_action'}
+ profiler.notify_observers(test_msg)
+ observer_mock.update.assert_called_once_with(test_msg)
+
+
+def test_should_stop_false(mock_db):
+ profiler = ModuleFactory().create_profiler_obj(mock_db)
+ profiler.some_condition = False
+ assert profiler.should_stop() is False
+
+
diff --git a/tests/test_vertical_portscans.py b/tests/test_vertical_portscans.py
index 1558923e1..654b617d9 100644
--- a/tests/test_vertical_portscans.py
+++ b/tests/test_vertical_portscans.py
@@ -20,7 +20,7 @@ def not_enough_dports_to_reach_the_threshold(mock_db):
# get a random list of ints(ports) that are below the threshold
# Generate a random number between 0 and threshold
amount_of_dports: int = random.randint(
- 0, module.port_scan_minimum_dports - 1
+ 0, module.minimum_dports_to_set_evidence - 1
)
ip: str = "8.8.8.8"
@@ -45,7 +45,7 @@ def enough_dports_to_reach_the_threshold(mock_db):
# get a random list of ints(ports) that are below the threshold
# Generate a random number between 0 and threshold
amount_of_dports: int = random.randint(
- module.port_scan_minimum_dports, 100
+ module.minimum_dports_to_set_evidence, 100
)
ip: str = "8.8.8.8"
@@ -72,7 +72,7 @@ def not_enough_dports_to_combine_1_evidence(mock_db):
# get a random list of ints(ports) that are below the threshold
# Generate a random number between 0 and threshold
amount_of_dports: int = random.randint(
- module.port_scan_minimum_dports, 100
+ module.minimum_dports_to_set_evidence, 100
)
ip: str = "8.8.8.8"
@@ -106,7 +106,7 @@ def test_min_dports_threshold(
conns: dict = get_test_conns(mock_db)
mock_db.get_data_from_profile_tw.return_value = conns
- cache_key = vertical_ps.get_cache_key(profileid, timewindow, dstip)
+ cache_key = vertical_ps.get_twid_identifier(profileid, timewindow, dstip)
amount_of_dports = len(conns[dstip]["dstports"])
assert (
@@ -118,76 +118,22 @@ def test_min_dports_threshold(
@pytest.mark.parametrize(
- "number_of_pending_evidence, expected_return_val",
- [
- (0, True),
- (1, False),
- (2, False),
- (3, True),
- (6, True),
- ],
-)
-def test_combining_evidence(
- number_of_pending_evidence, expected_return_val: bool, mock_db
-):
- """
- first evidence will be alerted, the rest will be combined
- """
- profileid = "profile_1.1.1.1"
- timewindow = "timewindow0"
- dstip = "8.8.8.8"
-
- vertical_ps = ModuleFactory().create_vertical_portscan_obj(mock_db)
- key: str = vertical_ps.get_cache_key(profileid, timewindow, dstip)
- # get a random bunch of dstips, this dict is not important
- dstips: dict = enough_dports_to_reach_the_threshold(mock_db)
- amount_of_dports = len(dstips[dstip]["dstports"])
-
- pkts_sent = sum(dstips[dstip]["dstports"].values())
-
- for evidence_ctr in range(number_of_pending_evidence + 1):
- # as if there's 1 pending evience
- # module.pending_vertical_ps_evidence[key].append(1)
- # this will add 2 evidence to the pending evidence list
- evidence = {
- "timestamp": dstips[dstip]["stime"],
- "pkts_sent": pkts_sent,
- "protocol": "TCP",
- "profileid": profileid,
- "twid": timewindow,
- "uid": dstips[dstip]["uid"],
- "amount_of_dports": amount_of_dports,
- "dstip": dstip,
- "state": "Not Established",
- }
- # in the first iteration, enough_to_combine is gonna be True bc
- # it's the first evidence ever
- # next 2 should be false
-
- enough_to_combine = (
- vertical_ps.decide_if_time_to_set_evidence_or_combine(
- evidence, key
- )
- )
-
- if evidence_ctr == 0:
- continue
-
- assert enough_to_combine == expected_return_val
-
-
-@pytest.mark.parametrize(
- "prev_amount_of_dports, cur_amount_of_dports, expected_return_val",
+ "ports_reported_last_evidence, cur_amount_of_dports, expected_return_val",
[
(0, 5, True),
+ (5, 5, False),
+ (5, 4, False),
(5, 6, False),
- (5, 8, False),
- (5, 15, True),
- (15, 20, True),
+ (5, 20, True),
+ (20, 34, False),
+ (20, 35, True),
],
)
def test_check_if_enough_dports_to_trigger_an_evidence(
- mock_db, prev_amount_of_dports, cur_amount_of_dports, expected_return_val
+ mock_db,
+ ports_reported_last_evidence,
+ cur_amount_of_dports,
+ expected_return_val,
):
"""
    slips detects scans based on the number of current dports scanned to the
@@ -201,8 +147,8 @@ def test_check_if_enough_dports_to_trigger_an_evidence(
vertical_ps = ModuleFactory().create_vertical_portscan_obj(mock_db)
- key: str = vertical_ps.get_cache_key(profileid, timewindow, dstip)
- vertical_ps.cached_tw_thresholds[key] = prev_amount_of_dports
+ key: str = vertical_ps.get_twid_identifier(profileid, timewindow, dstip)
+ vertical_ps.cached_thresholds_per_tw[key] = ports_reported_last_evidence
enough: bool = vertical_ps.check_if_enough_dports_to_trigger_an_evidence(
key, cur_amount_of_dports