diff --git a/.env b/.env
index 7698a4f..7b43647 100644
--- a/.env
+++ b/.env
@@ -1,5 +1,5 @@
 COMPOSE_PROJECT_NAME=elastic
-ELK_VERSION=7.12.0
+ELK_VERSION=7.16.2
 
 #----------- Resources --------------------------#
 ELASTICSEARCH_HEAP=1024m
diff --git a/Makefile b/Makefile
index 633e4f6..74cad9e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,15 +1,17 @@
 .DEFAULT_GOAL:=help
-COMPOSE_ALL_FILES := -f docker-compose.yml -f docker-compose.monitor.yml -f docker-compose.tools.yml -f docker-compose.nodes.yml
+COMPOSE_ALL_FILES := -f docker-compose.yml -f docker-compose.monitor.yml -f docker-compose.tools.yml -f docker-compose.nodes.yml -f docker-compose.logs.yml
 COMPOSE_MONITORING := -f docker-compose.yml -f docker-compose.monitor.yml
+COMPOSE_LOGGING := -f docker-compose.yml -f docker-compose.logs.yml
 COMPOSE_TOOLS := -f docker-compose.yml -f docker-compose.tools.yml
 COMPOSE_NODES := -f docker-compose.yml -f docker-compose.nodes.yml
 ELK_SERVICES := elasticsearch logstash kibana
+ELK_LOG_COLLECTION := filebeat
 ELK_MONITORING := elasticsearch-exporter logstash-exporter filebeat-cluster-logs
-ELK_TOOLS := curator elastalert rubban
+ELK_TOOLS := rubban
 ELK_NODES := elasticsearch-1 elasticsearch-2
 ELK_MAIN_SERVICES := ${ELK_SERVICES} ${ELK_MONITORING} ${ELK_TOOLS}
-ELK_ALL_SERVICES := ${ELK_MAIN_SERVICES} ${ELK_NODES}
+ELK_ALL_SERVICES := ${ELK_MAIN_SERVICES} ${ELK_NODES} ${ELK_LOG_COLLECTION}
 
 # --------------------------
 # load .env so that Docker Swarm Commands has .env values too. (https://github.com/moby/moby/issues/29133)
@@ -41,6 +43,9 @@ up:
 monitoring:		## Start ELK Monitoring.
 	@docker-compose ${COMPOSE_MONITORING} up -d --build ${ELK_MONITORING}
 
+collect-docker-logs:	## Start Filebeat that collects all host Docker logs and ships them to ELK.
+	@docker-compose ${COMPOSE_LOGGING} up -d --build ${ELK_LOG_COLLECTION}
+
 tools:			## Start ELK Tools (ElastAlert, Curator).
 	@docker-compose ${COMPOSE_TOOLS} up -d --build ${ELK_TOOLS}
 
@@ -49,6 +54,8 @@ nodes:		## Start Two Extra Elasticsearch Nodes
 build:			## Build ELK and all its extra components.
 	@docker-compose ${COMPOSE_ALL_FILES} build ${ELK_ALL_SERVICES}
 
+ps:				## Show all running containers.
+	@docker-compose ${COMPOSE_ALL_FILES} ps
+
 down:			## Down ELK and all its extra components.
 	@docker-compose ${COMPOSE_ALL_FILES} down
diff --git a/README.md b/README.md
index 4aa2a22..e8caa44 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 
 With tools like Curator, Rubban, ElastAlert for Alerting.
 
-	Elastic Stack Version 7^^
+	Elastic Stack Version 7^^
@@ -29,7 +29,7 @@ Elastic Stack (**ELK**) Docker Composition, preconfigured with **Security**, **M
 
 Based on [Official Elastic Docker Images](https://www.docker.elastic.co/)
 
-Stack Version: [7.12.0](https://www.elastic.co/blog/elastic-stack-7-12-0-released)
+Stack Version: [7.16.2](https://www.elastic.co/blog/elastic-stack-7-16-2-released)
 
 > You can change Elastic Stack version by setting `ELK_VERSION` in `.env` file and rebuild your images. Any version >= 7.0.0 is compatible with this template.
 
 ### Main Features 📜
@@ -171,7 +171,7 @@ $ make prune
 
 * Some Configuration are parameterized in the `.env` file.
   * `ELASTIC_PASSWORD`, user `elastic`'s password (default: `changeme` _pls_).
-  * `ELK_VERSION` Elastic Stack Version (default: `7.12.0`)
+  * `ELK_VERSION` Elastic Stack Version (default: `7.16.2`)
   * `ELASTICSEARCH_HEAP`, how much Elasticsearch allocate from memory (default: 1GB -good for development only-)
   * `LOGSTASH_HEAP`, how much Logstash allocate from memory.
   * Other configurations which their such as cluster name, and node name, etc.
@@ -193,22 +193,10 @@ To Re-generate Keystore:
 make keystore
 ```
 
-### Enable SSL on HTTP
-
-By default, only Transport Layer has SSL Enabled, to enable SSL on HTTP layer, add the following lines to `elasticsearch.yml`
-```yaml
-## - http
-xpack.security.http.ssl.enabled: true
-xpack.security.http.ssl.key: certs/elasticsearch.key
-xpack.security.http.ssl.certificate: certs/elasticsearch.crt
-xpack.security.http.ssl.certificate_authorities: certs/ca.crt
-xpack.security.http.ssl.client_authentication: optional
-```
-
-> ⚠️ Enabling SSL on HTTP layer will require all clients that connect to Elasticsearch to configure SSL connection for HTTP, this includes all the current configured parts of the stack (e.g Logstash, Kibana, Curator, etc) plus any library/binding that connects to Elasticsearch from your application code.
+### Notes
 
-### Notes
+- ⚠️ The Elasticsearch HTTP layer now uses SSL, which means you need to configure your Elasticsearch clients with the `CA` in `secrets/certs/ca/ca.crt`, or configure clients to skip SSL certificate verification (e.g. `--insecure` in `curl`).
 
 - Adding Two Extra Nodes to the cluster will make the cluster depending on them and won't start without them again.
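For reference, a minimal way to talk to the now-HTTPS HTTP layer from the host, assuming the default `elastic`/`changeme` credentials from `.env` and Elasticsearch published on `localhost:9200`:

```bash
# Trust the CA generated by the certs setup container:
curl --cacert secrets/certs/ca/ca.crt -u elastic:changeme https://localhost:9200/_cat/health

# ...or skip certificate verification (local development only):
curl --insecure -u elastic:changeme https://localhost:9200/_cat/health
```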
diff --git a/docker-compose.logs.yml b/docker-compose.logs.yml
new file mode 100644
index 0000000..13231fd
--- /dev/null
+++ b/docker-compose.logs.yml
@@ -0,0 +1,24 @@
+version: '3.5'
+
+# will contain filebeat registry data, so logs aren't re-shipped after a restart.
+volumes:
+  filebeat-data:
+
+services:
+  # Docker Logs Shipper ------------------------------
+  filebeat:
+    image: docker.elastic.co/beats/filebeat:${ELK_VERSION}
+    restart: always
+    # -e flag to log to stderr and disable syslog/file output
+    command: -e --strict.perms=false
+    user: root
+    environment:
+      ELASTIC_USERNAME: ${ELASTIC_USERNAME}
+      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
+      KIBANA_HOST_PORT: ${KIBANA_HOST}:${KIBANA_PORT}
+      ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
+    volumes:
+      - ./filebeat/filebeat.docker.logs.yml:/usr/share/filebeat/filebeat.yml:ro
+      - /var/lib/docker/containers:/var/lib/docker/containers:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - filebeat-data:/var/lib/filebeat/data
\ No newline at end of file
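A usage sketch for the new logging composition: the `make collect-docker-logs` target added in the Makefile wraps the same compose files, and the `filebeat` service name comes from this file.

```bash
# Start the host-wide Docker logs shipper.
make collect-docker-logs

# Check that it is running and follow its own output.
docker-compose -f docker-compose.yml -f docker-compose.logs.yml ps filebeat
docker-compose -f docker-compose.yml -f docker-compose.logs.yml logs -f filebeat
```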
diff --git a/docker-compose.monitor.yml b/docker-compose.monitor.yml
index a3b0a15..b0a9e9e 100644
--- a/docker-compose.monitor.yml
+++ b/docker-compose.monitor.yml
@@ -6,7 +6,7 @@ services:
   elasticsearch-exporter:
     image: justwatch/elasticsearch_exporter:1.1.0
     restart: always
-    command: ["--es.uri", "http://${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}@${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}",
+    command: ["--es.uri", "https://${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}@${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}",
               "--es.all", "--es.snapshots", "--es.indices"]
 
@@ -22,11 +22,7 @@ services:
 
   # Cluster Logs Shipper ------------------------------
   filebeat-cluster-logs:
-    image: filebeat:elastdocker-${ELK_VERSION}
-    build:
-      context: tools/filebeat/
-      args:
-        ELK_VERSION: $ELK_VERSION
+    image: docker.elastic.co/beats/filebeat:${ELK_VERSION}
     restart: always
     # -e flag to log to stderr and disable syslog/file output
     command: -e --strict.perms=false
@@ -35,8 +31,8 @@ services:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
       KIBANA_HOST_PORT: ${KIBANA_HOST}:${KIBANA_PORT}
-      ELASTICSEARCH_HOST_PORT: ${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
+      ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
     volumes:
-      - ./tools/filebeat/monitor/filebeat_cluster_logs.yml:/usr/share/filebeat/filebeat.yml:ro
+      - ./filebeat/filebeat.monitoring.yml:/usr/share/filebeat/filebeat.yml:ro
       - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
\ No newline at end of file
diff --git a/docker-compose.nodes.yml b/docker-compose.nodes.yml
index 13d4e52..8056b45 100644
--- a/docker-compose.nodes.yml
+++ b/docker-compose.nodes.yml
@@ -7,12 +7,8 @@ volumes:
 
 services:
   elasticsearch-1:
-    image: elasticsearch:elastdocker-${ELK_VERSION}
-    build:
-      context: elasticsearch/
-      args:
-        ELK_VERSION: ${ELK_VERSION}
-#    restart: unless-stopped
+    image: docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
+    restart: unless-stopped
     environment:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
@@ -43,12 +39,8 @@ services:
         soft: 200000
         hard: 200000
   elasticsearch-2:
-    image: elasticsearch:elastdocker-${ELK_VERSION}
-    build:
-      context: elasticsearch/
-      args:
-        ELK_VERSION: ${ELK_VERSION}
-    # restart: unless-stopped
+    image: docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
+    restart: unless-stopped
     environment:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
diff --git a/docker-compose.setup.yml b/docker-compose.setup.yml
index 8e68d56..78848d6 100644
--- a/docker-compose.setup.yml
+++ b/docker-compose.setup.yml
@@ -2,10 +2,7 @@ version: '3.5'
 
 services:
   keystore:
-    build:
-      context: elasticsearch/
-      args:
-        ELK_VERSION: ${ELK_VERSION}
+    image: docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
     command: bash /setup/setup-keystore.sh
     user: "0"
     volumes:
@@ -13,15 +10,9 @@ services:
       - ./setup/:/setup/
     environment:
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
-      # Add keystore values used in `keystore.sh` here. (e.g AMAZON S3 Repo Creds)
-      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
-      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
 
   certs:
-    build:
-      context: elasticsearch/
-      args:
-        ELK_VERSION: ${ELK_VERSION}
+    image: docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
     command: bash /setup/setup-certs.sh
     user: "0"
     volumes:
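Both setup jobs now run from the stock Elasticsearch image, so regenerating secrets needs no local build. A sketch of running them directly; the README's `make keystore` target is assumed to wrap the first command:

```bash
# Re-generate the Elasticsearch keystore, then the self-signed certificates.
docker-compose -f docker-compose.setup.yml run --rm keystore
docker-compose -f docker-compose.setup.yml run --rm certs
```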
diff --git a/docker-compose.tools.yml b/docker-compose.tools.yml
index 640746e..1f28ecd 100644
--- a/docker-compose.tools.yml
+++ b/docker-compose.tools.yml
@@ -1,36 +1,6 @@
 version: '3.5'
 
 services:
-  curator:
-    image: curator:elastdocker-${ELK_VERSION}
-    build:
-      context: tools/curator/.
-    restart: unless-stopped
-    environment:
-      ELASTICSEARCH_HOST_PORT: ${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
-      ELASTICSEARCH_HTTP_AUTH: ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}
-    volumes:
-      - ./tools/curator/curator.yml:/root/.curator/curator.yml:ro
-      - ./tools/curator/crontab:/etc/crontab:ro
-      - ./tools/curator/actions/:/actions/:ro
-
-  elastalert:
-    image: sherifabdlnaby/elastalert:3.1.1
-    restart: unless-stopped
-    volumes:
-      - ./tools/elastalert/config/elastalert.yaml:/opt/config/config.yaml
-      - ./tools/elastalert/config/elastalert-test.yaml:/opt/config/config-test.yaml
-      - ./tools/elastalert/config/config.json:/opt/config/config.json
-      - ./tools/elastalert/rules:/opt/elastalert/rules
-      - ./tools/elastalert/rule_templates:/opt/elastalert/rule_templates
-    environment:
-      ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST}
-      ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT}
-      ELASTIC_USERNAME: ${ELASTIC_USERNAME}
-      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
-    ports:
-      - 3030:3030
-
   rubban:
     image: sherifabdlnaby/rubban:latest
     restart: unless-stopped
@@ -41,3 +11,6 @@
       RUBBAN_REFRESHINDEXPATTERN_ENABLED: 'true'
       RUBBAN_REFRESHINDEXPATTERN_SCHEDULE: '*/5 * * * *'
       RUBBAN_REFRESHINDEXPATTERN_PATTERNS: '*'
+      RUBBAN_AUTOINDEXPATTERN_ENABLED: 'true'
+      RUBBAN_AUTOINDEXPATTERN_SCHEDULE: '*/5 * * * *'
+      RUBBAN_AUTOINDEXPATTERN_GENERALPATTERNS: '[{"pattern":"filebeat?","timeFieldName":"@timestamp"},{"pattern":"logstash?","timeFieldName":"@timestamp"}]'
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 8876cd9..a99c5ed 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -26,11 +26,7 @@ secrets:
 
 services:
   elasticsearch:
-    image: elasticsearch:elastdocker-${ELK_VERSION}
-    build:
-      context: elasticsearch/
-      args:
-        ELK_VERSION: ${ELK_VERSION}
+    image: docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
     restart: unless-stopped
     environment:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
@@ -64,40 +60,39 @@ services:
       nofile:
         soft: 200000
         hard: 200000
+    healthcheck:
+      test: ["CMD", "sh", "-c", "curl -sf --insecure https://$ELASTIC_USERNAME:$ELASTIC_PASSWORD@localhost:9200/_cat/health | grep -ioE 'green|yellow' || echo 'not green/yellow cluster status'"]
 
   logstash:
-    image: logstash:elastdocker-${ELK_VERSION}
-    build:
-      context: logstash/
-      args:
-        ELK_VERSION: $ELK_VERSION
+    image: docker.elastic.co/logstash/logstash:${ELK_VERSION}
     restart: unless-stopped
     volumes:
       - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
       - ./logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro
       - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
+    secrets:
+      - source: elastic.ca
+        target: /certs/ca.crt
     environment:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
-      ELASTICSEARCH_HOST_PORT: ${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
+      ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
       LS_JAVA_OPTS: "-Xmx${LOGSTASH_HEAP} -Xms${LOGSTASH_HEAP}"
     ports:
       - "5044:5044"
       - "9600:9600"
+    healthcheck:
+      test: ["CMD", "curl", "-s" ,"-XGET", "http://127.0.0.1:9600"]
 
   kibana:
-    image: kibana:elastdocker-${ELK_VERSION}
-    build:
-      context: kibana/
-      args:
-        ELK_VERSION: $ELK_VERSION
+    image: docker.elastic.co/kibana/kibana:${ELK_VERSION}
     restart: unless-stopped
     volumes:
       - ./kibana/config/:/usr/share/kibana/config:ro
     environment:
       ELASTIC_USERNAME: ${ELASTIC_USERNAME}
       ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
-      ELASTICSEARCH_HOST_PORT: ${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
+      ELASTICSEARCH_HOST_PORT: https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}
     secrets:
       - source: elastic.ca
         target: /certs/ca.crt
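With the healthchecks above, Docker itself tracks container health. A quick way to read it; the container names are an assumption based on the default `elastic` compose project name:

```bash
# Show the health state reported by the new healthchecks.
docker inspect --format '{{.Name}}: {{.State.Health.Status}}' \
  elastic_elasticsearch_1 elastic_logstash_1
```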
diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile
deleted file mode 100644
index 70e33ff..0000000
--- a/elasticsearch/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-ARG ELK_VERSION
-
-# https://github.com/elastic/elasticsearch-docker
-FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
-
-# Add healthcheck
-COPY scripts/docker-healthcheck .
-HEALTHCHECK CMD sh ./docker-healthcheck
-
-# Add your elasticsearch plugins setup here
-# Example: RUN elasticsearch-plugin install analysis-icu
-#RUN elasticsearch-plugin install --batch repository-s3
diff --git a/elasticsearch/config/elasticsearch.yml b/elasticsearch/config/elasticsearch.yml
index d79ce2a..ac0d2e2 100644
--- a/elasticsearch/config/elasticsearch.yml
+++ b/elasticsearch/config/elasticsearch.yml
@@ -24,11 +24,11 @@ xpack.security.transport.ssl.certificate: certs/elasticsearch.crt
 xpack.security.transport.ssl.certificate_authorities: certs/ca.crt
 
 ## - http
-#xpack.security.http.ssl.enabled: true
-#xpack.security.http.ssl.key: certs/elasticsearch.key
-#xpack.security.http.ssl.certificate: certs/elasticsearch.crt
-#xpack.security.http.ssl.certificate_authorities: certs/ca.crt
-#xpack.security.http.ssl.client_authentication: optional
+xpack.security.http.ssl.enabled: true
+xpack.security.http.ssl.key: certs/elasticsearch.key
+xpack.security.http.ssl.certificate: certs/elasticsearch.crt
+xpack.security.http.ssl.certificate_authorities: certs/ca.crt
+xpack.security.http.ssl.client_authentication: optional
 
 # Monitoring
 xpack.monitoring.collection.enabled: true
\ No newline at end of file
diff --git a/elasticsearch/scripts/docker-healthcheck b/elasticsearch/scripts/docker-healthcheck
deleted file mode 100644
index 89f5820..0000000
--- a/elasticsearch/scripts/docker-healthcheck
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -eo pipefail
-
-host="$(hostname --ip-address || echo '127.0.0.1')"
-
-if health="$(curl -fsSL "http://$ELASTIC_USERNAME:$ELASTIC_PASSWORD@$host:9200/_cat/health?h=status")"; then
-	health="$(echo "$health" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" # trim whitespace (otherwise we'll have "green ")
-	if [ "$health" = 'green' ] || [ "$health" = "yellow" ]; then
-		exit 0
-	fi
-	echo >&2 "unexpected health status: $health"
-fi
-
-exit 1
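To confirm Elasticsearch is really serving the generated certificate on its HTTP layer after this change, something like this works from the host (a sketch, assuming the published `9200` port and the CA path used elsewhere in this PR):

```bash
# Verify the certificate chain Elasticsearch presents on its HTTP layer.
openssl s_client -connect localhost:9200 -CAfile secrets/certs/ca/ca.crt </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer
```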
diff --git a/filebeat/filebeat.docker.logs.yml b/filebeat/filebeat.docker.logs.yml
new file mode 100644
index 0000000..010bad7
--- /dev/null
+++ b/filebeat/filebeat.docker.logs.yml
@@ -0,0 +1,82 @@
+#================================== Description ========================================
+# Filebeat config that ships the logs of all Docker containers on the host (except the
+# ELK stack's own containers) to the ELK cluster.
+
+name: filebeat-docker-logs-shipper
+
+filebeat.config:
+  modules:
+    path: ${path.config}/modules.d/*.yml
+    reload.enabled: false
+
+#================================ Autodiscover =======================================
+# Autodiscover all containers except those running Elasticsearch/Logstash/Kibana
+# images, and add a separate input for each container and log type.
+filebeat.autodiscover:
+  providers:
+    - type: docker
+      templates:
+        - condition:
+            and:
+              - not.contains:
+                  docker.container.image: elasticsearch
+              - not.contains:
+                  docker.container.image: logstash
+              - not.contains:
+                  docker.container.image: kibana
+          config:
+            - type: container
+              paths:
+                - /var/lib/docker/containers/${data.docker.container.id}/*.log
+
+processors:
+  - add_cloud_metadata: ~
+
+# Output to Logstash
+output.logstash:
+  hosts: ["logstash:5044"]
+
+#=================================== Kibana ==========================================
+# Enable setting up Kibana
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup:
+  kibana:
+    host: '${KIBANA_HOST_PORT}'
+    username: '${ELASTIC_USERNAME}'
+    password: '${ELASTIC_PASSWORD}'
+
+#==================================== Monitoring =====================================
+# Enable Monitoring Beats
+# Filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch.
+
+# Use deprecated option to avoid current UX bug in 7.3.0 where filebeat creates a
+# standalone monitoring cluster in the monitoring UI.
+# see: https://github.com/elastic/beats/pull/13182
+xpack.monitoring:
+  enabled: true
+  elasticsearch:
+    hosts: '${ELASTICSEARCH_HOST_PORT}'
+    username: '${ELASTIC_USERNAME}'
+    password: '${ELASTIC_PASSWORD}'
+
+
+#monitoring:
+#  enabled: true
+#  elasticsearch:
+#    hosts: '${ELASTICSEARCH_HOST_PORT}'
+#    username: '${ELASTIC_USERNAME}'
+#    password: '${ELASTIC_PASSWORD}'
+
+#================================ HTTP Endpoint ======================================
+# Enabled so we can monitor filebeat using filebeat exporter if needed.
+# Each beat can expose internal metrics through a HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+http.enabled: true
+http.host: 0.0.0.0
+http.port: 5066
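Filebeat can validate this configuration before anything ships; the `filebeat test` subcommands below are the same checks the deleted `tools/filebeat/Dockerfile` used as its healthcheck:

```bash
# Validate the mounted configuration and connectivity to the configured output.
docker-compose -f docker-compose.yml -f docker-compose.logs.yml exec filebeat filebeat test config
docker-compose -f docker-compose.yml -f docker-compose.logs.yml exec filebeat filebeat test output
```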
diff --git a/tools/filebeat/monitor/filebeat_cluster_logs.yml b/filebeat/filebeat.monitoring.yml
similarity index 90%
rename from tools/filebeat/monitor/filebeat_cluster_logs.yml
rename to filebeat/filebeat.monitoring.yml
index 57b9650..ae9d5b4 100644
--- a/tools/filebeat/monitor/filebeat_cluster_logs.yml
+++ b/filebeat/filebeat.monitoring.yml
@@ -2,7 +2,7 @@
 # Filebeat Config to send Elasticsearch/Logstash/Kibana in a docker host to Elasticsea-
 # sh cluster.
 
-name: filebeat-elasticsearch-logs-shipper
+name: filebeat-elk-monitoring
 
 filebeat.config:
   modules:
@@ -73,8 +73,13 @@ processors:
   - add_cloud_metadata: ~
 
 # Output to ES directly.
-output.logstash:
-  hosts: ["logstash:5044"]
+output.elasticsearch:
+  hosts: '${ELASTICSEARCH_HOST_PORT}'
+  username: '${ELASTIC_USERNAME}'
+  password: '${ELASTIC_PASSWORD}'
+  ssl:
+    verification_mode: "none"
+
 
 #=================================== Kibana ==========================================
 # Enable setting up Kibana
@@ -94,7 +99,12 @@ setup:
 # Use deprecated option to avoid current UX bug in 7.3.0 where filebeat creates a
 # standalone monitoring cluster in the monitoring UI.
 # see: https://github.com/elastic/beats/pull/13182
-xpack.monitoring.enabled: false
+xpack.monitoring:
+  enabled: true
+#  elasticsearch:
+#    hosts: '${ELASTICSEARCH_HOST_PORT}'
+#    username: '${ELASTIC_USERNAME}'
+#    password: '${ELASTIC_PASSWORD}'
 
 #monitoring:
 #  enabled: true
@@ -102,6 +112,8 @@ xpack.monitoring.enabled: false
 #    hosts: '${ELASTICSEARCH_HOST_PORT}'
 #    username: '${ELASTIC_USERNAME}'
 #    password: '${ELASTIC_PASSWORD}'
+#    ssl.enabled: true
+#    ssl.verification_mode: none
 
 #================================ HTTP Endpoint ======================================
 # Enabled so we can monitor filebeat using filebeat exporter if needed.
diff --git a/kibana/Dockerfile b/kibana/Dockerfile
deleted file mode 100644
index 2420e9d..0000000
--- a/kibana/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-ARG ELK_VERSION
-
-# https://github.com/elastic/kibana-docker
-FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
-ARG ELK_VERSION
-
-## Add healthcheck
-#COPY scripts/docker-healthcheck .
-#HEALTHCHECK CMD sh ./docker-healthcheck
-
-# Add your kibana plugins setup here
-# Example: RUN kibana-plugin install
-#RUN kibana-plugin install https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.1.0/elastalert-kibana-plugin-1.1.0-${ELK_VERSION}.zip
\ No newline at end of file
diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml
index b27fd65..dc54111 100644
--- a/kibana/config/kibana.yml
+++ b/kibana/config/kibana.yml
@@ -6,7 +6,7 @@ server.name: kibana
 server.host: "0.0.0.0"
 
 # Elasticsearch Connection
-elasticsearch.hosts: [ "http://${ELASTICSEARCH_HOST_PORT}" ]
+elasticsearch.hosts: [ "${ELASTICSEARCH_HOST_PORT}" ]
 
 # SSL settings
 server.ssl.enabled: true
diff --git a/kibana/scripts/docker-healthcheck b/kibana/scripts/docker-healthcheck
deleted file mode 100644
index 3658d1a..0000000
--- a/kibana/scripts/docker-healthcheck
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -eo pipefail
-
-host="$(hostname --ip-address || echo '127.0.0.1')"
-if health="$(curl -fsSkL "https://$ELASTIC_USERNAME:$ELASTIC_PASSWORD@$host:5601/api/status" | python -c "import sys, json; print json.load(sys.stdin)['status']['overall']['state']")"; then
-	health="$(echo "$health" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" # trim whitespace (otherwise we'll have "green ")
-	if [ "$health" = 'green' ]; then
-		exit 0
-	fi
-	echo >&2 "unexpected health status: $health"
-fi
-exit 1
\ No newline at end of file
diff --git a/logstash/Dockerfile b/logstash/Dockerfile
deleted file mode 100644
index 673814e..0000000
--- a/logstash/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-ARG ELK_VERSION
-
-# https://github.com/elastic/logstash-docker
-FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
-
-HEALTHCHECK --interval=240s --timeout=120s --retries=5 \
-    CMD curl -s -XGET 'http://127.0.0.1:9600'
-
-# Add your logstash plugins setup here
-# Example: RUN logstash-plugin install logstash-filter-json
diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml
index ed626d2..7a60964 100644
--- a/logstash/config/logstash.yml
+++ b/logstash/config/logstash.yml
@@ -1,8 +1,9 @@
 ---
 http.host: "0.0.0.0"
-xpack.monitoring.elasticsearch.hosts: ${ELASTICSEARCH_HOST_PORT}
 
 ## X-Pack security credentials
+xpack.monitoring.elasticsearch.hosts: ${ELASTICSEARCH_HOST_PORT}
 xpack.monitoring.enabled: true
 xpack.monitoring.elasticsearch.username: ${ELASTIC_USERNAME}
-xpack.monitoring.elasticsearch.password: ${ELASTIC_PASSWORD}
\ No newline at end of file
+xpack.monitoring.elasticsearch.password: ${ELASTIC_PASSWORD}
+xpack.monitoring.elasticsearch.ssl.certificate_authority: /certs/ca.crt
\ No newline at end of file
diff --git a/logstash/pipeline/main.conf b/logstash/pipeline/main.conf
index 5b4b10e..1198ec8 100644
--- a/logstash/pipeline/main.conf
+++ b/logstash/pipeline/main.conf
@@ -13,5 +13,8 @@ output {
     hosts => "${ELASTICSEARCH_HOST_PORT}"
     user => "${ELASTIC_USERNAME}"
     password => "${ELASTIC_PASSWORD}"
+    ssl => true
+    ssl_certificate_verification => false
+    cacert => "/certs/ca.crt"
   }
 }
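Once Logstash restarts with the CA mounted, its monitoring API on the already-published `9600` port (the same endpoint the new compose healthcheck polls) confirms the pipeline came up:

```bash
# Check Logstash is alive and inspect pipeline status.
curl -s -XGET 'http://localhost:9600/?pretty'
curl -s -XGET 'http://localhost:9600/_node/pipelines?pretty'
```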
diff --git a/tools/curator/Dockerfile b/tools/curator/Dockerfile
deleted file mode 100755
index 8ddd781..0000000
--- a/tools/curator/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM alpine:3.8
-
-ARG CURATOR_VERSION=5.8.3
-ENV CURATOR_VERSION=$CURATOR_VERSION
-
-RUN apk add --no-cache tini python py-pip \
-    && pip install elasticsearch-curator==${CURATOR_VERSION} \
-    && pip install -U pyyaml==3.12
-
-COPY entrypoint.sh /usr/local/bin/entrypoint
-RUN chmod +x /usr/local/bin/entrypoint
-ENTRYPOINT ["entrypoint"]
diff --git a/tools/curator/actions/snapshot.yml b/tools/curator/actions/snapshot.yml
deleted file mode 100755
index db1fc80..0000000
--- a/tools/curator/actions/snapshot.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# Starting from 7.4.0, Automated Snapshot can be configured natively on Elasticsearch (Snapshot Lifecycle Policy)
-#actions:
-#  1:
-#    action: snapshot
-#    description: >-
-#      Snapshot indices to the configured Elasticsearch Repository.
-#      Snapshots into => `elasticsearch-snapshot-%Y.%m.%d-%H:%M:%S`
-#    options:
-#      repository: elasticsearch-backup
-#      name: elasticsearch-snapshot-%Y.%m.%d-%H:%M
-#      include_global_state: True
-#      wait_for_completion: True
-#      ignore_empty_list: True
-#      max_wait: -1
-#      wait_interval: 20
-#      allow_ilm_indices: true
-#    filters:
-#      - filtertype: pattern
-#        kind: regex
-#        value: '^(filebeat-|logstash-|.kibana).*$'
-#  2:
-#    action: delete_snapshots
-#    description: >-
-#      Delete snapshots older than configured
-#    options:
-#      repository: elasticsearch-backup
-#      retry_interval: 30
-#      retry_count: 2
-#      ignore_empty_list: True
-#      disable_action: false
-#    filters:
-#      - filtertype: pattern
-#        kind: prefix
-#        value: elasticsearch-snapshot-
-#      - filtertype: age
-#        source: creation_date
-#        direction: older
-#        unit: days
-#        unit_count: 180
\ No newline at end of file
diff --git a/tools/curator/crontab b/tools/curator/crontab
deleted file mode 100644
index 43210d0..0000000
--- a/tools/curator/crontab
+++ /dev/null
@@ -1,3 +0,0 @@
-# Snapshot Every 1 hour
-* * * * * curator /actions/snapshot.yml
-
diff --git a/tools/curator/curator.yml b/tools/curator/curator.yml
deleted file mode 100755
index d881703..0000000
--- a/tools/curator/curator.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# Remember, leave a key empty if there is no value.  None will be a string,
-# not a Python "NoneType"
-
-# This are Environment Variables INSIDE the container, and should be set by the Docker-Compose File
-client:
-  hosts:
-    - ${ELASTICSEARCH_HOST_PORT}
-  http_auth: ${ELASTICSEARCH_HTTP_AUTH}
-  port: 9200
-  use_ssl: False
-  ssl_no_validate: False
-  timeout: 30
-  master_only: False
-
-logging:
-  loglevel: INFO
-  logformat: default
diff --git a/tools/curator/entrypoint.sh b/tools/curator/entrypoint.sh
deleted file mode 100644
index 0b0131d..0000000
--- a/tools/curator/entrypoint.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-set -eu
-
-# Add Crontab
-crontab /etc/crontab
-
-# Run Crond
-exec tini -s -- crond -fl 8 -d 8
\ No newline at end of file
diff --git a/tools/elastalert/config/config.json b/tools/elastalert/config/config.json
deleted file mode 100644
index fdd28b9..0000000
--- a/tools/elastalert/config/config.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "appName": "elastalert-server",
-  "port": 3030,
-  "wsport": 3333,
-  "elastalertPath": "/opt/elastalert",
-  "verbose": false,
-  "es_debug": false,
-  "debug": false,
-  "rulesPath": {
-    "relative": true,
-    "path": "/rules"
-  },
-  "templatesPath": {
-    "relative": true,
-    "path": "/rule_templates"
-  },
-  "es_host": "${ELASTICSEARCH_HOST}",
-  "es_port": "${ELASTICSEARCH_PORT}",
-  "es_username": "${ELASTIC_USERNAME}",
-  "es_password": "${ELASTIC_PASSWORD}",
-  "writeback_index": "elastalert_status"
-}
diff --git a/tools/elastalert/config/elastalert-test.yaml b/tools/elastalert/config/elastalert-test.yaml
deleted file mode 100644
index 3f31b56..0000000
--- a/tools/elastalert/config/elastalert-test.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# NOTE: This config is used when testing a rule
-
-# The elasticsearch hostname for metadata writeback
-# Note that every rule can have its own elasticsearch host
-es_host: ${ELASTICSEARCH_HOST}
-
-# The elasticsearch port
-es_port: ${ELASTICSEARCH_PORT}
-
-# Option basic-auth username and password for elasticsearch
-es_username: ${ELASTIC_USERNAME}
-es_password: ${ELASTIC_PASSWORD}
-
-# This is the folder that contains the rule yaml files
-# Any .yaml file will be loaded as a rule
-rules_folder: rules
-
-# How often ElastAlert will query elasticsearch
-# The unit can be anything from weeks to seconds
-run_every:
-  minutes: 5
-
-# ElastAlert will buffer results from the most recent
-# period of time, in case some log sources are not in real time
-buffer_time:
-  minutes: 2
-
-## Limit Spikes to take all memory trying to download documents
-## (max downloaded docs = max_query_size x max_scrolling_count)
-max_query_size: 1000
-max_scrolling_count: 5
-
-## Doc Type for Count Queries
-doc_type: _doc
-
-# Optional URL prefix for elasticsearch
-#es_url_prefix: elasticsearch
-
-# Connect with TLS to elasticsearch
-#use_ssl: True
-
-# Verify TLS certificates
-#verify_certs: True
-
-# GET request with body is the default option for Elasticsearch.
-# If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
-# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport
-# for details
-#es_send_get_body_as: GET
-
-
-# The index on es_host which is used for metadata storage
-# This can be a unmapped index, but it is recommended that you run
-# elastalert-create-index to set a mapping
-writeback_index: elastalert_status
-
-# If an alert fails for some reason, ElastAlert will retry
-# sending the alert until this time period has elapsed
-alert_time_limit:
-  days: 2
diff --git a/tools/elastalert/config/elastalert.yaml b/tools/elastalert/config/elastalert.yaml
deleted file mode 100644
index 3267ff3..0000000
--- a/tools/elastalert/config/elastalert.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-# The elasticsearch hostname for metadata writeback
-# Note that every rule can have its own elasticsearch host
-es_host: ${ELASTICSEARCH_HOST}
-
-# The elasticsearch port
-es_port: ${ELASTICSEARCH_PORT}
-
-# Option basic-auth username and password for elasticsearch
-es_username: ${ELASTIC_USERNAME}
-es_password: ${ELASTIC_PASSWORD}
-
-# This is the folder that contains the rule yaml files
-# Any .yaml file will be loaded as a rule
-rules_folder: rules
-
-# How often ElastAlert will query elasticsearch
-# The unit can be anything from weeks to seconds
-run_every:
-  minutes: 5
-
-# ElastAlert will buffer results from the most recent
-# period of time, in case some log sources are not in real time
-buffer_time:
-  minutes: 2
-
-## Limit Spikes to take all memory trying to download documents
-## (max downloaded docs = max_query_size x max_scrolling_count)
-max_query_size: 1000
-max_scrolling_count: 5
-
-## Doc Type for Count Queries
-doc_type: _doc
-
-# Optional URL prefix for elasticsearch
-#es_url_prefix: elasticsearch
-
-# Connect with TLS to elasticsearch
-#use_ssl: True
-
-# Verify TLS certificates
-#verify_certs: True
-
-# GET request with body is the default option for Elasticsearch.
-# If it fails for some reason, you can pass 'GET', 'POST' or 'source'.
-# See http://elasticsearch-py.readthedocs.io/en/master/connection.html?highlight=send_get_body_as#transport
-# for details
-#es_send_get_body_as: GET
-
-
-# The index on es_host which is used for metadata storage
-# This can be a unmapped index, but it is recommended that you run
-# elastalert-create-index to set a mapping
-writeback_index: elastalert_status
-
-# If an alert fails for some reason, ElastAlert will retry
-# sending the alert until this time period has elapsed
-alert_time_limit:
-  days: 2
diff --git a/tools/elastalert/rule_templates/detection_template.yaml b/tools/elastalert/rule_templates/detection_template.yaml
deleted file mode 100644
index 4ad9b77..0000000
--- a/tools/elastalert/rule_templates/detection_template.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-# Rule name, must be unique
-name: Alert on any detection
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-# Type of alert.
-type: any
-realert:
-  seconds: 0
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-filter:
-- query:
-    query_string:
-      query: "_exists_:detections"
-
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - context.php.session.sessionId
-  - detections
-  - meta.user
-
-
-# Enhancement for converting 'detections' array into object, ex. get merged detection type by
-# 'detections_parsed.type' or get first detection type by 'detection_parsed.0.type'
-match_enhancements:
-- "elastalert_modules.bitsensor_enhancement.AlertTextEnhancement"
-run_enhancements_first: true
-
-
-alert_subject: ":exclamation: Detection on {}"
-alert_subject_args:
-  - endpoint.name
-
-alert_text_type: alert_text_only
-alert_text: "Triggered at _{}_\n\n*Attacker:*\nIP: {} \nUser-Agent: {}\nDetection: `{}`\n\n:Id: {}\nUser: {}"
-alert_text_args:
-  - endpoint.localtime
-  - context.ip
-  - context.http.userAgent
-  - detections_parsed.type
-  - _id
-  - meta.user
-
-# The alert is use when a match is found
-alert:
-  - slack
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
diff --git a/tools/elastalert/rule_templates/error_jira_template.yaml b/tools/elastalert/rule_templates/error_jira_template.yaml
deleted file mode 100644
index 0507333..0000000
--- a/tools/elastalert/rule_templates/error_jira_template.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-# Rule name, must be unique
-name: Alert on any error
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-# Type of alert.
-type: any
-realert:
-  seconds: 0
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-filter:
-- query:
-    query_string:
-      query: "_exists_:errors"
-
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - errors
-  - meta.user
-
-
-# Enhancement for converting 'detections' array into object, ex. get merged detection type by
-# 'detections_parsed.type' or get first detection type by 'detection_parsed.0.type'
-match_enhancements:
-- "elastalert_modules.bitsensor_enhancement.AlertTextEnhancement"
-run_enhancements_first: true
-
-
-alert_subject: "Error on {}"
-alert_subject_args:
-  - endpoint.name
-
-alert_text_type: alert_text_only
-alert_text: "Triggered at _{}_\n\n*Attacker:*\nIP: {} \nUser-Agent: {}\nError: *{}*\n\nId: {}\nUser: {}"
-alert_text_args:
-  - endpoint.localtime
-  - context.ip
-  - context.http.userAgent
-  - errors_parsed.type
-  - _id
-  - meta.user
-
-# The alert is use when a match is found
-alert:
-  - jira
-
-jira_server: https://bitsensor.atlassian.net
-jira_project: SA
-jira_issuetype: Story
-jira_labels: error
-
-# Add jira_acct.txt to rules folder
-# The file is yaml formatted and must contain fields: 'user', 'password'
-jira_account_file: "rules/jira_acct.txt"
\ No newline at end of file
diff --git a/tools/elastalert/rule_templates/integration_started_template.yaml b/tools/elastalert/rule_templates/integration_started_template.yaml
deleted file mode 100644
index 02cf57f..0000000
--- a/tools/elastalert/rule_templates/integration_started_template.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Rule name, must be unique
-name: Integration Started
-
-# Alert on x events in y seconds
-type: frequency
-
-# Alert when this many documents matching the query occur within a timeframe
-num_events: 1
-
-# num_events must occur within this amount of time to trigger an alert
-timeframe:
-  hours: 1
-
-# When the attacker continues, send a new alert after x minutes
-realert:
-  days: 7
-
-query_key:
-  - meta.provider
-  - endpoint.name
-
-include:
-  - meta.provider
-  - endpoint.name
-
-alert_subject: "Integration started on <{}> | <{}|Show Dashboard>"
-alert_subject_args:
-  - endpoint.name
-  - kibana_link
-
-alert_text: |-
-  Integration on {} has started with plugin {}.
-
-alert_text_args:
-  - endpoint.name
-  - meta.provider
-
-# The alert when a match is found
-alert:
-  - slack
-
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
-
-# Alert body only cointains a title and text
-alert_text_type: alert_text_only
-
-# Link to BitSensor Kibana Dashboard
-use_kibana4_dashboard: "https://kibana.dashboard.io/app/kibana#/dashboard"
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-doc_type: datapoint
diff --git a/tools/elastalert/rule_templates/no_data_template.yaml b/tools/elastalert/rule_templates/no_data_template.yaml
deleted file mode 100644
index add9cda..0000000
--- a/tools/elastalert/rule_templates/no_data_template.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Alert when no data has been received for more then 30 seconds.
-
-# Rule name, must be unique
-name: No Data
-
-# Type of alert.
-type: flatline
-
-# Alert when this many documents matching the query occur within a timeframe
-threshold: 1
-use_terms: true
-
-# num_events must occur within this amount of time to trigger an alert
-timeframe:
-  seconds: 30
-
-realert:
-  minutes: 10
-
-exponential_realert:
-  hours: 1
-
-doc_type: datapoint
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-alert_subject: "No data on dashboard"
-
-alert_text_type: alert_text_only
-alert_text: "The stack receives no data. It might be down :("
-
-# The alert is use when a match is found
-alert:
-  - slack
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
diff --git a/tools/elastalert/rule_templates/relevant_attack_template.yaml b/tools/elastalert/rule_templates/relevant_attack_template.yaml
deleted file mode 100644
index e571bae..0000000
--- a/tools/elastalert/rule_templates/relevant_attack_template.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Index to search, wildcard supported
-name: Known Attacks
-
-# Alert on each event
-type: any
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-filter:
-- query:
-    query_string:
-      query: "detections.reason:KNOWN_ATTACK"
-
-index: bitsensor-detections-*
-timestamp_field: endpoint.localtime
-
-# Key per profile
-query_key:
-  - context.ip
-  - context.http.userAgent
-
-# When the attacker continues, send a new alert after x minutes
-realert:
-  minutes: 10
-
-# Index to search, wildcard supported
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - context.php.session.sessionId
-  - detections
-
-alert_subject: "Attack on <{}> of type {} | <{}|Show Dashboard>"
-alert_subject_args:
-  - endpoint.name
-  - detections_parsed.type
-  - kibana_link
-
-alert_text: |-
-  An attack on {} is detected.
-  Detection name: {}
-  Detection type: {}
-
-  The attacker looks like:
-  IP: {}
-  User-Agent: {}
-
-alert_text_args:
-  - endpoint.name
-  - detections_parsed.name
-  - detections_parsed.type
-  - context.ip
-  - context.http.userAgent
-
-# Specify your services here
-alert:
-  - slack
-
-# How To Generate your API:
-# Click on your Workspace name (upper left corner)
-# Go to "Manage Apps", then "Custom Integrations", "Incoming Webhooks"
-# Press "Add Configuration", and choose your channel. Now paste it here:
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "BitSensor Alerting"
-
-# Alert body only cointains a title and text
-alert_text_type: alert_text_only
-
-# Link to BitSensor Kibana Dashboard
-use_kibana4_dashboard: "https://kibana.dashboard.io/app/kibana#/dashboard"
-
-# Enhancement for converting 'detections' array into object, ex. get merged detection type by
-# 'detections_parsed.type' or get first detection type by 'detection_parsed.0.type'
-match_enhancements:
-- "elastalert_modules.bitsensor_enhancement.AlertTextEnhancement"
-run_enhancements_first: true
diff --git a/tools/elastalert/rule_templates/spike_template.yml b/tools/elastalert/rule_templates/spike_template.yml
deleted file mode 100644
index 28cf901..0000000
--- a/tools/elastalert/rule_templates/spike_template.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Rule name, must be unique
-name: Spike in attacks on server
-
-# Type of alert.
-type: spike
-
-# num_events must occur within this amount of time to trigger an alert
-timeframe:
-  seconds: 60
-spike_height: 10
-spike_type: up
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-query_key:
-  - endpoint.name
-
-alert_subject: "Surge in attacks on {}"
-alert_subject_args:
-  - endpoint.name
-
-alert_text_type: alert_text_only
-alert_text: "Surge in attacks on {}"
-alert_text_args:
-  - endpoint.name
-
-# The alert is use when a match is found
-alert:
-  - slack
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
-
diff --git a/tools/elastalert/rule_templates/successful_attack_template.yaml b/tools/elastalert/rule_templates/successful_attack_template.yaml
deleted file mode 100644
index 520be57..0000000
--- a/tools/elastalert/rule_templates/successful_attack_template.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Rule name, must be unique
-name: Alert on Successful Attack
-
-# Type of alert.
-type: any
-
-realert:
-  seconds: 0
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-filter:
-- query:
-    query_string:
-      query: "detections.successful:true"
-
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - context.php.session.sessionId
-  - detections.type
-  - detections.name
-  - meta.user
-  - errors
-
-alert_subject: "Successful attack on {}"
-alert_subject_args:
-  - endpoint.name
-
-alert_text_type: alert_text_only
-alert_text: "Detection triggered at {}\nIP: {} \nUser-Agent: {}\n\nID: {}\nUser: {}"
-alert_text_args:
-  - endpoint.localtime
-  - context.ip
-  - context.http.userAgent
-  - _id
-  - meta.user
-
-# The alert is use when a match is found
-alert:
-  - slack
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
diff --git a/tools/elastalert/rule_templates/threshold_template.yml b/tools/elastalert/rule_templates/threshold_template.yml
deleted file mode 100644
index be8c841..0000000
--- a/tools/elastalert/rule_templates/threshold_template.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Alert when there are 500 discovery detection events coming from the same ip, userAgent within 30 seconds.
-
-# Rule name, must be unique
-name: Attack threshold exceeded
-
-# Type of alert.
-type: percentage_match
-
-# Alert when this many documents matching the query occur within a timeframe
-max_percentage: 10
-
-# num_events must occur within this amount of time to trigger an alert
-timeframe:
-  seconds: 60
-
-# Index to search, wildcard supported
-index: bitsensor
-timestamp_field: endpoint.localtime
-
-query_key:
-  - context.ip
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-match_bucket_filter:
-- query:
-    query_string:
-      query: "_exists_:detections"
-
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - context.php.session.sessionId
-  - detections.type
-  - detections.name
-  - meta.user
-  - errors
-
-alert_subject: "Attack threshold exceeded by {}"
-alert_subject_args:
-  - context.ip
-
-alert_text_type: alert_text_only
-alert_text: "Time: {}\nIP: {} \nUser-Agent: {}\n\nID: {}\nUser: {}"
-alert_text_args:
-  - endpoint.localtime
-  - context.ip
-  - context.http.userAgent
-  - _id
-  - meta.user
-
-# The alert is use when a match is found
-alert:
-  - slack
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
diff --git a/tools/elastalert/rule_templates/volumetric_alert_template.yaml b/tools/elastalert/rule_templates/volumetric_alert_template.yaml
deleted file mode 100644
index fbbe55a..0000000
--- a/tools/elastalert/rule_templates/volumetric_alert_template.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-# Rule name, must be unique
-name: Bad/Bot Behaviour
-
-# Alert on x events in y seconds
-type: frequency
-
-# Alert when this many documents matching the query occur within a timeframe
-num_events: 20
-
-# num_events must occur within this amount of time to trigger an alert
-timeframe:
-  seconds: 30
-
-# A list of elasticsearch filters used for find events
-# These filters are joined with AND and nested in a filtered query
-# For more info: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html
-filter:
-- query:
-    query_string:
-      query: "detections.reason:BEHAVIOUR"
-
-# Index to search, wildcard supported
-index: bitsensor-detections-*
-timestamp_field: endpoint.localtime
-doc_type: datapoint
-
-# When the attacker continues, send a new alert after x minutes
-realert:
-  minutes: 1
-
-query_key:
-  - context.ip
-  - context.http.userAgent
-
-include:
-  - endpoint.location
-  - endpoint.name
-  - context.http.userAgent
-  - context.ip
-  - context.php.session.sessionId
-
-alert_subject: "Bad/Bot behaviour on <{}> | <{}|Show Dashboard>"
-alert_subject_args:
-  - endpoint.name
-  - kibana_link
-
-alert_text: |-
-  An attack on {} is detected.
-
-  The attacker looks like:
-  IP: {}
-  Tool: {}
-
-alert_text_args:
-  - endpoint.name
-  - context.ip
-  - context.http.userAgent
-
-# The alert is use when a match is found
-alert:
-  - slack
-
-slack_webhook_url: "https://hooks.slack.com/services/"
-slack_username_override: "ElastAlert"
-
-# Alert body only cointains a title and text
-alert_text_type: alert_text_only
-
-# Link to BitSensor Kibana Dashboard
-use_kibana4_dashboard: "https://kibana.dashboard.io/app/kibana#/dashboard"
diff --git a/tools/elastalert/rules/.gitkeep b/tools/elastalert/rules/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/tools/filebeat/Dockerfile b/tools/filebeat/Dockerfile
deleted file mode 100644
index 3a3d542..0000000
--- a/tools/filebeat/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-ARG ELK_VERSION
-
-FROM docker.elastic.co/beats/filebeat:${ELK_VERSION}
-
-# Add healthcheck
-HEALTHCHECK CMD filebeat test config && filebeat test output
-
-# EXPOSE metrics port
-EXPOSE 5066
\ No newline at end of file