From 0cff5bf53b09eb15fb9eb6cac9527716be61b2c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Thu, 31 May 2018 17:25:34 +0200 Subject: [PATCH 01/14] [stable/falco] Add Falco chart --- stable/falco/.helmignore | 21 + stable/falco/Chart.yaml | 18 + stable/falco/README.md | 90 + stable/falco/rules/falco_rules.local.yaml | 13 + stable/falco/rules/falco_rules.yaml | 1540 +++++++++++++++++ stable/falco/templates/NOTES.txt | 5 + stable/falco/templates/_helpers.tpl | 32 + stable/falco/templates/clusterrole.yaml | 32 + .../falco/templates/clusterrolebinding.yaml | 20 + stable/falco/templates/configmap.yaml | 126 ++ stable/falco/templates/daemonset.yaml | 87 + stable/falco/templates/deployment.yaml | 21 + stable/falco/templates/serviceaccount.yaml | 11 + stable/falco/values.yaml | 144 ++ 14 files changed, 2160 insertions(+) create mode 100644 stable/falco/.helmignore create mode 100644 stable/falco/Chart.yaml create mode 100644 stable/falco/README.md create mode 100644 stable/falco/rules/falco_rules.local.yaml create mode 100644 stable/falco/rules/falco_rules.yaml create mode 100644 stable/falco/templates/NOTES.txt create mode 100644 stable/falco/templates/_helpers.tpl create mode 100644 stable/falco/templates/clusterrole.yaml create mode 100644 stable/falco/templates/clusterrolebinding.yaml create mode 100644 stable/falco/templates/configmap.yaml create mode 100644 stable/falco/templates/daemonset.yaml create mode 100644 stable/falco/templates/deployment.yaml create mode 100644 stable/falco/templates/serviceaccount.yaml create mode 100644 stable/falco/values.yaml diff --git a/stable/falco/.helmignore b/stable/falco/.helmignore new file mode 100644 index 000000000000..f0c131944441 --- /dev/null +++ b/stable/falco/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/stable/falco/Chart.yaml b/stable/falco/Chart.yaml new file mode 100644 index 000000000000..1e61d257bbfe --- /dev/null +++ b/stable/falco/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: falco +version: 0.1 +description: Sysdig Falco +keywords: + - monitoring + - security + - alerting + - metric + - troubleshooting + - run-time +home: https://www.sysdig.com/opensource/falco/ +icon: https://sysdig.com/wp-content/uploads/2016/08/falco_blog_480.jpg +sources: + - https://github.com/draios/falco +maintainers: + - name: Néstor Salceda + email: nestor.salceda@sysdig.com diff --git a/stable/falco/README.md b/stable/falco/README.md new file mode 100644 index 000000000000..0940755ba36d --- /dev/null +++ b/stable/falco/README.md @@ -0,0 +1,90 @@ +# Sysdig Falco + +[Sysdig Falco](https://www.sysdig.com/opensource/falco/) is a behavioral activity monitor designed to detect anomalous activity in your applications. You can use Falco to monitor run-time security of your Kubernetes applications and internal components. + +To know more about Sysdig Falco have a look at: + +- [Kubernetes security logging with Falco & Fluentd +](https://sysdig.com/blog/kubernetes-security-logging-fluentd-falco/) +- [Active Kubernetes security with Sysdig Falco, NATS, and kubeless](https://sysdig.com/blog/active-kubernetes-security-falco-nats-kubeless/) +- [Detecting cryptojacking with Sysdig’s Falco +](https://sysdig.com/blog/detecting-cryptojacking-with-sysdigs-falco/) + +## Introduction + +This chart adds Falco to all nodes in your cluster using a DaemonSet. + +Also provides a Deployment for generating Falco alerts. This is useful for testing purposes. 
+ +## Installing the Chart + +To install the chart with the release name `my-release` run: + +```bash +$ helm install --name my-release stable/falco +``` + +After a few seconds, Falco should be running. + +> **Tip**: List all releases using `helm list`, a release is a name used to track a specific deployment + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` +> **Tip**: Use helm delete --purge my-release to completely remove the release from Helm internal storage + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the Falco chart and their default values. + +| Parameter | Description | Default | +| --- | --- | --- | +| `image.repository` | The image repository to pull from | `sysdig/falco` | +| `image.tag` | The image tag to pull | `latest` | +| `image.pullPolicy` | The image pull policy | `Always` | +| `rbac.create` | If true, create & use RBAC resources | `true` | +| `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | +| `deployment.enabled` | Run falco-event-generator for sample events | `false` | +| `deployment.replicas` | How many replicas of falco-event-generator to run | `1` | +| `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/rules.d]` | +| `falco.jsonOutput` | Output events in json or text | `false` | +| `falco.jsonIncludeOutputProperty` | Include output property in json output | `true` | +| `falco.logStderr` | Send Falco debugging information logs to stderr | `true` | +| `falco.logSyslog` | Send Falco debugging information logs to syslog | `true` | +| `falco.logLevel` | The minimum level of Falco debugging information to include in logs | `info` | +| `falco.priority` | The minimum rule priority level to load and run | 
`debug` | +| `falco.bufferedOutputs` | Use buffered outputs to channels | `false` | +| `falco.outputs.rate` | Number of tokens gained per second | `1` | +| `falco.outputs.maxBurst` | Maximum number of tokens outstanding | `1000` | +| `falco.syslogOutput.enabled` | Enable syslog output for security notifications | `true` | +| `falco.fileOutput.enabled` | Enable file output for security notifications | `false` | +| `falco.fileOutput.keepAlive` | Open file once or every time a new notification arrives | `false` | +| `falco.fileOutput.filename` | The filename for logging notifications | `./events.txt` | +| `falco.stdoutOutput.enabled` | Enable stdout output for security notifications | `true` | +| `falco.programOutput.enabled` | Enable program output for security notifications | `false` | +| `falco.programOutput.keepAlive` | Start the program once or re-spawn when a notification arrives | `false` | +| `falco.programOutput.program` | Command to execute for program output | `mail -s "Falco Notification" someone@example.com` | +| `falco.gcsccIntegration.enabled` | Enable Google Cloud Security Command Center integration | `false` | +| `falco.gcsccIntegration.webhookUrl` | The URL where sysdig-gcscc-connector webhook is listening | `http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events` | +| `falco.gcsccIntegration.webhookAuthenticationToken` | Token used for authentication and webhook | `b27511f86e911f20b9e0f9c8104b4ec4` | +| `tolerations` | The tolerations for scheduling | `node-role.kubernetes.io/master:NoSchedule` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install --name my-release --set falco.jsonOutput=true stable/falco +``` + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install --name my-release -f values.yaml stable/falco +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/stable/falco/rules/falco_rules.local.yaml b/stable/falco/rules/falco_rules.local.yaml new file mode 100644 index 000000000000..3c8e3bb5aa85 --- /dev/null +++ b/stable/falco/rules/falco_rules.local.yaml @@ -0,0 +1,13 @@ +#################### +# Your custom rules! +#################### + +# Add new rules, like this one +# - rule: The program "sudo" is run in a container +# desc: An event will trigger every time you run sudo in a container +# condition: evt.type = execve and evt.dir=< and container.id != host and proc.name = sudo +# output: "Sudo run in container (user=%user.name %container.info parent=%proc.pname cmdline=%proc.cmdline)" +# priority: ERROR +# tags: [users, container] + +# Or override/append to any rule, macro, or list from the Default Rules diff --git a/stable/falco/rules/falco_rules.yaml b/stable/falco/rules/falco_rules.yaml new file mode 100644 index 000000000000..58f4ea43d236 --- /dev/null +++ b/stable/falco/rules/falco_rules.yaml @@ -0,0 +1,1540 @@ +# Currently disabled as read/write are ignored syscalls. The nearly +# similar open_write/open_read check for files being opened for +# reading/writing. +# - macro: write +# condition: (syscall.type=write and fd.type in (file, directory)) +# - macro: read +# condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) + +- macro: open_write + condition: (evt.type=open or evt.type=openat) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0 + +- macro: open_read + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0 + +- macro: never_true + condition: (evt.num=0) + +- macro: always_true + condition: (evt.num=>0) + +# In some cases, such as dropped system call events, information about +# the process name may be missing. 
For some rules that really depend +# on the identity of the process performing an action such as opening +# a file, etc., we require that the process name be known. +- macro: proc_name_exists + condition: (proc.name!="") + +- macro: rename + condition: evt.type in (rename, renameat) +- macro: mkdir + condition: evt.type = mkdir +- macro: remove + condition: evt.type in (rmdir, unlink, unlinkat) + +- macro: modify + condition: rename or remove + +- macro: spawned_process + condition: evt.type = execve and evt.dir=< + +# File categories +- macro: bin_dir + condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin) + +- macro: bin_dir_mkdir + condition: > + (evt.arg[1] startswith /bin/ or + evt.arg[1] startswith /sbin/ or + evt.arg[1] startswith /usr/bin/ or + evt.arg[1] startswith /usr/sbin/) + +- macro: bin_dir_rename + condition: > + evt.arg[1] startswith /bin/ or + evt.arg[1] startswith /sbin/ or + evt.arg[1] startswith /usr/bin/ or + evt.arg[1] startswith /usr/sbin/ + +- macro: etc_dir + condition: fd.name startswith /etc/ + +# This detects writes immediately below / or any write anywhere below /root +- macro: root_dir + condition: ((fd.directory=/ or fd.name startswith /root) and fd.name contains "/") + +- list: shell_binaries + items: [bash, csh, ksh, sh, tcsh, zsh, dash] + +- list: shell_mgmt_binaries + items: [add-shell, remove-shell] + +- macro: shell_procs + condition: (proc.name in (shell_binaries)) + +- list: coreutils_binaries + items: [ + truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, + groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, + basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, + base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, + comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, + tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, + tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, + tac, link, 
chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, + chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, + readlink, sleep, stty, mkdir, df, dir, rmdir, touch + ] + +# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: login_binaries + items: [ + login, systemd, '"(systemd)"', systemd-logind, su, + nologin, faillog, lastlog, newgrp, sg + ] + +# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: passwd_binaries + items: [ + shadowconfig, grpck, pwunconv, grpconv, pwck, + groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, + groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, + gpasswd, chfn, expiry, passwd, vigr, cpgr + ] + +# repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | +# awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," +- list: shadowutils_binaries + items: [ + chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, + groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, + newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd + ] + +- list: sysdigcloud_binaries + items: [setup-backend, dragent, sdchecks] + +- list: docker_binaries + items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current] + +- list: k8s_binaries + items: [hyperkube, skydns, kube2sky, exechealthz] + +- list: lxd_binaries + items: [lxd, lxcfs] + +- list: http_server_binaries + items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] + +- list: db_server_binaries + items: [mysqld, postgres, sqlplus] + +- list: mysql_mgmt_binaries + items: [mysql_install_d, mysql_ssl_rsa_s] + +- list: postgres_mgmt_binaries + items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] + +- list: db_mgmt_binaries + items: [mysql_mgmt_binaries, postgres_mgmt_binaries] + +- list: 
nosql_server_binaries + items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] + +- list: gitlab_binaries + items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] + +- macro: server_procs + condition: proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd) + +# The explicit quotes are needed to avoid the - characters being +# interpreted by the filter expression. +- list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat] + +- macro: rpm_procs + condition: proc.name in (rpm_binaries) or proc.name in (salt-minion) + +- list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit + ] + +# The truncated dpkg-preconfigu is intentional, process names are +# truncated at the sysdig level. +- list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, sane-utils.post] + +- macro: package_mgmt_procs + condition: proc.name in (package_mgmt_binaries) + +- macro: run_by_package_mgmt_binaries + condition: proc.aname in (package_mgmt_binaries, needrestart) + +- list: ssl_mgmt_binaries + items: [ca-certificates] + +- list: dhcp_binaries + items: [dhclient, dhclient-script] + +# A canonical set of processes that run other programs with different +# privileges or as a different user. 
+- list: userexec_binaries + items: [sudo, su, suexec] + +- list: known_setuid_binaries + items: [ + sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, + filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, + start-stop-daem + ] + +- list: user_mgmt_binaries + items: [login_binaries, passwd_binaries, shadowutils_binaries] + +- list: dev_creation_binaries + items: [blkid, rename_device, update_engine, sgdisk] + +- list: hids_binaries + items: [aide] + +- list: vpn_binaries + items: [openvpn] + +- list: nomachine_binaries + items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] + +- macro: system_procs + condition: proc.name in (coreutils_binaries, user_mgmt_binaries) + +- list: mail_binaries + items: [ + sendmail, sendmail-msp, postfix, procmail, exim4, + pickup, showq, mailq, dovecot, imap-login, imap, + mailmng-core, pop3-login, dovecot-lda, pop3 + ] + +- list: mail_config_binaries + items: [ + update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, + update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., + postfix.config, postfix-script + ] + +- list: sensitive_file_names + items: [/etc/shadow, /etc/sudoers, /etc/pam.conf] + +- macro: sensitive_files + condition: > + fd.name startswith /etc and + (fd.name in (sensitive_file_names) + or fd.directory in (/etc/sudoers.d, /etc/pam.d)) + +# Indicates that the process is new. Currently detected using time +# since process was started, using a threshold of 5 seconds. 
+- macro: proc_is_new + condition: proc.duration <= 5000000000 + +# Network +- macro: inbound + condition: > + (((evt.type in (accept,listen) and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +- macro: outbound + condition: > + (((evt.type = connect and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +# Very similar to inbound/outbound, but combines the tests together +# for efficiency. +- macro: inbound_outbound + condition: > + (((evt.type in (accept,listen,connect) and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + +- macro: ssh_port + condition: fd.sport=22 + +# In a local/user rules file, you could override this macro to +# enumerate the servers for which ssh connections are allowed. For +# example, you might have a ssh gateway host for which ssh connections +# are allowed. +# +# In the main falco rules file, there isn't any way to know the +# specific hosts for which ssh access is allowed, so this macro just +# repeats ssh_port, which effectively allows ssh from all hosts. In +# the overridden macro, the condition would look something like +# "fd.sip="a.b.c.d" or fd.sip="e.f.g.h" or ..." +- macro: allowed_ssh_hosts + condition: ssh_port + +- rule: Disallowed SSH Connection + desc: Detect any new ssh connection to a host other than those in an allowed group of hosts + condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts + output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name) + priority: NOTICE + tags: [network] + +# Use this to test whether the event occurred within a container. 
+ +# When displaying container information in the output field, use +# %container.info, without any leading term (file=%fd.name +# %container.info user=%user.name, and not file=%fd.name +# container=%container.info user=%user.name). The output will change +# based on the context and whether or not -pk/-pm/-pc was specified on +# the command line. +- macro: container + condition: container.id != host + +- macro: interactive + condition: > + ((proc.aname=sshd and proc.name != sshd) or + proc.name=systemd-logind or proc.name=login) + +- list: cron_binaries + items: [anacron, cron, crond, crontab] + +# https://github.com/liske/needrestart +- list: needrestart_binaries + items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] + +# Possible scripts run by sshkit +- list: sshkit_script_binaries + items: [10_etc_sudoers., 10_passwd_group] + +- list: plesk_binaries + items: [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng] + +# System users that should never log into a system. Consider adding your own +# service users (e.g. 'apache' or 'mysqld') here. +- macro: system_users + condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data) + +# These macros will be removed soon. Only keeping them to maintain +# compatibility with some widely used rules files. 
+# Begin Deprecated +- macro: parent_ansible_running_python + condition: (proc.pname in (python, pypy) and proc.pcmdline contains ansible) + +- macro: parent_bro_running_python + condition: (proc.pname=python and proc.cmdline contains /usr/share/broctl) + +- macro: parent_python_running_denyhosts + condition: > + (proc.cmdline startswith "denyhosts.py /usr/bin/denyhosts.py" or + (proc.pname=python and + (proc.pcmdline contains /usr/sbin/denyhosts or + proc.pcmdline contains /usr/local/bin/denyhosts.py))) + +- macro: parent_python_running_sdchecks + condition: > + (proc.pname in (python, python2.7) and + (proc.pcmdline contains /opt/draios/bin/sdchecks)) + +- macro: parent_linux_image_upgrade_script + condition: proc.pname startswith linux-image- + +- macro: parent_java_running_echo + condition: (proc.pname=java and proc.cmdline startswith "sh -c echo") + +- macro: parent_scripting_running_builds + condition: > + (proc.pname in (php,php5-fpm,php-fpm7.1,python,ruby,ruby2.3,ruby2.1,node,conda) and ( + proc.cmdline startswith "sh -c git" or + proc.cmdline startswith "sh -c date" or + proc.cmdline startswith "sh -c /usr/bin/g++" or + proc.cmdline startswith "sh -c /usr/bin/gcc" or + proc.cmdline startswith "sh -c gcc" or + proc.cmdline startswith "sh -c if type gcc" or + proc.cmdline startswith "sh -c cd '/var/www/edi/';LC_ALL=en_US.UTF-8 git" or + proc.cmdline startswith "sh -c /var/www/edi/bin/sftp.sh" or + proc.cmdline startswith "sh -c /usr/src/app/crxlsx/bin/linux/crxlsx" or + proc.cmdline startswith "sh -c make parent" or + proc.cmdline startswith "node /jenkins/tools" or + proc.cmdline startswith "sh -c '/usr/bin/node'" or + proc.cmdline startswith "sh -c stty -a |" or + proc.pcmdline startswith "node /opt/nodejs/bin/yarn" or + proc.pcmdline startswith "node /usr/local/bin/yarn" or + proc.pcmdline startswith "node /root/.config/yarn" or + proc.pcmdline startswith "node /opt/yarn/bin/yarn.js")) + + +- macro: httpd_writing_ssl_conf + condition: > + 
(proc.pname=run-httpd and + (proc.cmdline startswith "sed -ri" or proc.cmdline startswith "sed -i") and + (fd.name startswith /etc/httpd/conf.d/ or fd.name startswith /etc/httpd/conf)) + +- macro: parent_Xvfb_running_xkbcomp + condition: (proc.pname=Xvfb and proc.cmdline startswith 'sh -c "/usr/bin/xkbcomp"') + +- macro: parent_nginx_running_serf + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c serf") + +- macro: parent_node_running_npm + condition: (proc.pcmdline startswith "node /usr/local/bin/npm" or + proc.pcmdline startswith "node /usr/local/nodejs/bin/npm" or + proc.pcmdline startswith "node /opt/rh/rh-nodejs6/root/usr/bin/npm") + +- macro: parent_java_running_sbt + condition: (proc.pname=java and proc.pcmdline contains sbt-launch.jar) + +- list: known_container_shell_spawn_cmdlines + items: [] + +- list: known_shell_spawn_binaries + items: [] + +- macro: shell_spawning_containers + condition: (container.image startswith jenkins or + container.image startswith gitlab/gitlab-ce or + container.image startswith gitlab/gitlab-ee) + +## End Deprecated + +- macro: ansible_running_python + condition: (proc.name in (python, pypy) and proc.cmdline contains ansible) + +- macro: chef_running_yum_dump + condition: (proc.name=python and proc.cmdline contains yum-dump.py) + +- macro: python_running_denyhosts + condition: > + (proc.name=python and + (proc.cmdline contains /usr/sbin/denyhosts or + proc.cmdline contains /usr/local/bin/denyhosts.py)) + +# Qualys seems to run a variety of shell subprocesses, at various +# levels. This checks at a few levels without the cost of a full +# proc.aname, which traverses the full parent hierarchy. 
+- macro: run_by_qualys + condition: > + (proc.pname=qualys-cloud-ag or + proc.aname[2]=qualys-cloud-ag or + proc.aname[3]=qualys-cloud-ag or + proc.aname[4]=qualys-cloud-ag) + +- macro: run_by_sumologic_securefiles + condition: > + ((proc.cmdline="usermod -a -G sumologic_collector" or + proc.cmdline="groupadd sumologic_collector") and + (proc.pname=secureFiles.sh and proc.aname[2]=java)) + +- macro: run_by_yum + condition: ((proc.pname=sh and proc.aname[2]=yum) or + (proc.aname[2]=sh and proc.aname[3]=yum)) + +- macro: run_by_ms_oms + condition: > + (proc.aname[3] startswith omsagent- or + proc.aname[3] startswith scx-) + +- macro: run_by_google_accounts_daemon + condition: > + (proc.aname[1] startswith google_accounts or + proc.aname[2] startswith google_accounts) + +# Chef is similar. +- macro: run_by_chef + condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or + proc.aname[2]=chef-client or proc.aname[3]=chef-client or + proc.name=chef-client) + +- macro: run_by_adclient + condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient) + +- macro: run_by_centrify + condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) + +- macro: run_by_puppet + condition: (proc.aname[2]=puppet or proc.aname[3]=puppet) + +# Also handles running semi-indirectly via scl +- macro: run_by_foreman + condition: > + (user.name=foreman and + (proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or + (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby))) + +- macro: java_running_sdjagent + condition: proc.name=java and proc.cmdline contains sdjagent.jar + +- macro: kubelet_running_loopback + condition: (proc.pname=kubelet and proc.name=loopback) + +- macro: python_mesos_marathon_scripting + condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") + +- macro: splunk_running_forwarder + condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c 
/opt/splunkforwarder") + +- macro: parent_supervise_running_multilog + condition: (proc.name=multilog and proc.pname=supervise) + +- macro: supervise_writing_status + condition: (proc.name in (supervise,svc) and fd.name startswith "/etc/sb/") + +- macro: pki_realm_writing_realms + condition: (proc.cmdline startswith "bash /usr/local/lib/pki/pki-realm" and fd.name startswith /etc/pki/realms) + +- macro: htpasswd_writing_passwd + condition: (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd) + +- macro: lvprogs_writing_lvm_archive + condition: (proc.name in (dmeventd,lvcreate) and (fd.name startswith /etc/lvm/archive or + fd.name startswith /etc/lvm/backup)) +- macro: ovsdb_writing_openvswitch + condition: (proc.name=ovsdb-server and fd.directory=/etc/openvswitch) + +- macro: perl_running_plesk + condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or + proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") + +- macro: perl_running_updmap + condition: (proc.cmdline startswith "perl /usr/bin/updmap") + +- macro: perl_running_centrifydc + condition: (proc.cmdline startswith "perl /usr/share/centrifydc") + +- macro: parent_ucf_writing_conf + condition: (proc.pname=ucf and proc.aname[2]=frontend) + +- macro: consul_template_writing_conf + condition: > + ((proc.name=consul-template and fd.name startswith /etc/haproxy) or + (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name startswith /etc/ssl)) + +- macro: countly_writing_nginx_conf + condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) + +- macro: ms_oms_writing_conf + condition: > + ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor) + or proc.pname in (omi.postinst,omsconfig.posti,scx.postinst,omsadmin.sh,omiagent)) + and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) + +- macro: ms_scx_writing_conf + condition: (proc.name in (GetLinuxOS.sh) and 
fd.name startswith /etc/opt/microsoft/scx) + +- macro: azure_scripts_writing_conf + condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) + +- macro: azure_networkwatcher_writing_conf + condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) + +- macro: couchdb_writing_conf + condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) + +- macro: update_texmf_writing_conf + condition: (proc.name=update-texmf and fd.name startswith /etc/texmf) + +- macro: slapadd_writing_conf + condition: (proc.name=slapadd and fd.name startswith /etc/ldap) + +- macro: symantec_writing_conf + condition: > + ((proc.name=symcfgd and fd.name startswith /etc/symantec) or + (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf)) + +- macro: liveupdate_writing_conf + condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) + +- macro: sosreport_writing_files + condition: > + (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + +- macro: selinux_writing_conf + condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) + +- list: veritas_binaries + items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] + +- macro: veritas_driver_script + condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") + +- macro: veritas_progs + condition: (proc.name in (veritas_binaries) or veritas_driver_script) + +- macro: veritas_writing_config + condition: (veritas_progs and fd.name startswith /etc/vx) + +- macro: nginx_writing_conf + condition: (proc.name=nginx and fd.name startswith /etc/nginx) + +- macro: nginx_writing_certs + condition: > + (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and 
fd.name startswith /etc/nginx/certs) + +- macro: chef_client_writing_conf + condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) + +- macro: centrify_writing_krb + condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) + +- macro: cockpit_writing_conf + condition: > + ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) + and fd.name startswith /etc/cockpit) + +- macro: ipsec_writing_conf + condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) + +- macro: exe_running_docker_save + condition: (proc.cmdline startswith "exe /var/lib/docker" and proc.pname in (dockerd, docker)) + +- macro: python_running_get_pip + condition: (proc.cmdline startswith "python get-pip.py") + +- macro: python_running_ms_oms + condition: (proc.cmdline startswith "python /var/lib/waagent/") + +- macro: gugent_writing_guestagent_log + condition: (proc.name=gugent and fd.name=GuestAgent.log) + +- rule: Write below binary dir + desc: an attempt to write to any file below a set of binary directories + condition: > + bin_dir and evt.dir = < and open_write + and not package_mgmt_procs + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + output: > + File below a known binary directory opened for writing (user=%user.name + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2]) + priority: ERROR + tags: [filesystem] + +- list: safe_etc_dirs + items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig] + +- macro: fluentd_writing_conf_files + condition: (proc.name=start-fluentd and fd.name in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf)) + +- macro: qualys_writing_conf_files + condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf) + +- macro: git_writing_nssdb + condition: (proc.name=git-remote-http and 
fd.directory=/etc/pki/nssdb) + +- macro: plesk_writing_keys + condition: (proc.name in (plesk_binaries) and fd.name startswith /etc/sw/keys) + +- macro: plesk_install_writing_apache_conf + condition: (proc.cmdline startswith "bash -hB /usr/lib/plesk-9.0/services/webserver.apache configure" + and fd.name="/etc/apache2/apache2.conf.tmp") + +- macro: plesk_running_mktemp + condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries)) + +- macro: networkmanager_writing_resolv_conf + condition: proc.aname[2]=nm-dispatcher and fd.name=/etc/resolv.conf + +- macro: add_shell_writing_shells_tmp + condition: (proc.name=add-shell and fd.name=/etc/shells.tmp) + +- macro: duply_writing_exclude_files + condition: (proc.name=touch and proc.pcmdline startswith "bash /usr/bin/duply" and fd.name startswith "/etc/duply") + +- macro: xmlcatalog_writing_files + condition: (proc.name=update-xmlcatal and fd.directory=/etc/xml) + +- macro: datadog_writing_conf + condition: ((proc.cmdline startswith "python /opt/datadog-agent" or + proc.cmdline startswith "entrypoint.sh /entrypoint.sh datadog start" or + proc.cmdline startswith "agent.py /opt/datadog-agent") + and fd.name startswith "/etc/dd-agent") + +- macro: curl_writing_pki_db + condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) + +- macro: haproxy_writing_conf + condition: ((proc.name in (update-haproxy-,haproxy_reload.) 
or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) + and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) + +- macro: java_writing_conf + condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) + +- macro: rabbitmq_writing_conf + condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq) + +- macro: rook_writing_conf + condition: (proc.name=toolbox.sh and container.image startswith rook/toolbox + and fd.directory=/etc/ceph) + +- macro: httpd_writing_conf_logs + condition: (proc.name=httpd and fd.name startswith /etc/httpd/) + +- macro: mysql_writing_conf + condition: ((proc.name=start-mysql.sh or proc.pname=start-mysql.sh) and fd.name startswith /etc/mysql) + +- macro: openvpn_writing_conf + condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) + +- macro: php_handlers_writing_conf + condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) + +- macro: sed_writing_temp_file + condition: > + ((proc.aname[3]=cron_start.sh and fd.name startswith /etc/security/sed) or + (proc.name=sed and (fd.name startswith /etc/apt/sources.list.d/sed or + fd.name startswith /etc/apt/sed or + fd.name startswith /etc/apt/apt.conf.d/sed))) + +- macro: cron_start_writing_pam_env + condition: (proc.cmdline="bash /usr/sbin/start-cron" and fd.name=/etc/security/pam_env.conf) + +# In some cases dpkg-reconfigur runs commands that modify /etc. Not +# putting the full set of package management programs yet. +- macro: dpkg_scripting + condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu)) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to allow for specific combinations of +# programs writing below specific directories below +# /etc. fluentd_writing_conf_files is a good example to follow, as it +# specifies both the program doing the writing as well as the specific +# files it is allowed to modify. 
+# +# In this file, it just takes one of the programs in the base macro +# and repeats it. + +- macro: user_known_write_etc_conditions + condition: proc.name=confd + +- macro: write_etc_common + condition: > + etc_dir and evt.dir = < and open_write + and proc_name_exists + and not proc.name in (passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, + package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, + dev_creation_binaries, shell_mgmt_binaries, + mail_config_binaries, + sshkit_script_binaries, + ldconfig.real, ldconfig, confd, gpg, insserv, + apparmor_parser, update-mime, tzdata.config, tzdata.postinst, + systemd, systemd-machine, systemd-sysuser, + debconf-show, rollerd, bind9.postinst, sv, + gen_resolvconf., update-ca-certi, certbot, runsv, + qualys-cloud-ag, locales.postins, nomachine_binaries, + adclient, certutil, crlutil, pam-auth-update, parallels_insta, + openshift-launc, update-rc.d) + and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) + and not fd.name pmatch (safe_etc_dirs) + and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) + and not exe_running_docker_save + and not ansible_running_python + and not python_running_denyhosts + and not fluentd_writing_conf_files + and not user_known_write_etc_conditions + and not run_by_centrify + and not run_by_adclient + and not qualys_writing_conf_files + and not git_writing_nssdb + and not plesk_writing_keys + and not plesk_install_writing_apache_conf + and not plesk_running_mktemp + and not networkmanager_writing_resolv_conf + and not run_by_chef + and not add_shell_writing_shells_tmp + and not duply_writing_exclude_files + and not xmlcatalog_writing_files + and not parent_supervise_running_multilog + and not supervise_writing_status + and not pki_realm_writing_realms + and not htpasswd_writing_passwd + and not lvprogs_writing_lvm_archive + and 
not ovsdb_writing_openvswitch + and not datadog_writing_conf + and not curl_writing_pki_db + and not haproxy_writing_conf + and not java_writing_conf + and not dpkg_scripting + and not parent_ucf_writing_conf + and not rabbitmq_writing_conf + and not rook_writing_conf + and not php_handlers_writing_conf + and not sed_writing_temp_file + and not cron_start_writing_pam_env + and not httpd_writing_conf_logs + and not mysql_writing_conf + and not openvpn_writing_conf + and not consul_template_writing_conf + and not countly_writing_nginx_conf + and not ms_oms_writing_conf + and not ms_scx_writing_conf + and not azure_scripts_writing_conf + and not azure_networkwatcher_writing_conf + and not couchdb_writing_conf + and not update_texmf_writing_conf + and not slapadd_writing_conf + and not symantec_writing_conf + and not liveupdate_writing_conf + and not sosreport_writing_files + and not selinux_writing_conf + and not veritas_writing_config + and not nginx_writing_conf + and not nginx_writing_certs + and not chef_client_writing_conf + and not centrify_writing_krb + and not cockpit_writing_conf + and not ipsec_writing_conf + and not httpd_writing_ssl_conf + +- rule: Write below etc + desc: an attempt to write to any file below /etc + condition: write_etc_common + output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4])" + priority: ERROR + tags: [filesystem] + +- list: known_root_files + items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, + /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd] + +- list: known_root_directories + items: [/root/.oracle_jre_usage, /root/.ssh, 
/root/.subversion, /root/.nami] + +- macro: known_root_conditions + condition: (fd.name startswith /root/orcexec. + or fd.name startswith /root/.m2 + or fd.name startswith /root/.npm + or fd.name startswith /root/.pki + or fd.name startswith /root/.ivy2 + or fd.name startswith /root/.config/Cypress + or fd.name startswith /root/.config/pulse + or fd.name startswith /root/.config/configstore + or fd.name startswith /root/jenkins/workspace + or fd.name startswith /root/.jenkins + or fd.name startswith /root/.cache + or fd.name startswith /root/.sbt + or fd.name startswith /root/.java + or fd.name startswith /root/.glide + or fd.name startswith /root/.sonar + or fd.name startswith /root/.v8flag + or fd.name startswith /root/infaagent + or fd.name startswith /root/.local/lib/python + or fd.name startswith /root/.pm2 + or fd.name startswith /root/.gnupg + or fd.name startswith /root/.pgpass + or fd.name startswith /root/.theano + or fd.name startswith /root/.gradle + or fd.name startswith /root/.android + or fd.name startswith /root/.ansible + or fd.name startswith /root/.crashlytics + or fd.name startswith /root/.dbus + or fd.name startswith /root/.composer + or fd.name startswith /root/.gconf + or fd.name startswith /root/.nv) + +- rule: Write below root + desc: an attempt to write to any file directly below / or /root + condition: > + root_dir and evt.dir = < and open_write + and not fd.name in (known_root_files) + and not fd.directory in (known_root_directories) + and not exe_running_docker_save + and not gugent_writing_guestagent_log + and not known_root_conditions + output: "File below / or /root opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name)" + priority: ERROR + tags: [filesystem] + +- macro: cmp_cp_by_passwd + condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts) + +- rule: Read sensitive file trusted after startup + desc: > + an attempt to read any sensitive file (e.g. 
files containing user/password/authentication
    information) by a trusted program after startup. Trusted programs might read these files
    at startup to load initial state, but not afterwards.
+  condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd"
+  output: >
+    Sensitive file opened for reading by trusted program after startup (user=%user.name
+    command=%proc.cmdline parent=%proc.pname file=%fd.name gparent=%proc.aname[2])
+  priority: WARNING
+  tags: [filesystem]
+
+- list: read_sensitive_file_binaries
+  items: [
+    iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd,
+    vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update,
+    pam-auth-update, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport,
+    scxcimservera, adclient, rtvscand, cockpit-session
+    ]
+
+# Add conditions to this macro (probably in a separate file,
+# overwriting this macro) to allow for specific combinations of
+# programs accessing sensitive files.
+# fluentd_writing_conf_files is a good example to follow, as it
+# specifies both the program doing the writing as well as the specific
+# files it is allowed to modify.
+#
+# In this file, it just takes one of the macros in the base rule
+# and repeats it.
+
+- macro: user_read_sensitive_file_conditions
+  condition: cmp_cp_by_passwd
+
+- rule: Read sensitive file untrusted
+  desc: >
+    an attempt to read any sensitive file (e.g. files containing user/password/authentication
+    information). Exceptions are made for known trusted programs.
+ condition: > + sensitive_files and open_read + and proc_name_exists + and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, + cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, + vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, + in.proftpd, mandb, salt-minion, postgres_mgmt_binaries) + and not cmp_cp_by_passwd + and not ansible_running_python + and not proc.cmdline contains /usr/bin/mandb + and not run_by_qualys + and not run_by_chef + and not user_read_sensitive_file_conditions + and not perl_running_plesk + and not perl_running_updmap + and not veritas_driver_script + and not perl_running_centrifydc + output: > + Sensitive file opened for reading by non-trusted program (user=%user.name program=%proc.name + command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) + priority: WARNING + tags: [filesystem] + +# Only let rpm-related programs write to the rpm database +- rule: Write below rpm database + desc: an attempt to write to the rpm database by any non-rpm related program + condition: fd.name startswith /var/lib/rpm and open_write and not rpm_procs and not ansible_running_python and not chef_running_yum_dump + output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name)" + priority: ERROR + tags: [filesystem, software_mgmt] + +- macro: postgres_running_wal_e + condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") + +- macro: redis_running_prepost_scripts + condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) + +- macro: rabbitmq_running_scripts + condition: > + (proc.pname=beam.smp and + (proc.cmdline startswith "sh -c exec ps" or + proc.cmdline startswith "sh -c exec inet_gethost" or + proc.cmdline= "sh -s unix:cmd" or 
+      proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1"))
+
+- macro: rabbitmqctl_running_scripts
+  condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ")
+
+- rule: DB program spawned process
+  desc: >
+    a database-server related program spawned a new process other than itself.
+    This shouldn't occur and is a follow on from some SQL injection attacks.
+  condition: >
+    proc.pname in (db_server_binaries)
+    and spawned_process
+    and not proc.name in (db_server_binaries)
+    and not postgres_running_wal_e
+  output: >
+    Database-related program spawned process other than itself (user=%user.name
+    program=%proc.cmdline parent=%proc.pname)
+  priority: NOTICE
+  tags: [process, database]
+
+- rule: Modify binary dirs
+  desc: an attempt to modify any file below a set of binary directories.
+  condition: (bin_dir_rename) and modify and not package_mgmt_procs and not exe_running_docker_save
+  output: >
+    File below known binary directory renamed/removed (user=%user.name command=%proc.cmdline
+    operation=%evt.type file=%fd.name %evt.args)
+  priority: ERROR
+  tags: [filesystem]
+
+- rule: Mkdir binary dirs
+  desc: an attempt to create a directory below a set of binary directories.
+  condition: mkdir and bin_dir_mkdir and not package_mgmt_procs
+  output: >
+    Directory below known binary directory created (user=%user.name
+    command=%proc.cmdline directory=%evt.arg.path)
+  priority: ERROR
+  tags: [filesystem]
+
+# This list allows for easy additions to the set of commands allowed
+# to change thread namespace without having to copy and override the
+# entire change thread namespace rule.
+- list: user_known_change_thread_namespace_binaries
+  items: []
+
+- rule: Change thread namespace
+  desc: >
+    an attempt to change a program/thread's namespace (commonly done
+    as a part of creating a container) by calling setns.
+ condition: > + evt.type = setns + and not proc.name in (docker_binaries, k8s_binaries, lxd_binaries, sysdigcloud_binaries, sysdig, nsenter) + and not proc.name in (user_known_change_thread_namespace_binaries) + and not proc.name startswith "runc:" + and not proc.pname in (sysdigcloud_binaries) + and not java_running_sdjagent + and not kubelet_running_loopback + output: > + Namespace change (setns) by unexpected program (user=%user.name command=%proc.cmdline + parent=%proc.pname %container.info) + priority: NOTICE + tags: [process] + +# The binaries in this list and their descendents are *not* allowed +# spawn shells. This includes the binaries spawning shells directly as +# well as indirectly. For example, apache -> php/perl for +# mod_{php,perl} -> some shell is also not allowed, because the shell +# has apache as an ancestor. + +- list: protected_shell_spawning_binaries + items: [ + http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, + fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 + ] + +- macro: parent_java_running_zookeeper + condition: (proc.pname=java and proc.pcmdline contains org.apache.zookeeper.server) + +- macro: parent_java_running_kafka + condition: (proc.pname=java and proc.pcmdline contains kafka.Kafka) + +- macro: parent_java_running_elasticsearch + condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch) + +- macro: parent_java_running_activemq + condition: (proc.pname=java and proc.pcmdline contains activemq.jar) + +- macro: parent_java_running_cassandra + condition: (proc.pname=java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon)) + +- macro: parent_java_running_jboss_wildfly + condition: (proc.pname=java and proc.pcmdline contains org.jboss) + +- macro: parent_java_running_glassfish + condition: (proc.pname=java and proc.pcmdline contains com.sun.enterprise.glassfish) + +- macro: 
parent_java_running_hadoop + condition: (proc.pname=java and proc.pcmdline contains org.apache.hadoop) + +- macro: parent_java_running_datastax + condition: (proc.pname=java and proc.pcmdline contains com.datastax) + +- macro: nginx_starting_nginx + condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") + +- macro: nginx_running_aws_s3_cp + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") + +- macro: consul_running_net_scripts + condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) + +- macro: consul_running_alert_checks + condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") + +- macro: serf_script + condition: (proc.cmdline startswith "sh -c serf") + +- macro: check_process_status + condition: (proc.cmdline startswith "sh -c kill -0 ") + +# In some cases, you may want to consider node processes run directly +# in containers as protected shell spawners. Examples include using +# pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct +# entrypoint of the container, and when the node app is a long-lived +# server using something like express. +# +# However, there are other uses of node related to build pipelines for +# which node is not really a server but instead a general scripting +# tool. In these cases, shells are very likely and in these cases you +# don't want to consider node processes protected shell spawners. +# +# We have to choose one of these cases, so we consider node processes +# as unprotected by default. If you want to consider any node process +# run in a container as a protected shell spawner, override the below +# macro to remove the "never_true" clause, which allows it to take effect. 
+- macro: possibly_node_in_container + condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe)) + +- macro: protected_shell_spawner + condition: > + (proc.aname in (protected_shell_spawning_binaries) + or parent_java_running_zookeeper + or parent_java_running_kafka + or parent_java_running_elasticsearch + or parent_java_running_activemq + or parent_java_running_cassandra + or parent_java_running_jboss_wildfly + or parent_java_running_glassfish + or parent_java_running_hadoop + or parent_java_running_datastax + or possibly_node_in_container) + +- list: mesos_shell_binaries + items: [mesos-docker-ex, mesos-slave, mesos-health-ch] + +# Note that runsv is both in protected_shell_spawner and the +# exclusions by pname. This means that runsv can itself spawn shells +# (the ./run and ./finish scripts), but the processes runsv can not +# spawn shells. +- rule: Run shell untrusted + desc: an attempt to spawn a shell below a non-shell application. Specific applications are monitored. 
+ condition: > + spawned_process + and shell_procs + and proc.pname exists + and protected_shell_spawner + and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, + needrestart_binaries, + mesos_shell_binaries, + erl_child_setup, exechealthz, + PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, + lb-controller, nvidia-installe, runsv, statsite, erlexec) + and not proc.cmdline in (known_shell_spawn_cmdlines) + and not proc.aname in (unicorn_launche) + and not consul_running_net_scripts + and not consul_running_alert_checks + and not nginx_starting_nginx + and not nginx_running_aws_s3_cp + and not run_by_package_mgmt_binaries + and not serf_script + and not check_process_status + and not run_by_foreman + and not python_mesos_marathon_scripting + and not splunk_running_forwarder + and not postgres_running_wal_e + and not redis_running_prepost_scripts + and not rabbitmq_running_scripts + and not rabbitmqctl_running_scripts + and not user_shell_container_exclusions + output: > + Shell spawned by untrusted binary (user=%user.name shell=%proc.name parent=%proc.pname + cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] + gggparent=%proc.aname[4] ggggparent=%proc.aname[5]) + priority: DEBUG + tags: [shell] + +- macro: trusted_containers + condition: (container.image startswith sysdig/agent or + (container.image startswith sysdig/falco and + not container.image startswith sysdig/falco-event-generator) or + container.image startswith quay.io/sysdig or + container.image startswith sysdig/sysdig or + container.image startswith gcr.io/google_containers/hyperkube or + container.image startswith quay.io/coreos/flannel or + container.image startswith gcr.io/google_containers/kube-proxy or + container.image startswith calico/node or + container.image startswith rook/toolbox or + container.image startswith registry.access.redhat.com/openshift3/logging-fluentd or + container.image 
startswith registry.access.redhat.com/openshift3/logging-elasticsearch or + container.image startswith registry.access.redhat.com/openshift3/metrics-cassandra or + container.image startswith openshift3/ose-sti-builder or + container.image startswith registry.access.redhat.com/openshift3/ose-sti-builder or + container.image startswith cloudnativelabs/kube-router or + container.image startswith "consul:" or + container.image startswith mesosphere/mesos-slave or + container.image startswith istio/proxy_ or + container.image startswith datadog/docker-dd-agent) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to specify additional containers that are +# trusted and therefore allowed to run privileged. +# +# In this file, it just takes one of the images in trusted_containers +# and repeats it. +- macro: user_trusted_containers + condition: (container.image startswith sysdig/agent) + +# Add conditions to this macro (probably in a separate file, +# overwriting this macro) to specify additional containers that are +# allowed to perform sensitive mounts. +# +# In this file, it just takes one of the images in trusted_containers +# and repeats it. +- macro: user_sensitive_mount_containers + condition: (container.image startswith sysdig/agent) + +- rule: Launch Privileged Container + desc: Detect the initial process started in a privileged container. Exceptions are made for known trusted images. + condition: > + evt.type=execve and proc.vpid=1 and container + and container.privileged=true + and not trusted_containers + and not user_trusted_containers + output: Privileged container started (user=%user.name command=%proc.cmdline %container.info image=%container.image) + priority: INFO + tags: [container, cis] + +# For now, only considering a full mount of /etc as +# sensitive. 
Ideally, this would also consider all subdirectories
+# below /etc as well, but the globbing mechanism used by sysdig
+# doesn't allow exclusions of a full pattern, only single characters.
+- macro: sensitive_mount
+  condition: (container.mount.dest[/proc*] != "N/A" or
+              container.mount.dest[/var/run/docker.sock] != "N/A" or
+              container.mount.dest[/] != "N/A" or
+              container.mount.dest[/etc] != "N/A" or
+              container.mount.dest[/root*] != "N/A")
+
+# The steps libcontainer performs to set up the root program for a container are:
+# - clone + exec self to a program runc:[0:PARENT]
+# - clone a program runc:[1:CHILD] which sets up all the namespaces
+# - clone a second program runc:[2:INIT] + exec to the root program.
+# The parent of runc:[2:INIT] is runc:[0:PARENT]
+# As soon as 1:CHILD is created, 0:PARENT exits, so there's a race
+# where at the time 2:INIT execs the root program, 0:PARENT might have
+# already exited, or might still be around. So we handle both.
+# We also let runc:[1:CHILD] count as the parent process, which can occur
+# when we lose events and lose track of state.
+
+- macro: container_entrypoint
+  condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], docker-runc, exe))
+
+- rule: Launch Sensitive Mount Container
+  desc: >
+    Detect the initial process started by a container that has a mount from a sensitive host directory
+    (i.e. /proc). Exceptions are made for known trusted images.
+  condition: >
+    evt.type=execve and proc.vpid=1 and container
+    and sensitive_mount
+    and not trusted_containers
+    and not user_sensitive_mount_containers
+  output: Container with sensitive mount started (user=%user.name command=%proc.cmdline %container.info image=%container.image mounts=%container.mounts)
+  priority: INFO
+  tags: [container, cis]
+
+# In a local/user rules file, you could override this macro to
+# explicitly enumerate the container images that you want to run in
+# your environment. In this main falco rules file, there isn't any way
+# to know all the containers that can run, so any container is
+# allowed, by using a filter that is guaranteed to evaluate to true
+# (the same proc.vpid=1 that's in the Launch Disallowed Container
+# rule). In the overridden macro, the condition would look something
+# like (container.image startswith vendor/container-1 or
+# container.image startswith vendor/container-2 or ...)
+
+- macro: allowed_containers
+  condition: (proc.vpid=1)
+
+- rule: Launch Disallowed Container
+  desc: >
+    Detect the initial process started by a container that is not in a list of allowed containers.
+  condition: evt.type=execve and proc.vpid=1 and container and not allowed_containers
+  output: Container started and not in allowed list (user=%user.name command=%proc.cmdline %container.info image=%container.image)
+  priority: WARNING
+  tags: [container]
+
+# Anything run interactively by root
+# - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive
+#   output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)"
+#   priority: WARNING
+
+- rule: System user interactive
+  desc: an attempt to run interactive commands by a system (i.e. non-login) user
+  condition: spawned_process and system_users and interactive
+  output: "System user ran an interactive command (user=%user.name command=%proc.cmdline)"
+  priority: INFO
+  tags: [users]
+
+- rule: Terminal shell in container
+  desc: A shell was used as the entrypoint/exec point into a container with an attached terminal.
+  condition: >
+    spawned_process and container
+    and shell_procs and proc.tty != 0
+    and container_entrypoint
+  output: >
+    A shell was spawned in a container with an attached terminal (user=%user.name %container.info
+    shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty)
+  priority: NOTICE
+  tags: [container, shell]
+
+# For some container types (mesos), there isn't a container image to
+# work with, and the container name is autogenerated, so there isn't
+# any stable aspect of the software to work with. In this case, we
+# fall back to allowing certain command lines.
+
+- list: known_shell_spawn_cmdlines
+  items: [
+    '"sh -c uname -p 2> /dev/null"',
+    '"sh -c uname -s 2>&1"',
+    '"sh -c uname -r 2>&1"',
+    '"sh -c uname -v 2>&1"',
+    '"sh -c uname -a 2>&1"',
+    '"sh -c ruby -v 2>&1"',
+    '"sh -c getconf CLK_TCK"',
+    '"sh -c getconf PAGESIZE"',
+    '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"',
+    '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"',
+    '"sh -c /sbin/ldconfig -p 2>/dev/null"',
+    '"sh -c stty -a 2>/dev/null"',
+    '"sh -c stty -a < /dev/tty"',
+    '"sh -c stty -g < /dev/tty"',
+    '"sh -c node index.js"',
+    '"sh -c node index"',
+    '"sh -c node ./src/start.js"',
+    '"sh -c node app.js"',
+    '"sh -c node -e \"require(''nan'')\""',
+    '"sh -c node -e \"require(''nan'')\")"',
+    '"sh -c node $NODE_DEBUG_OPTION index.js "',
+    '"sh -c crontab -l 2"',
+    '"sh -c lsb_release -a"',
+    '"sh -c lsb_release -is 2>/dev/null"',
+    '"sh -c whoami"',
+    '"sh -c node_modules/.bin/bower-installer"',
+    '"sh -c /bin/hostname -f 2> /dev/null"',
+    '"sh -c locale -a"',
+    '"sh -c -t -i"',
+    '"sh -c openssl version"',
+    '"bash -c id -Gn kafadmin"',
+    '"sh -c /bin/sh -c ''date +%%s''"'
+    ]
+
+# This list allows for easy additions to the set of commands allowed
+# to run shells in containers without having to copy
+# and override the entire run shell in container macro. Once
+# https://github.com/draios/falco/issues/255 is fixed this will be a
+# bit easier, as someone could append to any of the existing lists.
+- list: user_known_shell_spawn_binaries
+  items: []
+
+# This macro allows for easy additions to the set of commands allowed
+# to run shells in containers without having to override the entire
+# rule. Its default value is an expression that always is false, which
+# becomes true when the "not ..." in the rule is applied.
+- macro: user_shell_container_exclusions
+  condition: (never_true)
+
+- macro: login_doing_dns_lookup
+  condition: (proc.name=login and fd.l4proto=udp and fd.sport=53)
+
+# sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets
+# systemd can listen on ports to launch things like sshd on demand
+- rule: System procs network activity
+  desc: any network activity performed by system binaries that are not expected to send or receive any network traffic
+  condition: >
+    (fd.sockfamily = ip and system_procs)
+    and (inbound_outbound)
+    and not proc.name in (systemd, hostid)
+    and not login_doing_dns_lookup
+  output: >
+    Known system binary sent/received network traffic
+    (user=%user.name command=%proc.cmdline connection=%fd.name)
+  priority: NOTICE
+  tags: [network]
+
+- list: openvpn_udp_ports
+  items: [1194, 1197, 1198, 8080, 9201]
+
+- list: l2tp_udp_ports
+  items: [500, 1701, 4500, 10000]
+
+- list: statsd_ports
+  items: [8125]
+
+- list: ntp_ports
+  items: [123]
+
+# Some applications will connect a udp socket to an address only to
+# test connectivity. Assuming the udp connect works, they will follow
+# up with a tcp connect that actually sends/receives data.
+#
+# With that in mind, we listed a few commonly seen ports here to avoid
+# some false positives. In addition, we make the main rule opt-in, so
+# it's disabled by default.
+ +- list: test_connect_ports + items: [0, 9, 80, 3306] + +- macro: do_unexpected_udp_check + condition: (never_true) + +- list: expected_udp_ports + items: [53, openvpn_udp_ports, l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports] + +- macro: expected_udp_traffic + condition: fd.port in (expected_udp_ports) + +- rule: Unexpected UDP Traffic + desc: UDP traffic not on port 53 (DNS) or other commonly used ports + condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic + output: > + Unexpected UDP Traffic Seen + (user=%user.name command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args) + priority: NOTICE + tags: [network] + +# With the current restriction on system calls handled by falco +# (e.g. excluding read/write/sendto/recvfrom/etc, this rule won't +# trigger). +# - rule: Ssh error in syslog +# desc: any ssh errors (failed logins, disconnects, ...) sent to syslog +# condition: syslog and ssh_error_message and evt.dir = < +# output: "sshd sent error message to syslog (error=%evt.buffer)" +# priority: WARNING + +- macro: somebody_becoming_themself + condition: ((user.name=nobody and evt.arg.uid=nobody) or + (user.name=www-data and evt.arg.uid=www-data) or + (user.name=_apt and evt.arg.uid=_apt) or + (user.name=postfix and evt.arg.uid=postfix) or + (user.name=pki-agent and evt.arg.uid=pki-agent) or + (user.name=pki-acme and evt.arg.uid=pki-acme) or + (user.name=nfsnobody and evt.arg.uid=nfsnobody) or + (user.name=postgres and evt.arg.uid=postgres)) + +- macro: nrpe_becoming_nagios + condition: (proc.name=nrpe and evt.arg.uid=nagios) + +# In containers, the user name might be for a uid that exists in the +# container but not on the host. (See +# https://github.com/draios/sysdig/issues/954). So in that case, allow +# a setuid. 
+- macro: known_user_in_container + condition: (container and user.name != "N/A") + +# sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs +- rule: Non sudo setuid + desc: > + an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" + suing to itself are also excluded, as setuid calls typically involve dropping privileges. + condition: > + evt.type=setuid and evt.dir=> + and (known_user_in_container or not container) + and not user.name=root and not somebody_becoming_themself + and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, + nomachine_binaries) + and not java_running_sdjagent + and not nrpe_becoming_nagios + output: > + Unexpected setuid call by non-sudo, non-root program (user=%user.name cur_uid=%user.uid parent=%proc.pname + command=%proc.cmdline uid=%evt.arg.uid) + priority: NOTICE + tags: [users] + +- rule: User mgmt binaries + desc: > + activity by any programs that can manage users, passwords, or permissions. sudo and su are excluded. + Activity in containers is also excluded--some containers create custom users on top + of a base linux distribution at startup. + Some innocuous commandlines that don't actually change anything are excluded. 
+ condition: >
+ spawned_process and proc.name in (user_mgmt_binaries) and
+ not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and
+ not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and
+ not proc.cmdline startswith "passwd -S" and
+ not proc.cmdline startswith "useradd -D" and
+ not proc.cmdline startswith "systemd --version" and
+ not run_by_qualys and
+ not run_by_sumologic_securefiles and
+ not run_by_yum and
+ not run_by_ms_oms and
+ not run_by_google_accounts_daemon
+ output: >
+ User management binary command run outside of container
+ (user=%user.name command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4])
+ priority: NOTICE
+ tags: [host, users]
+
+- list: allowed_dev_files
+ items: [
+ /dev/null, /dev/stdin, /dev/stdout, /dev/stderr,
+ /dev/random, /dev/urandom, /dev/console, /dev/kmsg
+ ]
+
+# (we may need to add additional checks against false positives, see:
+# https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153)
+- rule: Create files below dev
+ desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev.
+ condition: >
+ fd.directory = /dev and
+ (evt.type = creat or (evt.type = open and evt.arg.flags contains O_CREAT))
+ and not proc.name in (dev_creation_binaries)
+ and not fd.name in (allowed_dev_files)
+ and not fd.name startswith /dev/tty
+ output: "File created below /dev by untrusted program (user=%user.name command=%proc.cmdline file=%fd.name)"
+ priority: ERROR
+ tags: [filesystem]
+
+
+# In a local/user rules file, you could override this macro to
+# explicitly enumerate the container images that you want to allow
+# access to EC2 metadata. In this main falco rules file, there isn't
+# any way to know all the containers that should have access, so any
+# container is allowed, by repeating the "container" macro. 
In the +# overridden macro, the condition would look something like +# (container.image startswith vendor/container-1 or container.image +# startswith vendor/container-2 or ...) +- macro: ec2_metadata_containers + condition: container + +# On EC2 instances, 169.254.169.254 is a special IP used to fetch +# metadata about the instance. It may be desirable to prevent access +# to this IP from containers. +- rule: Contact EC2 Instance Metadata Service From Container + desc: Detect attempts to contact the EC2 Instance Metadata Service from a container + condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers + output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image) + priority: NOTICE + tags: [network, aws, container] + +# In a local/user rules file, you should override this macro with the +# IP address of your k8s api server. The IP 1.2.3.4 is a placeholder +# IP that is not likely to be seen in practice. +- macro: k8s_api_server + condition: (fd.sip="1.2.3.4" and fd.sport=8080) + +# In a local/user rules file, list the container images that are +# allowed to contact the K8s API Server from within a container. This +# might cover cases where the K8s infrastructure itself is running +# within a container. 
+- macro: k8s_containers + condition: > + (container.image startswith gcr.io/google_containers/hyperkube-amd64 or + container.image startswith gcr.io/google_containers/kube2sky or + container.image startswith sysdig/agent or + container.image startswith sysdig/falco or + container.image startswith sysdig/sysdig) + +- rule: Contact K8S API Server From Container + desc: Detect attempts to contact the K8S API Server from a container + condition: outbound and k8s_api_server and container and not k8s_containers + output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image connection=%fd.name) + priority: NOTICE + tags: [network, k8s, container] + +# In a local/user rules file, list the container images that are +# allowed to contact NodePort services from within a container. This +# might cover cases where the K8s infrastructure itself is running +# within a container. +# +# By default, all containers are allowed to contact NodePort services. +- macro: nodeport_containers + condition: container + +- rule: Unexpected K8s NodePort Connection + desc: Detect attempts to use K8s NodePorts from a container + condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 and container and not nodeport_containers + output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name) + priority: NOTICE + tags: [network, k8s, container] + +# Application rules have moved to application_rules.yaml. Please look +# there if you want to enable them by adding to +# falco_rules.local.yaml. + diff --git a/stable/falco/templates/NOTES.txt b/stable/falco/templates/NOTES.txt new file mode 100644 index 000000000000..834b473a3054 --- /dev/null +++ b/stable/falco/templates/NOTES.txt @@ -0,0 +1,5 @@ +Falco agents are spinning up on each node in your cluster. After a few +seconds, they are going to start monitoring your containers looking for +security issues. + +No further action should be required. 
diff --git a/stable/falco/templates/_helpers.tpl b/stable/falco/templates/_helpers.tpl new file mode 100644 index 000000000000..c4d786e8b3af --- /dev/null +++ b/stable/falco/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "falco.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "falco.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "falco.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/stable/falco/templates/clusterrole.yaml b/stable/falco/templates/clusterrole.yaml new file mode 100644 index 000000000000..e361b9202dd1 --- /dev/null +++ b/stable/falco/templates/clusterrole.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rbac.create }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "falco.fullname" .}} + labels: + app: {{ template "falco.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: + - apiGroups: + - extensions + - "" + resources: + - nodes + - namespaces + - pods + - replicationcontrollers + - services + - events + - configmaps + verbs: + - get + - list + - watch + - nonResourceURLs: + - /healthz + - /healthz/* + verbs: + - get +{{- end }} diff --git a/stable/falco/templates/clusterrolebinding.yaml b/stable/falco/templates/clusterrolebinding.yaml new file mode 100644 index 000000000000..3191e9a3d046 --- /dev/null +++ b/stable/falco/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: {{ template "falco.fullname" .}} + namespace: default + labels: + app: {{ template "falco.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: + - kind: ServiceAccount + name: {{ template "falco.fullname" .}} + namespace: default +roleRef: + kind: ClusterRole + name: {{ template "falco.fullname" .}} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/stable/falco/templates/configmap.yaml b/stable/falco/templates/configmap.yaml new file mode 100644 index 000000000000..bf4a2c2f9ac0 --- /dev/null +++ b/stable/falco/templates/configmap.yaml @@ -0,0 +1,126 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "falco.fullname" . }} + labels: + app: {{ template "falco.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + falco.yaml: |- + # File(s) or Directories containing Falco rules, loaded at startup. + # The name "rules_file" is only for backwards compatibility. + # If the entry is a file, it will be read directly. If the entry is a directory, + # every file in that directory will be read, in alphabetical order. 
+ # + # falco_rules.yaml ships with the falco package and is overridden with + # every new software version. falco_rules.local.yaml is only created + # if it doesn't exist. If you want to customize the set of rules, add + # your customizations to falco_rules.local.yaml. + # + # The files will be read in the order presented here, so make sure if + # you have overrides they appear in later files. + rules_file: + {{- range .Values.falco.rulesFile }} + - {{ . }} + {{- end }} + + # Whether to output events in json or text + {{- if .Values.falco.gcsccIntegration.enabled }} + json_output: true + {{- else }} + json_output: {{ .Values.falco.jsonOutput }} + {{- end }} + + # When using json output, whether or not to include the "output" property + # itself (e.g. "File below a known binary directory opened for writing + # (user=root ....") in the json output. + json_include_output_property: {{ .Values.falco.jsonIncludeOutputProperty }} + + # Send information logs to stderr and/or syslog Note these are *not* security + # notification logs! These are just Falco lifecycle (and possibly error) logs. + log_stderr: {{ .Values.falco.logStderr }} + log_syslog: {{ .Values.falco.logSyslog }} + + # Minimum log level to include in logs. Note: these levels are + # separate from the priority field of rules. This refers only to the + # log level of falco's internal logging. Can be one of "emergency", + # "alert", "critical", "error", "warning", "notice", "info", "debug". + log_level: {{ .Values.falco.logLevel }} + + # Minimum rule priority level to load and run. All rules having a + # priority more severe than this level will be loaded/run. Can be one + # of "emergency", "alert", "critical", "error", "warning", "notice", + # "info", "debug". + priority: {{ .Values.falco.priority }} + + # Whether or not output to any of the output channels below is + # buffered. 
Defaults to true + buffered_outputs: {{ .Values.falco.bufferedOutputs }} + + # A throttling mechanism implemented as a token bucket limits the + # rate of falco notifications. This throttling is controlled by the following configuration + # options: + # - rate: the number of tokens (i.e. right to send a notification) + # gained per second. Defaults to 1. + # - max_burst: the maximum number of tokens outstanding. Defaults to 1000. + # + # With these defaults, falco could send up to 1000 notifications after + # an initial quiet period, and then up to 1 notification per second + # afterward. It would gain the full burst back after 1000 seconds of + # no activity. + outputs: + rate: {{ .Values.falco.outputs.rate }} + max_burst: {{ .Values.falco.outputs.maxBurst }} + + # Where security notifications should go. + # Multiple outputs can be enabled. + + syslog_output: + enabled: {{ .Values.falco.syslogOutput.enabled }} + + # If keep_alive is set to true, the file will be opened once and + # continuously written to, with each output message on its own + # line. If keep_alive is set to false, the file will be re-opened + # for each output message. + # + # Also, the file will be closed and reopened if falco is signaled with + # SIGUSR1. + + file_output: + enabled: {{ .Values.falco.fileOutput.enabled }} + keep_alive: {{ .Values.falco.fileOutput.keepAlive }} + filename: {{ .Values.falco.fileOutput.filename }} + + stdout_output: + enabled: {{ .Values.falco.stdoutOutput.enabled }} + + # Possible additional things you might want to do with program output: + # - send to a slack webhook: + # program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" + # - logging (alternate method than syslog): + # program: logger -t falco-test + # - send over a network connection: + # program: nc host.example.com 80 + + # If keep_alive is set to true, the program will be started once and + # continuously written to, with each output message on its own + # line. 
If keep_alive is set to false, the program will be re-spawned
+ # for each output message.
+ #
+ # Also, the program will be closed and reopened if falco is signaled with
+ # SIGUSR1.
+ {{- if .Values.falco.gcsccIntegration.enabled }}
+ program_output:
+ enabled: true
+ keep_alive: false
+ program: "\"curl -d @- -X POST --header 'Content-Type: application/json' --header 'Authorization: {{ .Values.falco.gcsccIntegration.webhookAuthenticationToken }}' {{ .Values.falco.gcsccIntegration.webhookUrl }}/\""
+ {{- else }}
+ program_output:
+ enabled: {{ .Values.falco.programOutput.enabled }}
+ keep_alive: {{ .Values.falco.programOutput.keepAlive }}
+ program: {{ .Values.falco.programOutput.program }}
+ {{- end }}
+
+{{ (.Files.Glob "rules/*").AsConfig | indent 2 }}
diff --git a/stable/falco/templates/daemonset.yaml b/stable/falco/templates/daemonset.yaml
new file mode 100644
index 000000000000..5d102dd7fec2
--- /dev/null
+++ b/stable/falco/templates/daemonset.yaml
@@ -0,0 +1,87 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: {{ template "falco.fullname" . }}
+ labels:
+ app: {{ template "falco.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + template: + metadata: + name: {{ template "falco.fullname" .}} + labels: + app: {{ template "falco.fullname" .}} + role: security + spec: +{{- if .Values.rbac.create }} + serviceAccountName: {{ template "falco.fullname" .}} +{{- else }} + serviceAccountName: {{ .Values.rbac.serviceAccountName }} +{{- end }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: +{{ toYaml .Values.resources | indent 12 }} + securityContext: + privileged: true + args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes.default", "-pk"] + volumeMounts: + - mountPath: /host/var/run/docker.sock + name: docker-socket + - mountPath: /host/dev + name: dev-fs + - mountPath: /host/proc + name: proc-fs + readOnly: true + - mountPath: /host/boot + name: boot-fs + readOnly: true + - mountPath: /host/lib/modules + name: lib-modules + readOnly: true + - mountPath: /host/usr + name: usr-fs + readOnly: true + - mountPath: /etc/falco + name: config-volume + volumes: + - name: dshm + emptyDir: + medium: Memory + - name: docker-socket + hostPath: + path: /var/run/docker.sock + - name: dev-fs + hostPath: + path: /dev + - name: proc-fs + hostPath: + path: /proc + - name: boot-fs + hostPath: + path: /boot + - name: lib-modules + hostPath: + path: /lib/modules + - name: usr-fs + hostPath: + path: /usr + - name: config-volume + configMap: + name: {{ template "falco.fullname" . 
}} + items: + - key: falco.yaml + path: falco.yaml + - key: falco_rules.yaml + path: falco_rules.yaml + - key: falco_rules.local.yaml + path: falco_rules.local.yaml + + updateStrategy: + type: {{ default "OnDelete" .Values.daemonset.updateStrategy | quote }} diff --git a/stable/falco/templates/deployment.yaml b/stable/falco/templates/deployment.yaml new file mode 100644 index 000000000000..fee41d272105 --- /dev/null +++ b/stable/falco/templates/deployment.yaml @@ -0,0 +1,21 @@ +{{- if .Values.deployment.enabled }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ template "falco.fullname" . }}-event-generator + labels: + app: {{ template "falco.fullname" . }}-event-generator + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + replicas: {{ .Values.deployment.replicas }} + template: + metadata: + labels: + app: {{ template "falco.fullname" . }}-event-generator + spec: + containers: + - name: {{ template "falco.fullname" . }}-event-generator + image: sysdig/falco-event-generator:latest +{{- end }} diff --git a/stable/falco/templates/serviceaccount.yaml b/stable/falco/templates/serviceaccount.yaml new file mode 100644 index 000000000000..f329ecb17569 --- /dev/null +++ b/stable/falco/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.rbac.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "falco.fullname" .}} + labels: + app: {{ template "falco.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end }} diff --git a/stable/falco/values.yaml b/stable/falco/values.yaml new file mode 100644 index 000000000000..df66331aecbc --- /dev/null +++ b/stable/falco/values.yaml @@ -0,0 +1,144 @@ +# Default values for falco. 
+
+image:
+ repository: sysdig/falco
+ tag: latest
+ pullPolicy: Always
+
+resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 30m
+ # memory: 128Mi
+ # requests:
+ # cpu: 20m
+ # memory: 128Mi
+
+rbac:
+ # Create and use rbac resources
+ create: true
+ # Ignored if rbac.create is true
+ serviceAccountName: default
+
+deployment:
+ enabled: false
+ replicas: 1
+
+daemonset: {}
+ # Allow the DaemonSet to perform a rolling update on helm update
+ # ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
+ # If you do want to specify the update strategy, uncomment the following lines, adjust
+ # them as necessary, and remove the curly braces after 'daemonset:'.
+ # updateStrategy: RollingUpdate
+
+falco:
+ # The location of the rules file(s). This can contain one or more paths to
+ # separate rules files.
+ rulesFile:
+ - /etc/falco/falco_rules.yaml
+ - /etc/falco/falco_rules.local.yaml
+ - /etc/falco/rules.d
+
+ # Whether to output events in json or text
+ jsonOutput: false
+
+ # When using json output, whether or not to include the "output" property
+ # itself (e.g. "File below a known binary directory opened for writing
+ # (user=root ....") in the json output.
+ jsonIncludeOutputProperty: true
+
+ # Send information logs to stderr and/or syslog Note these are *not* security
+ # notification logs! These are just Falco lifecycle (and possibly error) logs.
+ logStderr: true
+ logSyslog: true
+
+ # Minimum log level to include in logs. Note: these levels are
+ # separate from the priority field of rules. This refers only to the
+ # log level of falco's internal logging. 
Can be one of "emergency", + # "alert", "critical", "error", "warning", "notice", "info", "debug". + logLevel: info + + # Minimum rule priority level to load and run. All rules having a + # priority more severe than this level will be loaded/run. Can be one + # of "emergency", "alert", "critical", "error", "warning", "notice", + # "info", "debug". + priority: debug + + # Whether or not output to any of the output channels below is + # buffered. + bufferedOutputs: false + + # A throttling mechanism implemented as a token bucket limits the + # rate of falco notifications. This throttling is controlled by the following configuration + # options: + # - rate: the number of tokens (i.e. right to send a notification) + # gained per second. Defaults to 1. + # - max_burst: the maximum number of tokens outstanding. Defaults to 1000. + # + # With these defaults, falco could send up to 1000 notifications after + # an initial quiet period, and then up to 1 notification per second + # afterward. It would gain the full burst back after 1000 seconds of + # no activity. + outputs: + rate: 1 + maxBurst: 1000 + + # Where security notifications should go. + # Multiple outputs can be enabled. + syslogOutput: + enabled: true + + # If keep_alive is set to true, the file will be opened once and + # continuously written to, with each output message on its own + # line. If keep_alive is set to false, the file will be re-opened + # for each output message. + # + # Also, the file will be closed and reopened if falco is signaled with + # SIGUSR1. 
+ fileOutput: + enabled: false + keepAlive: false + filename: ./events.txt + + stdoutOutput: + enabled: true + + # Possible additional things you might want to do with program output: + # - send to a slack webhook: + # program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" + # - logging (alternate method than syslog): + # program: logger -t falco-test + # - send over a network connection: + # program: nc host.example.com 80 + + # If keep_alive is set to true, the program will be started once and + # continuously written to, with each output message on its own + # line. If keep_alive is set to false, the program will be re-spawned + # for each output message. + # + # Also, the program will be closed and reopened if falco is signaled with + # SIGUSR1. + programOutput: + enabled: false + keepAlive: false + program: mail -s "Falco Notification" someone@example.com + + # If Google Cloud Security Command Center integration is enabled, falco will + # be configured to use this integration as program_output and sets the following values: + # * json_output: true + # * program_output: + # enabled: true + # keep_alive: false + # program: "\"curl -d @- -X POST --header 'Content-Type: application/json' --header 'Authorization: authentication_token' url \"" + gcsccIntegration: + enabled: false + webhookUrl: http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events + webhookAuthenticationToken: b27511f86e911f20b9e0f9c8104b4ec4 + +# Allow falco to run on Kubernetes 1.6 masters. 
+tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master From 71f7b3f5f2bcd07f704383edbc3b8cf55351b4c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Thu, 31 May 2018 18:39:19 +0200 Subject: [PATCH 02/14] Fix indentation and other stuff reported by CI --- stable/falco/Chart.yaml | 4 ++-- stable/falco/values.yaml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/stable/falco/Chart.yaml b/stable/falco/Chart.yaml index 1e61d257bbfe..9c0d296a1bdd 100644 --- a/stable/falco/Chart.yaml +++ b/stable/falco/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: falco -version: 0.1 +version: '0.1' description: Sysdig Falco keywords: - monitoring @@ -14,5 +14,5 @@ icon: https://sysdig.com/wp-content/uploads/2016/08/falco_blog_480.jpg sources: - https://github.com/draios/falco maintainers: - - name: Néstor Salceda + - name: nestorsalceda email: nestor.salceda@sysdig.com diff --git a/stable/falco/values.yaml b/stable/falco/values.yaml index df66331aecbc..c3f815951132 100644 --- a/stable/falco/values.yaml +++ b/stable/falco/values.yaml @@ -38,9 +38,9 @@ falco: # The location of the rules file(s). This can contain one or more paths to # separate rules files. 
rulesFile: - - /etc/falco/falco_rules.yaml - - /etc/falco/falco_rules.local.yaml - - /etc/falco/rules.d + - /etc/falco/falco_rules.yaml + - /etc/falco/falco_rules.local.yaml + - /etc/falco/rules.d # Whether to output events in json or text jsonOutput: false From 6a543597c1cc23d7b9e8599660eff298868ce7db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Fri, 1 Jun 2018 08:56:13 +0200 Subject: [PATCH 03/14] Add appVersion to Chart.yaml --- stable/falco/Chart.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/stable/falco/Chart.yaml b/stable/falco/Chart.yaml index 9c0d296a1bdd..bcaa654cfa2a 100644 --- a/stable/falco/Chart.yaml +++ b/stable/falco/Chart.yaml @@ -1,6 +1,7 @@ apiVersion: v1 name: falco version: '0.1' +appVersion: 0.10.0 description: Sysdig Falco keywords: - monitoring From 9721844257e0c2e740d5840a8a3ce99009d64e7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Mon, 4 Jun 2018 13:24:38 +0200 Subject: [PATCH 04/14] Specify container resources --- stable/falco/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/stable/falco/README.md b/stable/falco/README.md index 0940755ba36d..79adf7823eb1 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -48,6 +48,7 @@ The following table lists the configurable parameters of the Falco chart and the | `image.repository` | The image repository to pull from | `sysdig/falco` | | `image.tag` | The image tag to pull | `latest` | | `image.pullPolicy` | The image pull policy | `Always` | +| `resources` | Specify container resources | `{}` | | `rbac.create` | If true, create & use RBAC resources | `true` | | `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | | `deployment.enabled` | Run falco-event-generator for sample events | `false` | From e79e38f1c91795a73fe093597e1fcc39a722fc1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Tue, 5 Jun 2018 17:41:49 +0200 Subject: [PATCH 05/14] 
Allow to load external Falco rules --- stable/falco/README.md | 62 +++++++++++++++++++++ stable/falco/templates/configmap-rules.yaml | 10 ++++ stable/falco/templates/daemonset.yaml | 5 ++ 3 files changed, 77 insertions(+) create mode 100644 stable/falco/templates/configmap-rules.yaml diff --git a/stable/falco/README.md b/stable/falco/README.md index 79adf7823eb1..2773db6acbc2 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -89,3 +89,65 @@ $ helm install --name my-release -f values.yaml stable/falco ``` > **Tip**: You can use the default [values.yaml](values.yaml) + +## Loading custom rules + +Falco ships with a nice default ruleset. Is a good starting point but sooner or later we are going to need to add custom rules which fits our needs. + +A few days ago [we published several rules](https://github.com/draios/falco-extras) for well known container images. + +So the question is: How we can load custom rules in our Falco deployment? + +```bash +$ helm install --name falco stable/falco +``` + +When deploying Falco using this chart, a configmap which holds rules was created and mounted in our containers, so we can edit it: + +```bash +$ kubectl edit configmap falco-rules +``` + +Note that configmap name is composed with deployment name and '-rules' suffix. 
+ +And we add the data section: + +```yaml +data: + rules-traefik.yaml: | + - macro: traefik_consider_syscalls + condition: (evt.num < 0) + + - macro: app_traefik + condition: container and container.image startswith "traefik" + + # Restricting listening ports to selected set + + - list: traefik_allowed_inbound_ports_tcp + items: [443, 80, 8080] + + - rule: Unexpected inbound tcp connection traefik + desc: Detect inbound traffic to traefik using tcp on a port outside of expected set + condition: inbound and evt.rawres >= 0 and not fd.sport in (traefik_allowed_inbound_ports_tcp) and app_traefik + output: Inbound network connection to traefik on unexpected port (command=%proc.cmdline pid=%proc.pid connection=%fd.name sport=%fd.sport user=%user.name %container.info image=%container.image) + priority: NOTICE + + # Restricting spawned processes to selected set + + - list: traefik_allowed_processes + items: ["traefik"] + + - rule: Unexpected spawned process traefik + desc: Detect a process started in a traefik container outside of an expected set + condition: spawned_process and not proc.name in (traefik_allowed_processes) and app_traefik + output: Unexpected process spawned in traefik container (command=%proc.cmdline pid=%proc.pid user=%user.name %container.info image=%container.image) + priority: NOTICE +``` + +And the configmap is updated with the rules-traefik.yaml key. Next step is to refresh our pods, and we will see in the logs something like: + +```bash +Tue Jun 5 15:08:57 2018: Loading rules from file /etc/falco/rules.d/rules-traefik.yaml: +``` + +Finally our new file has been loaded and is ready to help us. diff --git a/stable/falco/templates/configmap-rules.yaml b/stable/falco/templates/configmap-rules.yaml new file mode 100644 index 000000000000..50f54e8ac2ae --- /dev/null +++ b/stable/falco/templates/configmap-rules.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "falco.fullname" . 
}}-rules + labels: + app: {{ template "falco.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: {} diff --git a/stable/falco/templates/daemonset.yaml b/stable/falco/templates/daemonset.yaml index 5d102dd7fec2..fcc47632bb05 100644 --- a/stable/falco/templates/daemonset.yaml +++ b/stable/falco/templates/daemonset.yaml @@ -50,6 +50,8 @@ spec: readOnly: true - mountPath: /etc/falco name: config-volume + - mountPath: /etc/falco/rules.d + name: rules-volume volumes: - name: dshm emptyDir: @@ -82,6 +84,9 @@ spec: path: falco_rules.yaml - key: falco_rules.local.yaml path: falco_rules.local.yaml + - name: rules-volume + configMap: + name: {{ template "falco.fullname" . }}-rules updateStrategy: type: {{ default "OnDelete" .Values.daemonset.updateStrategy | quote }} From 25d98c3cc1204e0e13acbe295d753508957dd3d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Wed, 6 Jun 2018 14:04:55 +0200 Subject: [PATCH 06/14] Move GCSCC integrations to a top level integrations section We can correlate falco.* keys for falco related settings, and refer them in Falco Wiki --- stable/falco/README.md | 64 +++++++++++++-------------- stable/falco/templates/configmap.yaml | 6 +-- stable/falco/values.yaml | 3 +- 3 files changed, 37 insertions(+), 36 deletions(-) diff --git a/stable/falco/README.md b/stable/falco/README.md index 2773db6acbc2..9c9934cfc2e1 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -43,38 +43,38 @@ The command removes all the Kubernetes components associated with the chart and The following table lists the configurable parameters of the Falco chart and their default values. 
-| Parameter | Description | Default | -| --- | --- | --- | -| `image.repository` | The image repository to pull from | `sysdig/falco` | -| `image.tag` | The image tag to pull | `latest` | -| `image.pullPolicy` | The image pull policy | `Always` | -| `resources` | Specify container resources | `{}` | -| `rbac.create` | If true, create & use RBAC resources | `true` | -| `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | -| `deployment.enabled` | Run falco-event-generator for sample events | `false` | -| `deployment.replicas` | How many replicas of falco-event-generator to run | `1` | -| `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/rules.d]` | -| `falco.jsonOutput` | Output events in json or text | `false` | -| `falco.jsonIncludeOutputProperty` | Include output property in json output | `true` | -| `falco.logStderr` | Send Falco debugging information logs to stderr | `true` | -| `falco.logSyslog` | Send Falco debugging information logs to syslog | `true` | -| `falco.logLevel` | The minimum level of Falco debugging information to include in logs | `info` | -| `falco.priority` | The minimum rule priority level to load an run | `debug` | -| `falco.bufferedOutputs` | Use buffered outputs to channels | `false` | -| `falco.outputs.rate` | Number of tokens gained per second | `1` | -| `falco.outputs.maxBurst` | Maximum number of tokens outstanding | `1000` | -| `falco.syslogOutput.enabled` | Enable syslog output for security notifications | `true` | -| `falco.fileOutput.enabled` | Enable file output for security notifications | `false` | -| `falco.fileOutput.keepAlive` | Open file once or every time a new notification arrives | `false` | -| `falco.fileOutput.filename` | The filename for logging notifications | `./events.txt` | -| `falco.stdoutOutput.enabled` | Enable stdout output for security notifications | `true` | -| 
`falco.programOutput.enabled` | Enable program output for security notifications | `false` | -| `falco.programOutput.keepAlive` | Start the program once or re-spawn when a notification arrives | `false` | -| `falco.programOutput.program` | Command to execute for program output | `mail -s "Falco Notification" someone@example.com` | -| `falco.gcsccIntegration.enabled` | Enable Google Cloud Security Command Center integration | `false` | -| `falco.gcsccIntegration.webhookUrl` | The URL where sysdig-gcscc-connector webhook is listening | `http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events` | -| `falco.gcsccIntegration.webhookAuthenticationToken` | Token used for authentication and webhook | `b27511f86e911f20b9e0f9c8104b4ec4` | -| `tolerations` | The tolerations for scheduling | `node-role.kubernetes.io/master:NoSchedule` | +| Parameter | Description | Default | +| --- | --- | --- | +| `image.repository` | The image repository to pull from | `sysdig/falco` | +| `image.tag` | The image tag to pull | `latest` | +| `image.pullPolicy` | The image pull policy | `Always` | +| `resources` | Specify container resources | `{}` | +| `rbac.create` | If true, create & use RBAC resources | `true` | +| `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | +| `deployment.enabled` | Run falco-event-generator for sample events | `false` | +| `deployment.replicas` | How many replicas of falco-event-generator to run | `1` | +| `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/rules.d]` | +| `falco.jsonOutput` | Output events in json or text | `false` | +| `falco.jsonIncludeOutputProperty` | Include output property in json output | `true` | +| `falco.logStderr` | Send Falco debugging information logs to stderr | `true` | +| `falco.logSyslog` | Send Falco debugging information logs to syslog | `true` | +| `falco.logLevel` | The minimum level of 
Falco debugging information to include in logs | `info` | +| `falco.priority` | The minimum rule priority level to load an run | `debug` | +| `falco.bufferedOutputs` | Use buffered outputs to channels | `false` | +| `falco.outputs.rate` | Number of tokens gained per second | `1` | +| `falco.outputs.maxBurst` | Maximum number of tokens outstanding | `1000` | +| `falco.syslogOutput.enabled` | Enable syslog output for security notifications | `true` | +| `falco.fileOutput.enabled` | Enable file output for security notifications | `false` | +| `falco.fileOutput.keepAlive` | Open file once or every time a new notification arrives | `false` | +| `falco.fileOutput.filename` | The filename for logging notifications | `./events.txt` | +| `falco.stdoutOutput.enabled` | Enable stdout output for security notifications | `true` | +| `falco.programOutput.enabled` | Enable program output for security notifications | `false` | +| `falco.programOutput.keepAlive` | Start the program once or re-spawn when a notification arrives | `false` | +| `falco.programOutput.program` | Command to execute for program output | `mail -s "Falco Notification" someone@example.com` | +| `integrations.gcscc.enabled` | Enable Google Cloud Security Command Center integration | `false` | +| `integrations.gcscc.webhookUrl` | The URL where sysdig-gcscc-connector webhook is listening | `http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events` | +| `integrations.gcscc.webhookAuthenticationToken` | Token used for authentication and webhook | `b27511f86e911f20b9e0f9c8104b4ec4` | +| `tolerations` | The tolerations for scheduling | `node-role.kubernetes.io/master:NoSchedule` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, diff --git a/stable/falco/templates/configmap.yaml b/stable/falco/templates/configmap.yaml index bf4a2c2f9ac0..1717ff183154 100644 --- a/stable/falco/templates/configmap.yaml +++ b/stable/falco/templates/configmap.yaml @@ -27,7 +27,7 @@ data: {{- end }} # Whether to output events in json or text - {{- if .Values.falco.gcsccIntegration.enabled }} + {{- if .Values.integrations.gcscc.enabled }} json_output: true {{- else }} json_output: {{ .Values.falco.jsonOutput }} @@ -111,11 +111,11 @@ data: # # Also, the program will be closed and reopened if falco is signaled with # SIGUSR1. - {{- if .Values.falco.gcsccIntegration.enabled }} + {{- if .Values.integrations.gcscc.enabled }} programOutput: enabled: true keep_alive: false - program: "\"curl -d @- -X POST --header 'Content-Type: application/json' --header 'Authorization: {{ .Values.falco.gcsccIntegration.webhookAuthenticationToken }}' {{ .Values.falco.gcsccIntegration.webhookUrl }}/\"" + program: "\"curl -d @- -X POST --header 'Content-Type: application/json' --header 'Authorization: {{ .Values.integrations.gcscc.webhookAuthenticationToken }}' {{ .Values.integrations.gcscc.webhookUrl }}/\"" {{- else }} program_output: enabled: {{ .Values.falco.programOutput.enabled }} diff --git a/stable/falco/values.yaml b/stable/falco/values.yaml index c3f815951132..9f3950f6a053 100644 --- a/stable/falco/values.yaml +++ b/stable/falco/values.yaml @@ -133,7 +133,8 @@ falco: # enabled: true # keep_alive: false # program: "\"curl -d @- -X POST --header 'Content-Type: application/json' --header 'Authorization: authentication_token' url \"" - gcsccIntegration: +integrations: + gcscc: enabled: false webhookUrl: http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events webhookAuthenticationToken: b27511f86e911f20b9e0f9c8104b4ec4 From 1587e7bf61770cfbb4ad1874998203337c2642cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Tue, 12 Jun 2018 10:24:40 +0200 Subject: [PATCH 07/14] Rename deployment to 
fakeEventGenerator First one is too generic --- stable/falco/README.md | 4 ++-- stable/falco/templates/deployment.yaml | 4 ++-- stable/falco/values.yaml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/stable/falco/README.md b/stable/falco/README.md index 9c9934cfc2e1..de18b5a7a6ed 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -51,8 +51,8 @@ The following table lists the configurable parameters of the Falco chart and the | `resources` | Specify container resources | `{}` | | `rbac.create` | If true, create & use RBAC resources | `true` | | `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | -| `deployment.enabled` | Run falco-event-generator for sample events | `false` | -| `deployment.replicas` | How many replicas of falco-event-generator to run | `1` | +| `fakeEventGenerator.enabled` | Run falco-event-generator for sample events | `false` | +| `fakeEventGenerator.replicas` | How many replicas of falco-event-generator to run | `1` | | `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/rules.d]` | | `falco.jsonOutput` | Output events in json or text | `false` | | `falco.jsonIncludeOutputProperty` | Include output property in json output | `true` | diff --git a/stable/falco/templates/deployment.yaml b/stable/falco/templates/deployment.yaml index fee41d272105..9a023c392cbb 100644 --- a/stable/falco/templates/deployment.yaml +++ b/stable/falco/templates/deployment.yaml @@ -1,4 +1,4 @@ -{{- if .Values.deployment.enabled }} +{{- if .Values.fakeEventGenerator.enabled }} apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -9,7 +9,7 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" spec: - replicas: {{ .Values.deployment.replicas }} + replicas: {{ .Values.fakeEventGenerator.replicas }} template: metadata: labels: diff --git a/stable/falco/values.yaml 
b/stable/falco/values.yaml index 9f3950f6a053..e7e7bf2baf81 100644 --- a/stable/falco/values.yaml +++ b/stable/falco/values.yaml @@ -23,7 +23,7 @@ rbac: # Ignored if rbac.create is true serviceAccountName: default -deployment: +fakeEventGenerator: enabled: false replicas: 1 From a17b0e8dbbedfe382089444d042e9c32654aefc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Fri, 15 Jun 2018 17:04:35 +0200 Subject: [PATCH 08/14] Add OWNERS file --- stable/falco/OWNERS | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 stable/falco/OWNERS diff --git a/stable/falco/OWNERS b/stable/falco/OWNERS new file mode 100644 index 000000000000..cfcffba8c318 --- /dev/null +++ b/stable/falco/OWNERS @@ -0,0 +1,5 @@ +approvers: +- bencer +reviewers: +- bencer +- nestorsalceda From 986b74ec0f26aeefd0da3c059d9278f69b6bb51e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Mon, 18 Jun 2018 10:34:15 +0200 Subject: [PATCH 09/14] Separate rbac and serviceAccount Follow RBAC best practices: https://github.com/kubernetes/helm/blob/master/docs/chart_best_practices/rbac.md --- stable/falco/README.md | 3 ++- stable/falco/templates/_helpers.tpl | 11 +++++++++++ stable/falco/templates/daemonset.yaml | 6 +----- stable/falco/templates/serviceaccount.yaml | 6 +++--- stable/falco/values.yaml | 8 ++++++-- 5 files changed, 23 insertions(+), 11 deletions(-) diff --git a/stable/falco/README.md b/stable/falco/README.md index de18b5a7a6ed..04381e16be90 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -50,7 +50,8 @@ The following table lists the configurable parameters of the Falco chart and the | `image.pullPolicy` | The image pull policy | `Always` | | `resources` | Specify container resources | `{}` | | `rbac.create` | If true, create & use RBAC resources | `true` | -| `rbac.serviceAccountName` | If rbac.create is false, use this value as serviceAccountName | `default` | +| `serviceAccount.create` | Create serviceAccount | 
`default` | +| `serviceAccount.name` | Use this value as serviceAccountName | ` ` | | `fakeEventGenerator.enabled` | Run falco-event-generator for sample events | `false` | | `fakeEventGenerator.replicas` | How many replicas of falco-event-generator to run | `1` | | `falco.rulesFile` | The location of the rules files | `[/etc/falco/falco_rules.yaml, /etc/falco/falco_rules.local.yaml, /etc/falco/rules.d]` | diff --git a/stable/falco/templates/_helpers.tpl b/stable/falco/templates/_helpers.tpl index c4d786e8b3af..71d84f9ff0e8 100644 --- a/stable/falco/templates/_helpers.tpl +++ b/stable/falco/templates/_helpers.tpl @@ -30,3 +30,14 @@ Create chart name and version as used by the chart label. {{- define "falco.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "falco.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "falco.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/stable/falco/templates/daemonset.yaml b/stable/falco/templates/daemonset.yaml index fcc47632bb05..4a60139eb6d3 100644 --- a/stable/falco/templates/daemonset.yaml +++ b/stable/falco/templates/daemonset.yaml @@ -15,11 +15,7 @@ spec: app: {{ template "falco.fullname" .}} role: security spec: -{{- if .Values.rbac.create }} - serviceAccountName: {{ template "falco.fullname" .}} -{{- else }} - serviceAccountName: {{ .Values.rbac.serviceAccountName }} -{{- end }} + serviceAccountName: {{ template "falco.serviceAccountName" .}} tolerations: {{ toYaml .Values.tolerations | indent 8 }} containers: diff --git a/stable/falco/templates/serviceaccount.yaml b/stable/falco/templates/serviceaccount.yaml index f329ecb17569..4c44aee61f83 100644 --- a/stable/falco/templates/serviceaccount.yaml +++ b/stable/falco/templates/serviceaccount.yaml @@ -1,10 +1,10 @@ -{{- if .Values.rbac.create }} +{{- if .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ template "falco.fullname" .}} + name: {{ template "falco.serviceAccountName" .}} labels: - app: {{ template "falco.fullname" . }} + app: {{ template "falco.serviceAccountName" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" diff --git a/stable/falco/values.yaml b/stable/falco/values.yaml index e7e7bf2baf81..54943fec2cc0 100644 --- a/stable/falco/values.yaml +++ b/stable/falco/values.yaml @@ -20,8 +20,12 @@ resources: {} rbac: # Create and use rbac resources create: true - # Ignored if rbac.create is true - serviceAccountName: default + +serviceAccount: + # Create and use serviceAccount resources + create: true + # Use this value as serviceAccountName + name: fakeEventGenerator: enabled: false From a100d55235b081ee78cb7ad7f067034f96b82b57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Mon, 18 Jun 2018 12:40:58 +0200 Subject: [PATCH 10/14] Use falco.serviceAccount name template for cluster role binding --- stable/falco/templates/clusterrolebinding.yaml | 2 +- stable/falco/templates/serviceaccount.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stable/falco/templates/clusterrolebinding.yaml b/stable/falco/templates/clusterrolebinding.yaml index 3191e9a3d046..fd6dbee678b0 100644 --- a/stable/falco/templates/clusterrolebinding.yaml +++ b/stable/falco/templates/clusterrolebinding.yaml @@ -11,7 +11,7 @@ metadata: heritage: "{{ .Release.Service }}" subjects: - kind: ServiceAccount - name: {{ template "falco.fullname" .}} + name: {{ template "falco.serviceAccountName" .}} namespace: default roleRef: kind: ClusterRole diff --git a/stable/falco/templates/serviceaccount.yaml b/stable/falco/templates/serviceaccount.yaml index 4c44aee61f83..ede522f8bdde 100644 --- a/stable/falco/templates/serviceaccount.yaml +++ b/stable/falco/templates/serviceaccount.yaml @@ -4,7 +4,7 @@ kind: ServiceAccount metadata: name: {{ template "falco.serviceAccountName" .}} labels: - app: {{ template "falco.serviceAccountName" . }} + app: {{ template "falco.fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" From dbf0ae154766c17f34d3d547ccc40e72e71375df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Tue, 19 Jun 2018 11:57:45 +0200 Subject: [PATCH 11/14] Fixes required from reviewer --- stable/falco/README.md | 2 +- stable/falco/templates/clusterrolebinding.yaml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stable/falco/README.md b/stable/falco/README.md index 04381e16be90..c392a8f215e6 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -50,7 +50,7 @@ The following table lists the configurable parameters of the Falco chart and the | `image.pullPolicy` | The image pull policy | `Always` | | `resources` | Specify container resources | `{}` | | `rbac.create` | If true, create & use RBAC resources | `true` | -| `serviceAccount.create` | Create serviceAccount | `default` | +| `serviceAccount.create` | Create serviceAccount | `true` | | `serviceAccount.name` | Use this value as serviceAccountName | ` ` | | `fakeEventGenerator.enabled` | Run falco-event-generator for sample events | `false` | | `fakeEventGenerator.replicas` | How many replicas of falco-event-generator to run | `1` | diff --git a/stable/falco/templates/clusterrolebinding.yaml b/stable/falco/templates/clusterrolebinding.yaml index fd6dbee678b0..50ccb056fa3c 100644 --- a/stable/falco/templates/clusterrolebinding.yaml +++ b/stable/falco/templates/clusterrolebinding.yaml @@ -3,7 +3,6 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: {{ template "falco.fullname" .}} - namespace: default labels: app: {{ template "falco.fullname" . 
}} chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" @@ -12,7 +11,7 @@ metadata: subjects: - kind: ServiceAccount name: {{ template "falco.serviceAccountName" .}} - namespace: default + namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole name: {{ template "falco.fullname" .}} From b9158a5559aaea743d90f88a36a4347c977715ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Mon, 25 Jun 2018 10:23:50 +0200 Subject: [PATCH 12/14] Allow passing rules in an external file instead of editing configMap by hand --- stable/falco/README.md | 44 +++++++++++++++------ stable/falco/templates/configmap-rules.yaml | 8 +++- stable/falco/templates/daemonset.yaml | 4 ++ stable/falco/values.yaml | 11 ++++++ 4 files changed, 53 insertions(+), 14 deletions(-) diff --git a/stable/falco/README.md b/stable/falco/README.md index c392a8f215e6..d3dbd5e03a62 100644 --- a/stable/falco/README.md +++ b/stable/falco/README.md @@ -72,6 +72,7 @@ The following table lists the configurable parameters of the Falco chart and the | `falco.programOutput.enabled` | Enable program output for security notifications | `false` | | `falco.programOutput.keepAlive` | Start the program once or re-spawn when a notification arrives | `false` | | `falco.programOutput.program` | Command to execute for program output | `mail -s "Falco Notification" someone@example.com` | +| `customRules` | Third party rules enabled for Falco | `{}` | | `integrations.gcscc.enabled` | Enable Google Cloud Security Command Center integration | `false` | | `integrations.gcscc.webhookUrl` | The URL where sysdig-gcscc-connector webhook is listening | `http://sysdig-gcscc-connector.default.svc.cluster.local:8080/events` | | `integrations.gcscc.webhookAuthenticationToken` | Token used for authentication and webhook | `b27511f86e911f20b9e0f9c8104b4ec4` | @@ -99,23 +100,17 @@ A few days ago [we published several rules](https://github.com/draios/falco-extr So the question is: How we can load custom rules in our 
Falco deployment? -```bash -$ helm install --name falco stable/falco -``` - -When deploying Falco using this chart, a configmap which holds rules was created and mounted in our containers, so we can edit it: +We are going to create a file which contains custom rules so that we can keep it in a Git repository. ```bash -$ kubectl edit configmap falco-rules +$ cat custom-rules.yaml ``` -Note that configmap name is composed with deployment name and '-rules' suffix. - -And we add the data section: +And the file looks like this one: ```yaml -data: - rules-traefik.yaml: | +customRules: + rules-traefik.yaml: |- - macro: traefik_consider_syscalls condition: (evt.num < 0) @@ -145,10 +140,33 @@ data: priority: NOTICE ``` -And the configmap is updated with the rules-traefik.yaml key. Next step is to refresh our pods, and we will see in the logs something like: +So next step is to use the custom-rules.yaml file for installing the Falco Helm chart. + +```bash +$ helm install --name falco -f custom-rules.yaml stable/falco +``` + +And we will see in our logs something like: ```bash Tue Jun 5 15:08:57 2018: Loading rules from file /etc/falco/rules.d/rules-traefik.yaml: ``` -Finally our new file has been loaded and is ready to help us. +And this means that our Falco installation has loaded the rules and is ready to help us. + +### Automating the generation of custom-rules.yaml file + +Sometimes edit YAML files with multistrings is a bit error prone, so we added an script for automating this step and make your life easier. + +This script lives in [falco-extras repository](https://github.com/draios/falco-extras) in the scripts directory. 
+ +Imagine that you would like to add rules for your Redis, MongoDB and Traefik containers, you have to: + +```bash +$ git clone https://github.com/draios/falco-extras.git +$ cd falco-extras +$ ./scripts/rules2helm rules/rules-mongo.yaml rules/rules-redis.yaml rules/rules-traefik.yaml > custom-rules.yaml +$ helm install --name falco -f custom-rules.yaml stable/falco +``` + +And that's all, in a few seconds you will see your pods up and running with MongoDB, Redis and Traefik rules enabled. diff --git a/stable/falco/templates/configmap-rules.yaml b/stable/falco/templates/configmap-rules.yaml index 50f54e8ac2ae..e1fd46bf26af 100644 --- a/stable/falco/templates/configmap-rules.yaml +++ b/stable/falco/templates/configmap-rules.yaml @@ -1,3 +1,4 @@ +{{- if .Values.customRules }} apiVersion: v1 kind: ConfigMap metadata: @@ -7,4 +8,9 @@ metadata: chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" -data: {} +data: +{{- range $file, $content := .Values.customRules }} + {{ $file }}: |- +{{ $content | indent 4}} +{{- end }} +{{- end }} diff --git a/stable/falco/templates/daemonset.yaml b/stable/falco/templates/daemonset.yaml index 4a60139eb6d3..839c89bcb5b1 100644 --- a/stable/falco/templates/daemonset.yaml +++ b/stable/falco/templates/daemonset.yaml @@ -46,8 +46,10 @@ spec: readOnly: true - mountPath: /etc/falco name: config-volume + {{- if .Values.customRules }} - mountPath: /etc/falco/rules.d name: rules-volume + {{- end }} volumes: - name: dshm emptyDir: @@ -80,9 +82,11 @@ spec: path: falco_rules.yaml - key: falco_rules.local.yaml path: falco_rules.local.yaml + {{- if .Values.customRules }} - name: rules-volume configMap: name: {{ template "falco.fullname" . 
}}-rules + {{- end }} updateStrategy: type: {{ default "OnDelete" .Values.daemonset.updateStrategy | quote }} diff --git a/stable/falco/values.yaml b/stable/falco/values.yaml index 54943fec2cc0..06a3b70a4ad4 100644 --- a/stable/falco/values.yaml +++ b/stable/falco/values.yaml @@ -130,6 +130,17 @@ falco: keepAlive: false program: mail -s "Falco Notification" someone@example.com +customRules: {} + # Although Falco comes with a nice default rule set for detecting weird + # behavior in containers, our users are going to customize the run-time + # security rule sets or policies for the specific container images and + # applications they run. This feature can be handled in this section. + # + # Example: + # + # rules-traefik.yaml: |- + # [ rule body ] + # If Google Cloud Security Command Center integration is enabled, falco will # be configured to use this integration as program_output and sets the following values: # * json_output: true From d759a0d92f5bb63b93f21006a32a4520b6a9abbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=A9stor=20Salceda?= Date: Mon, 2 Jul 2018 19:28:51 +0200 Subject: [PATCH 13/14] Remove quotes from Chart version I'm not sure if this break lint stage in CircleCI --- stable/falco/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable/falco/Chart.yaml b/stable/falco/Chart.yaml index bcaa654cfa2a..b9fa5b0ca1e7 100644 --- a/stable/falco/Chart.yaml +++ b/stable/falco/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: falco -version: '0.1' +version: 0.1 appVersion: 0.10.0 description: Sysdig Falco keywords: From 98eb0d6fa6a73bbdb0473b97585e3e8f0a32bc03 Mon Sep 17 00:00:00 2001 From: Lachlan Evenson Date: Tue, 3 Jul 2018 16:02:12 -0700 Subject: [PATCH 14/14] Update Chart.yaml --- stable/falco/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stable/falco/Chart.yaml b/stable/falco/Chart.yaml index b9fa5b0ca1e7..3daa1997f98a 100644 --- a/stable/falco/Chart.yaml +++ b/stable/falco/Chart.yaml @@ -1,6 +1,6 
@@ apiVersion: v1 name: falco -version: 0.1 +version: 0.1.0 appVersion: 0.10.0 description: Sysdig Falco keywords: