From c2b5e1d4128fdfff56645ca760a77c3cabc4c0a6 Mon Sep 17 00:00:00 2001
From: ofek
Date: Fri, 11 Jun 2021 13:47:11 +0000
Subject: [PATCH] Add runtime configuration validation (#8960)

* Sync config models

* address 231435d118b1d4ed4df00137e93816894913bdc2

---
 meta/status/index.html   | 2 +-
 search/search_index.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/meta/status/index.html b/meta/status/index.html
index 117bc6823e401..f746a86b48abc 100644
--- a/meta/status/index.html
+++ b/meta/status/index.html
@@ -1 +1 @@
- Status - Agent Integrations

Status


Dashboards

78.91%

Completed 116/147
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_active_directory
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cockroachdb
  • confluent_platform
  • consul
  • consul_connect
  • containerd
  • coredns
  • couch
  • couchbase
  • cri
  • crio
  • databricks
  • directory
  • disk
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • jmeter
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • nvidia_jetson
  • oom_kill
  • openldap
  • openshift
  • openstack
  • openstack_controller
  • oracle
  • otel
  • pan_firewall
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • system_core
  • systemd
  • tcp_check
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • yarn
  • zk

Logs support

92.73%

Completed 102/110
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kyototycoon
  • lighttpd
  • linkerd
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openstack
  • openstack_controller
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • scylla
  • sidekiq
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • supervisord
  • teamcity
  • tenable
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • yarn
  • zk

17.48%

Completed 25/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Config specs

94.41%

Completed 135/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Docs specs

0.70%

Completed 1/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

E2E tests

77.86%

Completed 109/140
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Config validation

35.77%

Completed 49/137
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_scheduler
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Metadata submission

30.71%

Completed 43/140
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Process signatures

30.56%

Completed 44/144
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Agent 8 check signatures

50.34%

Completed 73/145
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Default saved views (for integrations with logs)

44.66%

Completed 46/103
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • clickhouse
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_scheduler
  • kyototycoon
  • lighttpd
  • linkerd
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openstack
  • openstack_controller
  • pgbouncer
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • scylla
  • sidekiq
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • supervisord
  • teamcity
  • tenable
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • win32_event_log
  • yarn
  • zk

Last update: May 15, 2020
\ No newline at end of file
+ Status - Agent Integrations

Status


Dashboards

78.91%

Completed 116/147
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_active_directory
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cockroachdb
  • confluent_platform
  • consul
  • consul_connect
  • containerd
  • coredns
  • couch
  • couchbase
  • cri
  • crio
  • databricks
  • directory
  • disk
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • jmeter
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • nvidia_jetson
  • oom_kill
  • openldap
  • openshift
  • openstack
  • openstack_controller
  • oracle
  • otel
  • pan_firewall
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • system_core
  • systemd
  • tcp_check
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • yarn
  • zk

Logs support

92.73%

Completed 102/110
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kyototycoon
  • lighttpd
  • linkerd
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openstack
  • openstack_controller
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • scylla
  • sidekiq
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • supervisord
  • teamcity
  • tenable
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • yarn
  • zk

17.48%

Completed 25/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Config specs

94.41%

Completed 135/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Docs specs

0.70%

Completed 1/143
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

E2E tests

77.86%

Completed 109/140
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Config validation

36.50%

Completed 50/137
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_scheduler
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Metadata submission

30.71%

Completed 43/140
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Process signatures

30.56%

Completed 44/144
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Agent 8 check signatures

50.34%

Completed 73/145
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • amazon_msk
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • btrfs
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • cisco_aci
  • clickhouse
  • cloud_foundry_api
  • cockroachdb
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • crio
  • directory
  • disk
  • dns_check
  • dotnetclr
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • external_dns
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • go_expvar
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • http_check
  • hyperv
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_apiserver_metrics
  • kube_controller_manager
  • kube_dns
  • kube_metrics_server
  • kube_proxy
  • kube_scheduler
  • kubelet
  • kubernetes_state
  • kyototycoon
  • lighttpd
  • linkerd
  • linux_proc_extras
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • network
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openmetrics
  • openstack
  • openstack_controller
  • oracle
  • pdh_check
  • pgbouncer
  • php_fpm
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • process
  • prometheus
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • riakcs
  • sap_hana
  • scylla
  • sidekiq
  • snmp
  • snowflake
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • ssh_check
  • statsd
  • supervisord
  • system_core
  • system_swap
  • tcp_check
  • teamcity
  • tenable
  • tls
  • tokumx
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • vsphere
  • win32_event_log
  • windows_service
  • wmi_check
  • yarn
  • zk

Default saved views (for integrations with logs)

44.66%

Completed 46/103
  • active_directory
  • activemq
  • activemq_xml
  • aerospike
  • airflow
  • ambari
  • apache
  • aspdotnet
  • azure_iot_edge
  • cacti
  • cassandra
  • cassandra_nodetool
  • ceph
  • cilium
  • clickhouse
  • confluent_platform
  • consul
  • coredns
  • couch
  • couchbase
  • druid
  • ecs_fargate
  • eks_fargate
  • elastic
  • envoy
  • etcd
  • exchange_server
  • flink
  • fluentd
  • gearmand
  • gitlab
  • gitlab_runner
  • glusterfs
  • gunicorn
  • haproxy
  • harbor
  • hazelcast
  • hdfs_datanode
  • hdfs_namenode
  • hive
  • hivemq
  • ibm_db2
  • ibm_mq
  • ibm_was
  • ignite
  • iis
  • istio
  • jboss_wildfly
  • journald
  • kafka
  • kafka_consumer
  • kong
  • kube_scheduler
  • kyototycoon
  • lighttpd
  • linkerd
  • mapr
  • mapreduce
  • marathon
  • marklogic
  • mcache
  • mesos_master
  • mesos_slave
  • mongo
  • mysql
  • nagios
  • nfsstat
  • nginx
  • nginx_ingress_controller
  • openldap
  • openstack
  • openstack_controller
  • pgbouncer
  • postfix
  • postgres
  • powerdns_recursor
  • presto
  • proxysql
  • rabbitmq
  • redisdb
  • rethinkdb
  • riak
  • scylla
  • sidekiq
  • solr
  • sonarqube
  • spark
  • sqlserver
  • squid
  • statsd
  • supervisord
  • teamcity
  • tenable
  • tomcat
  • twemproxy
  • twistlock
  • varnish
  • vault
  • vertica
  • voltdb
  • win32_event_log
  • yarn
  • zk

Last update: May 15, 2020
\ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json index c1777c00fb1fe..659bfde4d0466 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Agent Integrations \u00b6 Welcome to the wonderful world of developing Agent Integrations for Datadog. Here we document how we do things, the processes for various tasks, coding conventions & best practices, the internals of our testing infrastructure, and so much more. If you are intrigued, continue reading. If not, continue all the same Getting started \u00b6 To work on any integration (a.k.a. Check ), you must setup your development environment. After that you may immediately begin testing or read through the best practices we strive to follow. Also, feel free to check out how ddev works and browse the API reference of the base package. Navigation \u00b6 Desktop readers can use keyboard shortcuts to navigate. Keys Action , (comma) p Navigate to the \"previous\" page . (period) n Navigate to the \"next\" page / s Display the search modal","title":"About"},{"location":"#agent-integrations","text":"Welcome to the wonderful world of developing Agent Integrations for Datadog. Here we document how we do things, the processes for various tasks, coding conventions & best practices, the internals of our testing infrastructure, and so much more. If you are intrigued, continue reading. If not, continue all the same","title":"Agent Integrations"},{"location":"#getting-started","text":"To work on any integration (a.k.a. Check ), you must setup your development environment. After that you may immediately begin testing or read through the best practices we strive to follow. Also, feel free to check out how ddev works and browse the API reference of the base package.","title":"Getting started"},{"location":"#navigation","text":"Desktop readers can use keyboard shortcuts to navigate. Keys Action , (comma) p Navigate to the \"previous\" page . (period) n Navigate to the \"next\" page / s Display the search modal","title":"Navigation"},{"location":"e2e/","text":"E2E \u00b6 Any integration that makes use of our pytest plugin in its test suite supports end-to-end testing on a live Datadog Agent . The entrypoint for E2E management is the command group ddev env . Discovery \u00b6 Use the ls command to see what environments are available, for example: $ ddev env ls envoy envoy: py27 py38 You'll notice that only environments that actually run tests are available. Running simply ddev env ls with no arguments will display the active environments. Creation \u00b6 To start an environment run ddev env start , for example: $ ddev env start envoy py38 Setting up environment `py38`... success! Updating `datadog/agent-dev:master`... success! Detecting the major version... Agent 7 detected Writing configuration for `py38`... success! Starting the Agent... success! Config file (copied to your clipboard): C:\\Users\\ofek\\AppData\\Local\\dd-checks-dev\\envs\\envoy\\py38\\config\\envoy.yaml To run this check, do: ddev env check envoy py38 To stop this check, do: ddev env stop envoy py38 This sets up the selected environment and an instance of the Agent running in a Docker container. The default configuration is defined by each environment's test suite and is saved to a file, which is then mounted to the Agent container so you may freely modify it. 
Let's see what we have running: $ docker ps --format \"table {{.Image}}\\t{{.Status}}\\t{{.Ports}}\\t{{.Names}}\" IMAGE STATUS PORTS NAMES datadog/agent-dev:master-py3 Up 4 seconds (health: starting) dd_envoy_py38 default_service2 Up 5 seconds 80/tcp, 10000/tcp default_service2_1 envoyproxy/envoy:latest Up 5 seconds 0.0.0.0:8001->8001/tcp, 10000/tcp, 0.0.0.0:8000->80/tcp default_front-envoy_1 default_xds Up 5 seconds 8080/tcp default_xds_1 default_service1 Up 5 seconds 80/tcp, 10000/tcp default_service1_1 Agent version \u00b6 You can select a particular build of the Agent to use with the --agent / -a option. Any Docker image is valid e.g. datadog/agent:7.17.0 . A custom nightly build will be used by default, which is re-built on every commit to the Datadog Agent repository . Integration version \u00b6 By default the version of the integration used will be the one shipped with the chosen Agent version, as if you had passed in the --prod flag. If you wish to modify an integration and test changes in real time, use the --dev flag. Doing so will mount and install the integration in the Agent container. All modifications to the integration's directory will be propagated to the Agent, whether it be a code change or switching to a different Git branch. If you modify the base package then you will need to mount that with the --base flag, which implicitly activates --dev . Testing \u00b6 To run tests against the live Agent, use the ddev env test command. It is similar to the test command except it is capable of running tests marked as E2E , and only runs such tests. Automation \u00b6 You can use the --new-env / -ne flag to automate environment management. For example running: ddev env test apache:py38 vault:py38 -ne will start the py38 environment for Apache, run E2E tests, tear down the environment, and then do the same for Vault. Tip Since running tests implies code changes are being introduced, --new-env enables --dev by default. Execution \u00b6 Similar to the Agent's check command, you can perform manual check runs using ddev env check , for example: $ ddev env check envoy py38 --log-level debug ... ========= Collector ========= Running Checks ============== envoy (1.12.0) -------------- Instance ID: envoy:c705bd922a3c275c [OK] Configuration Source: file:/etc/datadog-agent/conf.d/envoy.d/envoy.yaml Total Runs: 1 Metric Samples: Last Run: 546, Total: 546 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 1, Total: 1 Average Execution Time : 25ms Last Execution Date : 2020-02-17 00:58:05.000000 UTC Last Successful Execution Date : 2020-02-17 00:58:05.000000 UTC Debugging \u00b6 You may start an interactive debugging session using the --breakpoint / -b option. The option accepts an integer representing the line number at which to break. For convenience, 0 and -1 are shortcuts to the first and last line of the integration's check method, respectively. $ ddev env check envoy py38 -b 0 > /opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/envoy/envoy.py(34)check() -> custom_tags = instance.get('tags', []) (Pdb) list 29 self.blacklisted_metrics = set() 30 31 self.caching_metrics = None 32 33 def check(self, instance): 34 B-> custom_tags = instance.get('tags', []) 35 36 try: 37 stats_url = instance['stats_url'] 38 except KeyError: 39 msg = 'Envoy configuration setting `stats_url` is required' (Pdb) print(instance) {'stats_url': 'http://localhost:8001/stats'} Caveat The line number must be within the integration's check method. 
Refreshing state \u00b6 Testing and manual check runs always reflect the current state of code and configuration however, if you want to see the result of changes in-app , you will need to refresh the environment by running ddev env reload . Removal \u00b6 To stop an environment run ddev env stop . Any environments that haven't been explicitly stopped will show as active in the output of ddev env ls , even persisting through system restarts. If you are confident that environments are no longer active, you can run ddev env prune to remove all accumulated environment state.","title":"E2E"},{"location":"e2e/#e2e","text":"Any integration that makes use of our pytest plugin in its test suite supports end-to-end testing on a live Datadog Agent . The entrypoint for E2E management is the command group ddev env .","title":"E2E"},{"location":"e2e/#discovery","text":"Use the ls command to see what environments are available, for example: $ ddev env ls envoy envoy: py27 py38 You'll notice that only environments that actually run tests are available. Running simply ddev env ls with no arguments will display the active environments.","title":"Discovery"},{"location":"e2e/#creation","text":"To start an environment run ddev env start , for example: $ ddev env start envoy py38 Setting up environment `py38`... success! Updating `datadog/agent-dev:master`... success! Detecting the major version... Agent 7 detected Writing configuration for `py38`... success! Starting the Agent... success! Config file (copied to your clipboard): C:\\Users\\ofek\\AppData\\Local\\dd-checks-dev\\envs\\envoy\\py38\\config\\envoy.yaml To run this check, do: ddev env check envoy py38 To stop this check, do: ddev env stop envoy py38 This sets up the selected environment and an instance of the Agent running in a Docker container. The default configuration is defined by each environment's test suite and is saved to a file, which is then mounted to the Agent container so you may freely modify it. Let's see what we have running: $ docker ps --format \"table {{.Image}}\\t{{.Status}}\\t{{.Ports}}\\t{{.Names}}\" IMAGE STATUS PORTS NAMES datadog/agent-dev:master-py3 Up 4 seconds (health: starting) dd_envoy_py38 default_service2 Up 5 seconds 80/tcp, 10000/tcp default_service2_1 envoyproxy/envoy:latest Up 5 seconds 0.0.0.0:8001->8001/tcp, 10000/tcp, 0.0.0.0:8000->80/tcp default_front-envoy_1 default_xds Up 5 seconds 8080/tcp default_xds_1 default_service1 Up 5 seconds 80/tcp, 10000/tcp default_service1_1","title":"Creation"},{"location":"e2e/#agent-version","text":"You can select a particular build of the Agent to use with the --agent / -a option. Any Docker image is valid e.g. datadog/agent:7.17.0 . A custom nightly build will be used by default, which is re-built on every commit to the Datadog Agent repository .","title":"Agent version"},{"location":"e2e/#integration-version","text":"By default the version of the integration used will be the one shipped with the chosen Agent version, as if you had passed in the --prod flag. If you wish to modify an integration and test changes in real time, use the --dev flag. Doing so will mount and install the integration in the Agent container. All modifications to the integration's directory will be propagated to the Agent, whether it be a code change or switching to a different Git branch. 
If you modify the base package then you will need to mount that with the --base flag, which implicitly activates --dev .","title":"Integration version"},{"location":"e2e/#testing","text":"To run tests against the live Agent, use the ddev env test command. It is similar to the test command except it is capable of running tests marked as E2E , and only runs such tests.","title":"Testing"},{"location":"e2e/#automation","text":"You can use the --new-env / -ne flag to automate environment management. For example running: ddev env test apache:py38 vault:py38 -ne will start the py38 environment for Apache, run E2E tests, tear down the environment, and then do the same for Vault. Tip Since running tests implies code changes are being introduced, --new-env enables --dev by default.","title":"Automation"},{"location":"e2e/#execution","text":"Similar to the Agent's check command, you can perform manual check runs using ddev env check , for example: $ ddev env check envoy py38 --log-level debug ... ========= Collector ========= Running Checks ============== envoy (1.12.0) -------------- Instance ID: envoy:c705bd922a3c275c [OK] Configuration Source: file:/etc/datadog-agent/conf.d/envoy.d/envoy.yaml Total Runs: 1 Metric Samples: Last Run: 546, Total: 546 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 1, Total: 1 Average Execution Time : 25ms Last Execution Date : 2020-02-17 00:58:05.000000 UTC Last Successful Execution Date : 2020-02-17 00:58:05.000000 UTC","title":"Execution"},{"location":"e2e/#debugging","text":"You may start an interactive debugging session using the --breakpoint / -b option. The option accepts an integer representing the line number at which to break. For convenience, 0 and -1 are shortcuts to the first and last line of the integration's check method, respectively. $ ddev env check envoy py38 -b 0 > /opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/envoy/envoy.py(34)check() -> custom_tags = instance.get('tags', []) (Pdb) list 29 self.blacklisted_metrics = set() 30 31 self.caching_metrics = None 32 33 def check(self, instance): 34 B-> custom_tags = instance.get('tags', []) 35 36 try: 37 stats_url = instance['stats_url'] 38 except KeyError: 39 msg = 'Envoy configuration setting `stats_url` is required' (Pdb) print(instance) {'stats_url': 'http://localhost:8001/stats'} Caveat The line number must be within the integration's check method.","title":"Debugging"},{"location":"e2e/#refreshing-state","text":"Testing and manual check runs always reflect the current state of code and configuration however, if you want to see the result of changes in-app , you will need to refresh the environment by running ddev env reload .","title":"Refreshing state"},{"location":"e2e/#removal","text":"To stop an environment run ddev env stop . Any environments that haven't been explicitly stopped will show as active in the output of ddev env ls , even persisting through system restarts. If you are confident that environments are no longer active, you can run ddev env prune to remove all accumulated environment state.","title":"Removal"},{"location":"setup/","text":"Setup \u00b6 This will be relatively painless, we promise! Integrations \u00b6 You will need to clone integrations-core and/or integrations-extras depending on which integrations you intend to work on. Python \u00b6 To work on any integration you must install Python 3.8+. After installation, restart your terminal and ensure that your newly installed Python comes first in your PATH . macOS We recommend using Homebrew . 
First update the formulae and Homebrew itself: brew update then either install Python: brew install python or upgrade it: brew upgrade python After it completes, check the output to see if it asked you to run any extra commands and if so, execute them. Verify successful PATH modification: which -a python Windows Windows users have it the easiest. Simply download the latest x86-64 executable installer and run it. When prompted, be sure to select the option to add to your PATH . Also, it is recommended that you choose the per-user installation method. Verify successful PATH modification: where python Linux Ah, you enjoy difficult things. Are you using Gentoo? We recommend using either Miniconda or pyenv . Whatever you do, never modify the system Python. Verify successful PATH modification: which -a python ddev \u00b6 Installation \u00b6 You have 2 options to install the CLI provided by the package datadog-checks-dev . Warning For either option, if you are on macOS/Linux do not use sudo ! Doing so will result in a broken installation. Development \u00b6 If you cloned integrations-core and want to always use the version based on the current branch, run: python -m pip install -e \"path/to/datadog_checks_dev[cli]\" Note Be aware that this method does not keep track of dependencies so you will need to re-run the command if/when the required dependencies are changed. Stable \u00b6 The latest released version may be installed from PyPI : python -m pip install --upgrade \"datadog-checks-dev[cli]\" Configuration \u00b6 Upon the first invocation, ddev will create its config file if it does not yet exist. You will need to set the location of each cloned repository: ddev config set /path/to/integrations- The may be either core or extras . By default, the repo core will be the target of all commands. If you want to switch to integrations-extras , run: ddev config set repo extras Docker \u00b6 Docker is used in nearly every integration's test suite therefore we simply require it to avoid confusion. macOS Install Docker Desktop for Mac . Right-click the Docker taskbar item and update Preferences > File Sharing with any locations you need to open. Windows Install Docker Desktop for Windows . Right-click the Docker taskbar item and update Settings > Shared Drives with any locations you need to open e.g. C:\\ . Linux Install Docker Engine for your distribution: Ubuntu Docker CE for Ubuntu Debian Docker CE for Debian Fedora Docker CE for Fedora CentOS Docker CE for CentOS Add your user to the docker group: sudo usermod -aG docker $USER Sign out and then back in again so your changes take effect. After installation, restart your terminal one last time.","title":"Setup"},{"location":"setup/#setup","text":"This will be relatively painless, we promise!","title":"Setup"},{"location":"setup/#integrations","text":"You will need to clone integrations-core and/or integrations-extras depending on which integrations you intend to work on.","title":"Integrations"},{"location":"setup/#python","text":"To work on any integration you must install Python 3.8+. After installation, restart your terminal and ensure that your newly installed Python comes first in your PATH . macOS We recommend using Homebrew . First update the formulae and Homebrew itself: brew update then either install Python: brew install python or upgrade it: brew upgrade python After it completes, check the output to see if it asked you to run any extra commands and if so, execute them. 
Verify successful PATH modification: which -a python Windows Windows users have it the easiest. Simply download the latest x86-64 executable installer and run it. When prompted, be sure to select the option to add to your PATH . Also, it is recommended that you choose the per-user installation method. Verify successful PATH modification: where python Linux Ah, you enjoy difficult things. Are you using Gentoo? We recommend using either Miniconda or pyenv . Whatever you do, never modify the system Python. Verify successful PATH modification: which -a python","title":"Python"},{"location":"setup/#ddev","text":"","title":"ddev"},{"location":"setup/#installation","text":"You have 2 options to install the CLI provided by the package datadog-checks-dev . Warning For either option, if you are on macOS/Linux do not use sudo ! Doing so will result in a broken installation.","title":"Installation"},{"location":"setup/#development","text":"If you cloned integrations-core and want to always use the version based on the current branch, run: python -m pip install -e \"path/to/datadog_checks_dev[cli]\" Note Be aware that this method does not keep track of dependencies so you will need to re-run the command if/when the required dependencies are changed.","title":"Development"},{"location":"setup/#stable","text":"The latest released version may be installed from PyPI : python -m pip install --upgrade \"datadog-checks-dev[cli]\"","title":"Stable"},{"location":"setup/#configuration","text":"Upon the first invocation, ddev will create its config file if it does not yet exist. You will need to set the location of each cloned repository: ddev config set /path/to/integrations- The may be either core or extras . By default, the repo core will be the target of all commands. If you want to switch to integrations-extras , run: ddev config set repo extras","title":"Configuration"},{"location":"setup/#docker","text":"Docker is used in nearly every integration's test suite therefore we simply require it to avoid confusion. macOS Install Docker Desktop for Mac . Right-click the Docker taskbar item and update Preferences > File Sharing with any locations you need to open. Windows Install Docker Desktop for Windows . Right-click the Docker taskbar item and update Settings > Shared Drives with any locations you need to open e.g. C:\\ . Linux Install Docker Engine for your distribution: Ubuntu Docker CE for Ubuntu Debian Docker CE for Debian Fedora Docker CE for Fedora CentOS Docker CE for CentOS Add your user to the docker group: sudo usermod -aG docker $USER Sign out and then back in again so your changes take effect. After installation, restart your terminal one last time.","title":"Docker"},{"location":"testing/","text":"Testing \u00b6 The entrypoint for testing any integration is the command ddev test , which accepts an arbitrary number of integrations as arguments. Under the hood, we use tox for environment management and pytest as our test framework. Discovery \u00b6 Use the --list / -l flag to see what environments are available, for example: $ ddev test postgres envoy -l postgres: py27-10 py27-11 py27-93 py27-94 py27-95 py27-96 py38-10 py38-11 py38-93 py38-94 py38-95 py38-96 format_style style envoy: py27 py38 bench format_style style You'll notice that all environments for running tests are prefixed with pyXY , indicating the Python version to use. If you don't have a particular version installed (for example Python 2.7), such environments will be skipped. 
The second part of a test environment's name corresponds to the version of the product. For example, the 11 in py38-11 implies tests will run against version 11.x of PostgreSQL. If there is no version suffix, it means that either: the version is pinned, usually set to pull the latest release, or there is no concept of a product, such as the disk check Usage \u00b6 Explicit \u00b6 Passing just the integration name will run every test environment e.g. executing ddev test envoy will run the environments py27 , py38 , and style . You may select a subset of environments to run by appending a : followed by a comma-separated list of environments. For example, executing: ddev test postgres:py38-11,style envoy:py38 will run, in order, the environments py38-11 and style for the PostgreSQL check and the environment py38 for the Envoy check. Detection \u00b6 If no integrations are specified then only integrations that were changed will be tested, based on a diff between the latest commit to the current and master branches. The criteria for an integration to be considered changed is based on the file extension of paths in the diff. So for example if only Markdown files were modified then nothing will be tested. The integrations will be tested in lexicographical order. Coverage \u00b6 To measure code coverage, use the --cov / -c flag. Doing so will display a summary of coverage statistics after successful execution of integrations' tests. $ ddev test tls -c ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover ------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ------------------------------------------------------------------- TOTAL 737 4 70 2 99% To also show any line numbers that were not hit, use the --cov-missing / -cm flag instead. $ ddev test tls -cm ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover Missing ----------------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% 160-167, 288->275, 297->300, 300 datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ----------------------------------------------------------------------------- TOTAL 737 4 70 2 99% Style \u00b6 To run only the style checking environments, use the --style / -s shortcut flag. You may also only run the formatter environment using the --format-style / -fs shortcut flag. The formatter will automatically resolve the most common errors caught by the style checker. Advanced \u00b6 There are a number of shortcut options available that correspond to pytest options . --marker / -m ( pytest : -m ) - Only run tests matching a given marker expression e.g. ddev test elastic:py38-7.2 -m unit --filter / -k ( pytest : -k ) - Only run tests matching a given substring expression e.g. 
ddev test redisdb -k replication --debug / -d ( pytest : --log-level=debug -s ) - Set the log level to debug --pdb ( pytest : --pdb -x ) - Drop to PDB on first failure, then end test session --verbose / -v ( pytest : -v --tb=auto ) - Increase verbosity (can be used additively) and disables shortened tracebacks You may also pass arguments directly to pytest using the --pytest-args / -pa option. For example, you could re-write -d as -pa \"--log-level=debug -s\" .","title":"Testing"},{"location":"testing/#testing","text":"The entrypoint for testing any integration is the command ddev test , which accepts an arbitrary number of integrations as arguments. Under the hood, we use tox for environment management and pytest as our test framework.","title":"Testing"},{"location":"testing/#discovery","text":"Use the --list / -l flag to see what environments are available, for example: $ ddev test postgres envoy -l postgres: py27-10 py27-11 py27-93 py27-94 py27-95 py27-96 py38-10 py38-11 py38-93 py38-94 py38-95 py38-96 format_style style envoy: py27 py38 bench format_style style You'll notice that all environments for running tests are prefixed with pyXY , indicating the Python version to use. If you don't have a particular version installed (for example Python 2.7), such environments will be skipped. The second part of a test environment's name corresponds to the version of the product. For example, the 11 in py38-11 implies tests will run against version 11.x of PostgreSQL. If there is no version suffix, it means that either: the version is pinned, usually set to pull the latest release, or there is no concept of a product, such as the disk check","title":"Discovery"},{"location":"testing/#usage","text":"","title":"Usage"},{"location":"testing/#explicit","text":"Passing just the integration name will run every test environment e.g. executing ddev test envoy will run the environments py27 , py38 , and style . You may select a subset of environments to run by appending a : followed by a comma-separated list of environments. For example, executing: ddev test postgres:py38-11,style envoy:py38 will run, in order, the environments py38-11 and style for the PostgreSQL check and the environment py38 for the Envoy check.","title":"Explicit"},{"location":"testing/#detection","text":"If no integrations are specified then only integrations that were changed will be tested, based on a diff between the latest commit to the current and master branches. The criteria for an integration to be considered changed is based on the file extension of paths in the diff. So for example if only Markdown files were modified then nothing will be tested. The integrations will be tested in lexicographical order.","title":"Detection"},{"location":"testing/#coverage","text":"To measure code coverage, use the --cov / -c flag. Doing so will display a summary of coverage statistics after successful execution of integrations' tests. $ ddev test tls -c ... 
---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover ------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ------------------------------------------------------------------- TOTAL 737 4 70 2 99% To also show any line numbers that were not hit, use the --cov-missing / -cm flag instead. $ ddev test tls -cm ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover Missing ----------------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% 160-167, 288->275, 297->300, 300 datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ----------------------------------------------------------------------------- TOTAL 737 4 70 2 99%","title":"Coverage"},{"location":"testing/#style","text":"To run only the style checking environments, use the --style / -s shortcut flag. You may also only run the formatter environment using the --format-style / -fs shortcut flag. The formatter will automatically resolve the most common errors caught by the style checker.","title":"Style"},{"location":"testing/#advanced","text":"There are a number of shortcut options available that correspond to pytest options . --marker / -m ( pytest : -m ) - Only run tests matching a given marker expression e.g. ddev test elastic:py38-7.2 -m unit --filter / -k ( pytest : -k ) - Only run tests matching a given substring expression e.g. ddev test redisdb -k replication --debug / -d ( pytest : --log-level=debug -s ) - Set the log level to debug --pdb ( pytest : --pdb -x ) - Drop to PDB on first failure, then end test session --verbose / -v ( pytest : -v --tb=auto ) - Increase verbosity (can be used additively) and disables shortened tracebacks You may also pass arguments directly to pytest using the --pytest-args / -pa option. For example, you could re-write -d as -pa \"--log-level=debug -s\" .","title":"Advanced"},{"location":"architecture/snmp/","text":"SNMP \u00b6 Note This section is meant for developers that want to understand the working of the SNMP integration. Be sure you are familiar with SNMP concepts , and you have read through the official SNMP integration docs . Overview \u00b6 While most integrations are either Python, JMX, or implemented in the Agent in Go, the SNMP integration is a bit more complex. Here's an overview of what this integration involves: A Python check , responsible for: Collecting metrics from a specific device IP. Metrics typically come from profiles , but they can also be specified explicitly . Auto-discovering devices over a network. (Pending deprecation in favor of Agent auto-discovery.) An Agent service listener , responsible for auto-discovering devices over a network and forwarding discovered instances to the existing Agent check scheduling pipeline. 
Also known as \"Agent SNMP auto-discovery\". The diagram below shows how these components interact for a typical VM-based setup (single Agent on a host). For Datadog Cluster Agent (DCA) deployments, see Cluster Agent Integration . Python Check \u00b6 Dependencies \u00b6 The Python check uses PySNMP to make SNMP queries and manipulate SNMP data (OIDs, variables, and MIBs). Device Monitoring \u00b6 The primary functionality of the Python check is to collect metrics from a given device given its IP address. As all Python checks, it supports multi-instances configuration, where each instance represents a device: instances : - ip_address : \"192.168.0.12\" # Python Auto-Discovery \u00b6 Approach \u00b6 The Python check includes a multithreaded implementation of device auto-discovery. It runs on instances that use network_address instead of ip_address : instances : - network_address : \"192.168.0.0/28\" # The main tasks performed by device auto-discovery are: Find new devices : For each IP in the network_address CIDR range, the check queries the device sysObjectID . If the query succeeds and the sysObjectID matches one of the registered profiles, the device is added as a discovered instance. This logic is run at regular intervals in a separate thread. Cache devices : To improve performance, discovered instances are cached on disk based on a hash of the instance. Since options from the network_address instance are copied into discovered instances, the cache is invalidated if the network_address changes. Check devices : On each check run, the check runs a check on all discovered instances. This is done in parallel using a threadpool. The check waits for all sub-checks to finish. Handle failures : Discovered instances that fail after a configured number of times are dropped. They may be rediscovered later. Submit discovery-related metrics : the check submits the total number of discovered devices for a given network_address instance. Caveats \u00b6 The approach described above is not ideal for several reasons: The check code is harder to understand since the two distinct paths (\"single device\" vs \"entire network\") live in a single integration. Each network instance manages several long-running threads that span well beyond the lifespan of a single check run. Each network check pseudo-schedules other instances, which is normally the responsibility of the Agent. For this reason, auto-discovery was eventually implemented in the Agent as a proper service listener (see below), and users should be discouraged from using Python auto-discovery. When the deprecation period expires, we will be able to remove auto-discovery logic from the Python check, making it exclusively focused on checking single devices. Agent Auto-Discovery \u00b6 Dependencies \u00b6 Agent auto-discovery uses GoSNMP to get the sysObjectID of devices in the network. Standalone Agent \u00b6 Agent auto-discovery implements the same logic than the Python auto-discovery, but as a service listener in the Agent Go package. This approach leverages the existing Agent scheduling logic, and makes it possible to scale device auto-discovery using the Datadog Cluster Agent (see Cluster Agent Integration ). 
Pending official documentation, here is an example configuration: # datadog.yaml listeners : - name : snmp snmp_listener : configs : - network : 10.0.0.0/28 version : 2 community : public - network : 10.0.1.0/30 version : 3 user : my-snmp-user authentication_protocol : SHA authentication_key : \"*****\" privacy_protocol : AES privacy_key : \"*****\" ignored_ip_addresses : - 10.0.1.0 - 10.0.1.1 Cluster Agent Support \u00b6 For Kubernetes environments, the Cluster Agent can be configured to use the SNMP Agent auto-discovery (via snmp listener) logic as a source of Cluster checks . The Datadog Cluster Agent (DCA) uses the snmp_listener config (Agent auto-discovery) to listen for IP ranges, then schedules snmp check instances to be run by one or more normal Datadog Agents. Agent auto-discovery combined with Cluster Agent is very scalable, it can be used to monitor a large number of snmp devices. Example Cluster Agent setup with SNMP Agent auto-discovery using Datadog helm-chart \u00b6 First you need to add Datadog Helm repository . ``` $ helm repo add datadog https://helm.datadoghq.com $ helm repo update ``` Then run: helm install datadog-monitoring --set datadog.apiKey = -f cluster-agent-values.yaml datadog/datadog Example cluster-agent-values.yaml datadog : ## @param apiKey - string - required ## Set this to your Datadog API key before the Agent runs. ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes # apiKey : ## @param clusterName - string - optional ## Set a unique cluster name to allow scoping hosts and Cluster Checks easily ## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions: ## * Lowercase letters, numbers, and hyphens only. ## * Must start with a letter. ## * Must end with a number or a letter. ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name # clusterName : my-snmp-cluster ## @param clusterChecks - object - required ## Enable the Cluster Checks feature on both the cluster-agents and the daemonset ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ ## Autodiscovery via Kube Service annotations is automatically enabled # clusterChecks : enabled : true ## @param tags - list of key:value elements - optional ## List of tags to attach to every metric, event and service check collected by this Agent. 
## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags : - 'env:test-snmp-cluster-agent' ## @param clusterAgent - object - required ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ # clusterAgent : ## @param enabled - boolean - required ## Set this to true to enable Datadog Cluster Agent # enabled : true ## @param confd - list of objects - optional ## Provide additional cluster check configurations ## Each key will become a file in /conf.d ## ref: https://docs.datadoghq.com/agent/autodiscovery/ # confd : # Static checks http_check.yaml : |- cluster_check: true instances: - name: 'Check Example Site1' url: http://example.net - name: 'Check Example Site2' url: http://example.net - name: 'Check Example Site3' url: http://example.net # Autodiscovery template needed for `snmp_listener` to create instance configs snmp.yaml : |- cluster_check: true # AD config below is copied from: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/snmp.d/auto_conf.yaml ad_identifiers: - snmp init_config: instances: - ## @param ip_address - string - optional ## The IP address of the device to monitor. # ip_address: \"%%host%%\" ## @param port - integer - optional - default: 161 ## Default SNMP port. # port: \"%%port%%\" ## @param snmp_version - integer - optional - default: 2 ## If you are using SNMP v1 set snmp_version to 1 (required) ## If you are using SNMP v3 set snmp_version to 3 (required) # snmp_version: \"%%extra_version%%\" ## @param timeout - integer - optional - default: 5 ## Amount of second before timing out. # timeout: \"%%extra_timeout%%\" ## @param retries - integer - optional - default: 5 ## Amount of retries before failure. # retries: \"%%extra_retries%%\" ## @param community_string - string - optional ## Only useful for SNMP v1 & v2. # community_string: \"%%extra_community%%\" ## @param user - string - optional ## USERNAME to connect to your SNMP devices. # user: \"%%extra_user%%\" ## @param authKey - string - optional ## Authentication key to use with your Authentication type. # authKey: \"%%extra_auth_key%%\" ## @param authProtocol - string - optional ## Authentication type to use when connecting to your SNMP devices. ## It can be one of: MD5, SHA, SHA224, SHA256, SHA384, SHA512. ## Default to MD5 when `authKey` is specified. # authProtocol: \"%%extra_auth_protocol%%\" ## @param privKey - string - optional ## Privacy type key to use with your Privacy type. # privKey: \"%%extra_priv_key%%\" ## @param privProtocol - string - optional ## Privacy type to use when connecting to your SNMP devices. ## It can be one of: DES, 3DES, AES, AES192, AES256, AES192C, AES256C. ## Default to DES when `privKey` is specified. # privProtocol: \"%%extra_priv_protocol%%\" ## @param context_engine_id - string - optional ## ID of your context engine; typically unneeded. ## (optional SNMP v3-only parameter) # context_engine_id: \"%%extra_context_engine_id%%\" ## @param context_name - string - optional ## Name of your context (optional SNMP v3-only parameter). # context_name: \"%%extra_context_name%%\" ## @param tags - list of key:value element - optional ## List of tags to attach to every metric, event and service check emitted by this integration. 
## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags: # The autodiscovery subnet the device is part of. # Used by Agent autodiscovery to pass subnet name. - \"autodiscovery_subnet:%%extra_autodiscovery_subnet%%\" ## @param extra_tags - string - optional ## Comma separated tags to attach to every metric, event and service check emitted by this integration. ## Example: ## extra_tags: \"tag1:val1,tag2:val2\" # extra_tags: \"%%extra_tags%%\" ## @param oid_batch_size - integer - optional - default: 60 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. # oid_batch_size: \"%%extra_oid_batch_size%%\" ## @param datadog-cluster.yaml - object - optional ## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml). # datadog_cluster_yaml : listeners : - name : snmp # See here for all `snmp_listener` configs: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml snmp_listener : workers : 2 discovery_interval : 10 configs : - network : 192.168.1.16/29 version : 2 port : 1161 community : cisco_icm - network : 192.168.1.16/29 version : 2 port : 1161 community : f5 TODO: architecture diagram, example setup, affected files and repos, local testing tools, etc.","title":"SNMP"},{"location":"architecture/snmp/#snmp","text":"Note This section is meant for developers that want to understand the working of the SNMP integration. Be sure you are familiar with SNMP concepts , and you have read through the official SNMP integration docs .","title":"SNMP"},{"location":"architecture/snmp/#overview","text":"While most integrations are either Python, JMX, or implemented in the Agent in Go, the SNMP integration is a bit more complex. Here's an overview of what this integration involves: A Python check , responsible for: Collecting metrics from a specific device IP. Metrics typically come from profiles , but they can also be specified explicitly . Auto-discovering devices over a network. (Pending deprecation in favor of Agent auto-discovery.) An Agent service listener , responsible for auto-discovering devices over a network and forwarding discovered instances to the existing Agent check scheduling pipeline. Also known as \"Agent SNMP auto-discovery\". The diagram below shows how these components interact for a typical VM-based setup (single Agent on a host). For Datadog Cluster Agent (DCA) deployments, see Cluster Agent Integration .","title":"Overview"},{"location":"architecture/snmp/#python-check","text":"","title":"Python Check"},{"location":"architecture/snmp/#dependencies","text":"The Python check uses PySNMP to make SNMP queries and manipulate SNMP data (OIDs, variables, and MIBs).","title":"Dependencies"},{"location":"architecture/snmp/#device-monitoring","text":"The primary functionality of the Python check is to collect metrics from a given device given its IP address. As all Python checks, it supports multi-instances configuration, where each instance represents a device: instances : - ip_address : \"192.168.0.12\" # ","title":"Device Monitoring"},{"location":"architecture/snmp/#python-auto-discovery","text":"","title":"Python Auto-Discovery"},{"location":"architecture/snmp/#approach","text":"The Python check includes a multithreaded implementation of device auto-discovery. 
It runs on instances that use network_address instead of ip_address : instances : - network_address : \"192.168.0.0/28\" # The main tasks performed by device auto-discovery are: Find new devices : For each IP in the network_address CIDR range, the check queries the device sysObjectID . If the query succeeds and the sysObjectID matches one of the registered profiles, the device is added as a discovered instance. This logic is run at regular intervals in a separate thread. Cache devices : To improve performance, discovered instances are cached on disk based on a hash of the instance. Since options from the network_address instance are copied into discovered instances, the cache is invalidated if the network_address changes. Check devices : On each check run, the check runs a check on all discovered instances. This is done in parallel using a threadpool. The check waits for all sub-checks to finish. Handle failures : Discovered instances that fail after a configured number of times are dropped. They may be rediscovered later. Submit discovery-related metrics : the check submits the total number of discovered devices for a given network_address instance.","title":"Approach"},{"location":"architecture/snmp/#caveats","text":"The approach described above is not ideal for several reasons: The check code is harder to understand since the two distinct paths (\"single device\" vs \"entire network\") live in a single integration. Each network instance manages several long-running threads that span well beyond the lifespan of a single check run. Each network check pseudo-schedules other instances, which is normally the responsibility of the Agent. For this reason, auto-discovery was eventually implemented in the Agent as a proper service listener (see below), and users should be discouraged from using Python auto-discovery. When the deprecation period expires, we will be able to remove auto-discovery logic from the Python check, making it exclusively focused on checking single devices.","title":"Caveats"},{"location":"architecture/snmp/#agent-auto-discovery","text":"","title":"Agent Auto-Discovery"},{"location":"architecture/snmp/#dependencies_1","text":"Agent auto-discovery uses GoSNMP to get the sysObjectID of devices in the network.","title":"Dependencies"},{"location":"architecture/snmp/#standalone-agent","text":"Agent auto-discovery implements the same logic than the Python auto-discovery, but as a service listener in the Agent Go package. This approach leverages the existing Agent scheduling logic, and makes it possible to scale device auto-discovery using the Datadog Cluster Agent (see Cluster Agent Integration ). Pending official documentation, here is an example configuration: # datadog.yaml listeners : - name : snmp snmp_listener : configs : - network : 10.0.0.0/28 version : 2 community : public - network : 10.0.1.0/30 version : 3 user : my-snmp-user authentication_protocol : SHA authentication_key : \"*****\" privacy_protocol : AES privacy_key : \"*****\" ignored_ip_addresses : - 10.0.1.0 - 10.0.1.1","title":"Standalone Agent"},{"location":"architecture/snmp/#cluster-agent-support","text":"For Kubernetes environments, the Cluster Agent can be configured to use the SNMP Agent auto-discovery (via snmp listener) logic as a source of Cluster checks . The Datadog Cluster Agent (DCA) uses the snmp_listener config (Agent auto-discovery) to listen for IP ranges, then schedules snmp check instances to be run by one or more normal Datadog Agents. 
Agent auto-discovery combined with Cluster Agent is very scalable, it can be used to monitor a large number of snmp devices.","title":"Cluster Agent Support"},{"location":"architecture/snmp/#example-cluster-agent-setup-with-snmp-agent-auto-discovery-using-datadog-helm-chart","text":"First you need to add Datadog Helm repository . ``` $ helm repo add datadog https://helm.datadoghq.com $ helm repo update ``` Then run: helm install datadog-monitoring --set datadog.apiKey = -f cluster-agent-values.yaml datadog/datadog Example cluster-agent-values.yaml datadog : ## @param apiKey - string - required ## Set this to your Datadog API key before the Agent runs. ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes # apiKey : ## @param clusterName - string - optional ## Set a unique cluster name to allow scoping hosts and Cluster Checks easily ## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions: ## * Lowercase letters, numbers, and hyphens only. ## * Must start with a letter. ## * Must end with a number or a letter. ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name # clusterName : my-snmp-cluster ## @param clusterChecks - object - required ## Enable the Cluster Checks feature on both the cluster-agents and the daemonset ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ ## Autodiscovery via Kube Service annotations is automatically enabled # clusterChecks : enabled : true ## @param tags - list of key:value elements - optional ## List of tags to attach to every metric, event and service check collected by this Agent. ## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags : - 'env:test-snmp-cluster-agent' ## @param clusterAgent - object - required ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ # clusterAgent : ## @param enabled - boolean - required ## Set this to true to enable Datadog Cluster Agent # enabled : true ## @param confd - list of objects - optional ## Provide additional cluster check configurations ## Each key will become a file in /conf.d ## ref: https://docs.datadoghq.com/agent/autodiscovery/ # confd : # Static checks http_check.yaml : |- cluster_check: true instances: - name: 'Check Example Site1' url: http://example.net - name: 'Check Example Site2' url: http://example.net - name: 'Check Example Site3' url: http://example.net # Autodiscovery template needed for `snmp_listener` to create instance configs snmp.yaml : |- cluster_check: true # AD config below is copied from: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/snmp.d/auto_conf.yaml ad_identifiers: - snmp init_config: instances: - ## @param ip_address - string - optional ## The IP address of the device to monitor. # ip_address: \"%%host%%\" ## @param port - integer - optional - default: 161 ## Default SNMP port. 
# port: \"%%port%%\" ## @param snmp_version - integer - optional - default: 2 ## If you are using SNMP v1 set snmp_version to 1 (required) ## If you are using SNMP v3 set snmp_version to 3 (required) # snmp_version: \"%%extra_version%%\" ## @param timeout - integer - optional - default: 5 ## Amount of second before timing out. # timeout: \"%%extra_timeout%%\" ## @param retries - integer - optional - default: 5 ## Amount of retries before failure. # retries: \"%%extra_retries%%\" ## @param community_string - string - optional ## Only useful for SNMP v1 & v2. # community_string: \"%%extra_community%%\" ## @param user - string - optional ## USERNAME to connect to your SNMP devices. # user: \"%%extra_user%%\" ## @param authKey - string - optional ## Authentication key to use with your Authentication type. # authKey: \"%%extra_auth_key%%\" ## @param authProtocol - string - optional ## Authentication type to use when connecting to your SNMP devices. ## It can be one of: MD5, SHA, SHA224, SHA256, SHA384, SHA512. ## Default to MD5 when `authKey` is specified. # authProtocol: \"%%extra_auth_protocol%%\" ## @param privKey - string - optional ## Privacy type key to use with your Privacy type. # privKey: \"%%extra_priv_key%%\" ## @param privProtocol - string - optional ## Privacy type to use when connecting to your SNMP devices. ## It can be one of: DES, 3DES, AES, AES192, AES256, AES192C, AES256C. ## Default to DES when `privKey` is specified. # privProtocol: \"%%extra_priv_protocol%%\" ## @param context_engine_id - string - optional ## ID of your context engine; typically unneeded. ## (optional SNMP v3-only parameter) # context_engine_id: \"%%extra_context_engine_id%%\" ## @param context_name - string - optional ## Name of your context (optional SNMP v3-only parameter). # context_name: \"%%extra_context_name%%\" ## @param tags - list of key:value element - optional ## List of tags to attach to every metric, event and service check emitted by this integration. ## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags: # The autodiscovery subnet the device is part of. # Used by Agent autodiscovery to pass subnet name. - \"autodiscovery_subnet:%%extra_autodiscovery_subnet%%\" ## @param extra_tags - string - optional ## Comma separated tags to attach to every metric, event and service check emitted by this integration. ## Example: ## extra_tags: \"tag1:val1,tag2:val2\" # extra_tags: \"%%extra_tags%%\" ## @param oid_batch_size - integer - optional - default: 60 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. # oid_batch_size: \"%%extra_oid_batch_size%%\" ## @param datadog-cluster.yaml - object - optional ## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml). 
# datadog_cluster_yaml : listeners : - name : snmp # See here for all `snmp_listener` configs: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml snmp_listener : workers : 2 discovery_interval : 10 configs : - network : 192.168.1.16/29 version : 2 port : 1161 community : cisco_icm - network : 192.168.1.16/29 version : 2 port : 1161 community : f5 TODO: architecture diagram, example setup, affected files and repos, local testing tools, etc.","title":"Example Cluster Agent setup with SNMP Agent auto-discovery using Datadog helm-chart"},{"location":"architecture/vsphere/","text":"vSphere \u00b6 High-Level information \u00b6 Product overview \u00b6 vSphere is a VMware product dedicated to managing a (usually) on-premise infrastructure. From physical machines running VMware ESXi that are called ESXi Hosts, users can spin up or migrate Virtual Machines from one host to another. vSphere is an integrated solution and provides an easy managing interface over concepts like data storage, or computing resource. Terminology \u00b6 This section details some of vSphere specific elements. This section does not intend to be an extensive list, but rather a place for those unfamiliar with the product to have the basics required to understand how the Datadog integration works. vSphere - The complete suite of tools and technologies detailed in this article. vCenter server - The main machine which controls ESXi hosts and provides both a web UI and an API to control the vSphere environment. vCSA (vCenter Server Appliance) - A specific kind of vCenter where the software runs in a dedicated Linux machine (more recent). By opposition, the legacy vCenter is typically installed on an existing Windows machine. ESXi host - The physical machine controlled by vCenter where the ESXi (bare-metal) virtualizer is installed. The host boots a minimal OS that can run Virtual Machines. VM - What anyone using vSphere really needs in the end, instances that can run applications and code. Note: Datadog monitors both ESXi hosts and VMs and it calls them both \"host\" (they are in the host map). Attributes/tags - It is possible to add attributes and tags to any vSphere resource, note that those two are now very similar with \"attributes\" being the deprecated thing to use. Datacenter - A set of resources grouped together. A single vCenter server can handle multiple datacenters. Datastore - A virtual vSphere concept to represent data storing capabilities. It can be an NFS server that ESXi hosts have read/write access to, it can be a mounted disk on the host and more. Datastores are often shared between multiple hosts. This allows Virtual Machines to be migrated from one host to another. Cluster - A logical grouping of computational resources, you can add multiple ESXi hosts in your cluster and then you can create VM in the cluster (and not on a specific host, vSphere will take care of placing your VM in one of the ESXi hosts and migrating it when needed). Photon OS - An open-source minimal Linux distribution and used by both ESXi and vCSA as a base. The integration \u00b6 Setup \u00b6 The Datadog vSphere integration runs from a single agent and pulls all the information from a single vCenter endpoint. Because the agent cannot run directly on Photon OS, it is usually required that the agent runs within a dedicated VM inside the vSphere infrastructure. 
Once the agent is running, the minimal configuration (as of version 5.x) is as follows: init_config : instances : - host : username : password : use_legacy_check_version : false empty_default_hostname : true host is the endpoint used to access the vSphere Client from a web browser. The host is either a FQDN or an IP, not an http url. username and password are the credentials to log in to vCenter. use_legacy_check_version is a backward compatibility flag. It should always be set to false and this flag will be removed in a future version of the integration. Setting it to true tells the agent to use an older and deprecated version of the vSphere integration. empty_default_hostname is a field used by the agent directly (and not the integration). By default, the agent does not allow submitting metrics without attaching an explicit host tag unless this flag is set to true. The vSphere integration uses that behavior for some metrics and service checks. For example, the vsphere.vm.count metric which gives a count of the VMs in the infra is not submitted with a host tag. This is particularly important if the agent runs inside a vSphere VM. If the vsphere.vm.count was submitted with a host tag, the Datadog backend would attach all the other host tags to the metric, for example vsphere_type:vm or vsphere_host: which makes the metric almost impossible to use. Concepts \u00b6 Collection level \u00b6 vSphere metrics are documented in their documentation page an each metric has a defined \"collection level\". That level determines the amount of data gathered by the integration and especially which metrics are available. More details here . By default, only the level 1 metrics are collected but this can be increased in the integration configuration file. Realtime vs historical \u00b6 Each ESXi host collects and stores data for each metric on himself and every VM it hosts every 20 seconds. Those data points are stored for up to one hour and are called realtime. Note: Each metric concerns always either a VM or an ESXi hosts. Metrics that concern datastore for example are not collected in the ESXi hosts. Additionally, the vCenter server collects data from all the ESXi hosts and stores the datapoint with some aggregation rollup into its own database. Those data points are called \"historical\". Finally, the vCenter server also collects metrics for other kinds of resources (like Datastore, ClusterComputeResource, Datacenter...) Those data points are necessarily \"historical\". The reason for such an important distinction is that historical metrics are much MUCH slower to collect than realtime metrics. The vSphere integration will always collect the \"realtime\" data for metrics that concern ESXi hosts and VMs. But the integration also collects metrics for Datastores, ClusterComputeResources, Datacenters, and maybe others in the future. That's why, in the context of the Datadog vSphere integration, we usually simplify by considering that: VMs and ESXi hosts are \"realtime resources\". Metrics for such resources are quick and easy to get by querying vCenter that will in turn query all the ESXi hosts. Datastores, ClusterComputeResources, and Datacenters are \"historical resources\" and are much slower to collect. To collect all metrics (realtime and historical), it is advised to use two \"check instances\". One with collection_type: realtime and one with collection_type: historical . 
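For illustration, here is a hedged sketch of what those two instances look like once the Agent has parsed the YAML configuration into the Python dictionaries the check receives. Only the scheduling-related key is shown; the connection options from the Setup section above (host, username, password, and so on) are still required:

```python
# Hedged sketch: the two vSphere check instances as plain Python dictionaries,
# i.e. roughly what the check's instance argument contains after the Agent
# parses the YAML file. Connection options are omitted for brevity.
realtime_instance = {
    'collection_type': 'realtime',    # VMs and ESXi hosts: fast, collected often
    'empty_default_hostname': True,
}

historical_instance = {
    'collection_type': 'historical',  # Datastores, Clusters, Datacenters: slower
    'empty_default_hostname': True,
}
```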
This way all metrics will be collected but because both check instances are on different schedules, the slowness of collecting historical metrics won't affect the rate at which realtime metrics are collected. vSphere tags and attributes \u00b6 Similarly to how Datadog allows you to add tags to your different hosts (thins like the os or the instance-type of your machines), vSphere has \"tags\" and \"attributes\". A lot of details can be found here: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-E8E854DD-AA97-4E0C-8419-CE84F93C4058.html#:~:text=Tags%20and%20attributes%20allow%20you,that%20tag%20to%20a%20category. But the overall idea is that both tags and attributes are additional information that you can attach to your vSphere resources and that \"tags\" are newer and more featureful than \"attributes\". Filtering \u00b6 A very flexible filtering system has been implemented with the vSphere integration. This allows fine-tuned configuration so that: You only pay for the host and VMs you really want to monitor. You reduce the load on your vCenter server by running just the queries that you need. You improve the check runtime which otherwise increases linearly with the size of their infrastructure and that was seen to take up to 10min in some large environments. We provide two types of filtering, one based on metrics, the other based on resources. The metric filter is fairly simple, for each resource type, you can provide some regexes. If a metric match any of the filter, it will be fetched and submitted. The configuration looks like this: metric_filters : vm : - cpu\\..* - mem\\..* host : - WHATEVER # Excludes everything datacenter : - .* The resource filter on the other hand, allows to exclude some vSphere resources (VM, ESXi host, etc.), based on an \"attribute\" of that resource. The possible attributes as of today are: - name , literally the name of the resource (as defined in vCenter) - inventory_path , a path-like string that represents the location of the resource in the inventory tree as each resource only ever has a single parent and recursively up to the root. For example: /my.datacenter.local/vm/staging/myservice/vm_name - tag , see the tags and attributes section. Used to filter resources based on the attached tags. - attribute , see the tags and attributes section. Used to filter resources based on the attached attributes. - hostname (only for VMs), the name of the ESXi host where the VM is running. - guest_hostname (only for VMs), the name of the OS as reported from within the machine. VMware tools have to be installed on the VM otherwise, vCenter is not able to fetch this information. A possible filtering configuration would look like this: resource_filters : - resource : vm property : name patterns : - - - resource : vm property : hostname patterns : - - resource : vm property : tag type : blacklist patterns : - '^env:staging$' - resource : vm property : tag type : whitelist # type defaults to whitelist patterns : - '^env:.*$' - resource : vm property : guest_hostname patterns : - - resource : host property : inventory_path patterns : - Instance tag \u00b6 In vSphere each metric is defined by three \"dimensions\". The resource on which the metric applies (for example the VM called \"abc1\") The name of the metric (for example cpu.usage). An additional available dimension that varies between metrics. 
(for example the cpu core id) This is similar to how Datadog represent metrics, except that the context cardinality is limited to two \"keys\", the name of the resource (usually the \"host\" tag), and there is space for one additional tag key. This available tag key is defined as the \"instance\" property, or \"instance tag\" in vSphere, and this dimension is not collected by default by the Datadog integration as it can have too big performance implications in large systems when compared to their added value from a monitoring perspective. Also when fetching metrics with the instance tag, vSphere only provides the value of the instance tag, it doesn't expose a human-readable \"key\" for that tag. In the cpu.usage metric with the core_id as the instance tag, the integration has to \"know\" that the meaning of the instance tag and that's why we rely on a hardcoded list in the integration. Because this instance tag can provide additional visibility, it is possible to enable it for some metrics from the configuration. For example, if we're really interested in getting the usage of the cpu per core, the setup can look like this: collect_per_instance_filters : vm : - cpu\\.usage\\..*","title":"vSphere"},{"location":"architecture/vsphere/#vsphere","text":"","title":"vSphere"},{"location":"architecture/vsphere/#high-level-information","text":"","title":"High-Level information"},{"location":"architecture/vsphere/#product-overview","text":"vSphere is a VMware product dedicated to managing a (usually) on-premise infrastructure. From physical machines running VMware ESXi that are called ESXi Hosts, users can spin up or migrate Virtual Machines from one host to another. vSphere is an integrated solution and provides an easy managing interface over concepts like data storage, or computing resource.","title":"Product overview"},{"location":"architecture/vsphere/#terminology","text":"This section details some of vSphere specific elements. This section does not intend to be an extensive list, but rather a place for those unfamiliar with the product to have the basics required to understand how the Datadog integration works. vSphere - The complete suite of tools and technologies detailed in this article. vCenter server - The main machine which controls ESXi hosts and provides both a web UI and an API to control the vSphere environment. vCSA (vCenter Server Appliance) - A specific kind of vCenter where the software runs in a dedicated Linux machine (more recent). By opposition, the legacy vCenter is typically installed on an existing Windows machine. ESXi host - The physical machine controlled by vCenter where the ESXi (bare-metal) virtualizer is installed. The host boots a minimal OS that can run Virtual Machines. VM - What anyone using vSphere really needs in the end, instances that can run applications and code. Note: Datadog monitors both ESXi hosts and VMs and it calls them both \"host\" (they are in the host map). Attributes/tags - It is possible to add attributes and tags to any vSphere resource, note that those two are now very similar with \"attributes\" being the deprecated thing to use. Datacenter - A set of resources grouped together. A single vCenter server can handle multiple datacenters. Datastore - A virtual vSphere concept to represent data storing capabilities. It can be an NFS server that ESXi hosts have read/write access to, it can be a mounted disk on the host and more. Datastores are often shared between multiple hosts. 
This allows Virtual Machines to be migrated from one host to another. Cluster - A logical grouping of computational resources, you can add multiple ESXi hosts in your cluster and then you can create VM in the cluster (and not on a specific host, vSphere will take care of placing your VM in one of the ESXi hosts and migrating it when needed). Photon OS - An open-source minimal Linux distribution and used by both ESXi and vCSA as a base.","title":"Terminology"},{"location":"architecture/vsphere/#the-integration","text":"","title":"The integration"},{"location":"architecture/vsphere/#setup","text":"The Datadog vSphere integration runs from a single agent and pulls all the information from a single vCenter endpoint. Because the agent cannot run directly on Photon OS, it is usually required that the agent runs within a dedicated VM inside the vSphere infrastructure. Once the agent is running, the minimal configuration (as of version 5.x) is as follows: init_config : instances : - host : username : password : use_legacy_check_version : false empty_default_hostname : true host is the endpoint used to access the vSphere Client from a web browser. The host is either a FQDN or an IP, not an http url. username and password are the credentials to log in to vCenter. use_legacy_check_version is a backward compatibility flag. It should always be set to false and this flag will be removed in a future version of the integration. Setting it to true tells the agent to use an older and deprecated version of the vSphere integration. empty_default_hostname is a field used by the agent directly (and not the integration). By default, the agent does not allow submitting metrics without attaching an explicit host tag unless this flag is set to true. The vSphere integration uses that behavior for some metrics and service checks. For example, the vsphere.vm.count metric which gives a count of the VMs in the infra is not submitted with a host tag. This is particularly important if the agent runs inside a vSphere VM. If the vsphere.vm.count was submitted with a host tag, the Datadog backend would attach all the other host tags to the metric, for example vsphere_type:vm or vsphere_host: which makes the metric almost impossible to use.","title":"Setup"},{"location":"architecture/vsphere/#concepts","text":"","title":"Concepts"},{"location":"architecture/vsphere/#collection-level","text":"vSphere metrics are documented in their documentation page an each metric has a defined \"collection level\". That level determines the amount of data gathered by the integration and especially which metrics are available. More details here . By default, only the level 1 metrics are collected but this can be increased in the integration configuration file.","title":"Collection level"},{"location":"architecture/vsphere/#realtime-vs-historical","text":"Each ESXi host collects and stores data for each metric on himself and every VM it hosts every 20 seconds. Those data points are stored for up to one hour and are called realtime. Note: Each metric concerns always either a VM or an ESXi hosts. Metrics that concern datastore for example are not collected in the ESXi hosts. Additionally, the vCenter server collects data from all the ESXi hosts and stores the datapoint with some aggregation rollup into its own database. Those data points are called \"historical\". Finally, the vCenter server also collects metrics for other kinds of resources (like Datastore, ClusterComputeResource, Datacenter...) Those data points are necessarily \"historical\". 
The reason for such an important distinction is that historical metrics are much MUCH slower to collect than realtime metrics. The vSphere integration will always collect the \"realtime\" data for metrics that concern ESXi hosts and VMs. But the integration also collects metrics for Datastores, ClusterComputeResources, Datacenters, and maybe others in the future. That's why, in the context of the Datadog vSphere integration, we usually simplify by considering that: VMs and ESXi hosts are \"realtime resources\". Metrics for such resources are quick and easy to get by querying vCenter that will in turn query all the ESXi hosts. Datastores, ClusterComputeResources, and Datacenters are \"historical resources\" and are much slower to collect. To collect all metrics (realtime and historical), it is advised to use two \"check instances\". One with collection_type: realtime and one with collection_type: historical . This way all metrics will be collected but because both check instances are on different schedules, the slowness of collecting historical metrics won't affect the rate at which realtime metrics are collected.","title":"Realtime vs historical"},{"location":"architecture/vsphere/#vsphere-tags-and-attributes","text":"Similarly to how Datadog allows you to add tags to your different hosts (thins like the os or the instance-type of your machines), vSphere has \"tags\" and \"attributes\". A lot of details can be found here: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-E8E854DD-AA97-4E0C-8419-CE84F93C4058.html#:~:text=Tags%20and%20attributes%20allow%20you,that%20tag%20to%20a%20category. But the overall idea is that both tags and attributes are additional information that you can attach to your vSphere resources and that \"tags\" are newer and more featureful than \"attributes\".","title":"vSphere tags and attributes"},{"location":"architecture/vsphere/#filtering","text":"A very flexible filtering system has been implemented with the vSphere integration. This allows fine-tuned configuration so that: You only pay for the host and VMs you really want to monitor. You reduce the load on your vCenter server by running just the queries that you need. You improve the check runtime which otherwise increases linearly with the size of their infrastructure and that was seen to take up to 10min in some large environments. We provide two types of filtering, one based on metrics, the other based on resources. The metric filter is fairly simple, for each resource type, you can provide some regexes. If a metric match any of the filter, it will be fetched and submitted. The configuration looks like this: metric_filters : vm : - cpu\\..* - mem\\..* host : - WHATEVER # Excludes everything datacenter : - .* The resource filter on the other hand, allows to exclude some vSphere resources (VM, ESXi host, etc.), based on an \"attribute\" of that resource. The possible attributes as of today are: - name , literally the name of the resource (as defined in vCenter) - inventory_path , a path-like string that represents the location of the resource in the inventory tree as each resource only ever has a single parent and recursively up to the root. For example: /my.datacenter.local/vm/staging/myservice/vm_name - tag , see the tags and attributes section. Used to filter resources based on the attached tags. - attribute , see the tags and attributes section. Used to filter resources based on the attached attributes. 
- hostname (only for VMs), the name of the ESXi host where the VM is running. - guest_hostname (only for VMs), the name of the OS as reported from within the machine. VMware tools have to be installed on the VM otherwise, vCenter is not able to fetch this information. A possible filtering configuration would look like this: resource_filters : - resource : vm property : name patterns : - - - resource : vm property : hostname patterns : - - resource : vm property : tag type : blacklist patterns : - '^env:staging$' - resource : vm property : tag type : whitelist # type defaults to whitelist patterns : - '^env:.*$' - resource : vm property : guest_hostname patterns : - - resource : host property : inventory_path patterns : - ","title":"Filtering"},{"location":"architecture/vsphere/#instance-tag","text":"In vSphere each metric is defined by three \"dimensions\". The resource on which the metric applies (for example the VM called \"abc1\") The name of the metric (for example cpu.usage). An additional available dimension that varies between metrics. (for example the cpu core id) This is similar to how Datadog represent metrics, except that the context cardinality is limited to two \"keys\", the name of the resource (usually the \"host\" tag), and there is space for one additional tag key. This available tag key is defined as the \"instance\" property, or \"instance tag\" in vSphere, and this dimension is not collected by default by the Datadog integration as it can have too big performance implications in large systems when compared to their added value from a monitoring perspective. Also when fetching metrics with the instance tag, vSphere only provides the value of the instance tag, it doesn't expose a human-readable \"key\" for that tag. In the cpu.usage metric with the core_id as the instance tag, the integration has to \"know\" that the meaning of the instance tag and that's why we rely on a hardcoded list in the integration. Because this instance tag can provide additional visibility, it is possible to enable it for some metrics from the configuration. For example, if we're really interested in getting the usage of the cpu per core, the setup can look like this: collect_per_instance_filters : vm : - cpu\\.usage\\..*","title":"Instance tag"},{"location":"architecture/win32_event_log/","text":"Windows Event Log \u00b6 Overview \u00b6 Users set a path with which to collect events from. It can be the name of a channel (like System , Application , etc.) or the full path to a log file. There are 3 ways to select filter criteria rather than collecting all events: query - A raw XPath or structured XML query used to filter events. This overrides any selected filters . filters - A mapping of properties to allowed values. Every filter (equivalent to the and operator) must match any value (equivalent to the or operator). This option is a convenience for a query that is relatively basic. Rather than collect all events and perform filtering within the check, the filters are converted to an XPath expression. This approach offloads all filtering to the kernel (like query ), which increases performance and reduces bandwidth usage when connecting to a remote machine. included_messages / excluded_messages - These are regular expression patterns used to filter by events' messages specifically (if a message is found), with the exclude list taking precedence. These may be used in place of or with query / filters , as there exists no query construct by which to select a message attribute. 
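To make the filters semantics concrete, here is a simplified sketch of that translation: properties are combined with and, and each property's allowed values are combined with or. This is only an illustration of the behavior described above; in particular, mapping every property directly to a name=value test under System is an assumption for readability, not the check's actual implementation:

```python
def filters_to_xpath(filters):
    # Illustration only: AND across properties, OR across each property's values.
    # Real event properties do not all map to simple name=value tests under
    # System; the simplification is just to show the combination logic.
    property_clauses = []
    for prop, values in filters.items():
        value_clauses = ['{}={!r}'.format(prop, value) for value in values]
        property_clauses.append('({})'.format(' or '.join(value_clauses)))
    return '*[System[{}]]'.format(' and '.join(property_clauses))


# Example: keep only levels 2 or 3 from either of two providers (names made up).
print(filters_to_xpath({'Level': [2, 3], 'Provider': ['Service Control Manager', 'MSSQLSERVER']}))
# *[System[(Level=2 or Level=3) and (Provider='Service Control Manager' or Provider='MSSQLSERVER')]]
```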
A pull subscription model is used. At every check run, the cached event log handle waits to be signaled for a configurable number of seconds. If signaled, the check then polls all available events in batches of a configurable size. At configurable intervals, the most recently encountered event is saved to the filesystem. This is useful for preventing duplicate events from being sent as a consequence of Agent restarts, especially when the start option is set to oldest . Logs \u00b6 Events may alternatively be configured to be submitted as logs. The code for that resides here . Only a subset of the check's functionality is available. Namely, each log configuration will collect all events of the given channel without filtering, tagging, or remote connection options. This implementation uses the push subscription model . There is a bit of C in charge of rendering the relevant data and registering the Go tailer callback that ultimately sends the log to the backend. Legacy mode \u00b6 Setting legacy_mode to true in the check will use WMI to collect events, which is significantly more resource intensive. This mode has entirely different configuration options and will be removed in a future release. Agent 6 can only use this mode, as Python 2 does not support the new implementation.","title":"Windows Event Log"},{"location":"architecture/win32_event_log/#windows-event-log","text":"","title":"Windows Event Log"},{"location":"architecture/win32_event_log/#overview","text":"Users set a path from which to collect events. It can be the name of a channel (like System , Application , etc.) or the full path to a log file. There are 3 ways to select filter criteria rather than collecting all events: query - A raw XPath or structured XML query used to filter events. This overrides any selected filters . filters - A mapping of properties to allowed values. Every filter must match (equivalent to the and operator), and a filter matches when the property equals any of its allowed values (equivalent to the or operator). This option is a convenience for a query that is relatively basic. Rather than collect all events and perform filtering within the check, the filters are converted to an XPath expression. This approach offloads all filtering to the kernel (like query ), which increases performance and reduces bandwidth usage when connecting to a remote machine. included_messages / excluded_messages - These are regular expression patterns used to filter by events' messages specifically (if a message is found), with the exclude list taking precedence. These may be used in place of or with query / filters , as there exists no query construct by which to select a message attribute. A pull subscription model is used. At every check run, the cached event log handle waits to be signaled for a configurable number of seconds. If signaled, the check then polls all available events in batches of a configurable size. At configurable intervals, the most recently encountered event is saved to the filesystem. This is useful for preventing duplicate events from being sent as a consequence of Agent restarts, especially when the start option is set to oldest .","title":"Overview"},{"location":"architecture/win32_event_log/#logs","text":"Events may alternatively be configured to be submitted as logs. The code for that resides here . Only a subset of the check's functionality is available. Namely, each log configuration will collect all events of the given channel without filtering, tagging, or remote connection options. This implementation uses the push subscription model . 
There is a bit of C in charge of rendering the relevant data and registering the Go tailer callback that ultimately sends the log to the backend.","title":"Logs"},{"location":"architecture/win32_event_log/#legacy-mode","text":"Setting legacy_mode to true in the check will use WMI to collect events, which is significantly more resource intensive. This mode has entirely different configuration options and will be removed in a future release. Agent 6 can only use this mode, as Python 2 does not support the new implementation.","title":"Legacy mode"},{"location":"base/about/","text":"About \u00b6 The Base package provides all the functionality and utilities necessary for writing Agent Integrations. Most importantly, it provides the AgentCheck base class, from which every Check must inherit. You would use it like so: from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' def check ( self , instance ): self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ]) The check method is what the Datadog Agent will execute. In this example we created a Check and gave it a namespace of awesome . This means that by default, every submission's name will be prefixed with awesome. . We submitted a gauge metric named awesome.test with a value of 1.23 tagged by foo:bar . The magic hidden by the usability of the API is that this actually calls a C binding which communicates with the Agent (written in Go).","title":"About"},{"location":"base/about/#about","text":"The Base package provides all the functionality and utilities necessary for writing Agent Integrations. Most importantly, it provides the AgentCheck base class, from which every Check must inherit. You would use it like so: from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' def check ( self , instance ): self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ]) The check method is what the Datadog Agent will execute. In this example we created a Check and gave it a namespace of awesome . This means that by default, every submission's name will be prefixed with awesome. . We submitted a gauge metric named awesome.test with a value of 1.23 tagged by foo:bar . The magic hidden by the usability of the API is that this actually calls a C binding which communicates with the Agent (written in Go).","title":"About"},{"location":"base/api/","text":"API \u00b6 datadog_checks.base.checks.base.AgentCheck \u00b6 The base class for any Agent based integration. In general, you don't need to and you should not override anything from the base class except the check method but sometimes it might be useful for a Check to have its own constructor. When overriding __init__ you have to remember that, depending on the configuration, the Agent might create several different Check instances and the method would be called as many times. Agent 6,7 signature: AgentCheck(name, init_config, instances) # instances contain only 1 instance AgentCheck.check(instance) Agent 8 signature: AgentCheck(name, init_config, instance) # one instance AgentCheck.check() # no more instance argument for check method Note when loading a Custom check, the Agent will inspect the module searching for a subclass of AgentCheck . If such a class exists but has been derived in turn, it'll be ignored - you should never derive from an existing Check . __init__ ( self , * args , ** kwargs ) special \u00b6 name ( str ) - the name of the check init_config ( dict ) - the init_config section of the configuration. 
instance ( List[dict] ) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). Source code in def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None \"\"\" - **name** (_str_) - the name of the check - **init_config** (_dict_) - the `init_config` section of the configuration. - **instance** (_List[dict]_) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). \"\"\" # NOTE: these variable assignments exist to ease type checking when eventually assigned as attributes. name = kwargs . get ( 'name' , '' ) init_config = kwargs . get ( 'init_config' , {}) agentConfig = kwargs . get ( 'agentConfig' , {}) instances = kwargs . get ( 'instances' , []) if len ( args ) > 0 : name = args [ 0 ] if len ( args ) > 1 : init_config = args [ 1 ] if len ( args ) > 2 : # agent pass instances as tuple but in test we are usually using list, so we are testing for both if len ( args ) > 3 or not isinstance ( args [ 2 ], ( list , tuple )) or 'instances' in kwargs : # old-style init: the 3rd argument is `agentConfig` agentConfig = args [ 2 ] if len ( args ) > 3 : instances = args [ 3 ] else : # new-style init: the 3rd argument is `instances` instances = args [ 2 ] # NOTE: Agent 6+ should pass exactly one instance... But we are not abiding by that rule on our side # everywhere just yet. It's complicated... See: https://github.com/DataDog/integrations-core/pull/5573 instance = instances [ 0 ] if instances else None self . check_id = '' self . name = name # type: str self . init_config = init_config # type: InitConfigType self . agentConfig = agentConfig # type: AgentConfigType self . instance = instance # type: InstanceType self . instances = instances # type: List[InstanceType] self . warnings = [] # type: List[str] # `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead self . hostname = datadog_agent . get_hostname () # type: str logger = logging . getLogger ( ' {} . {} ' . format ( __name__ , self . name )) self . log = CheckLoggingAdapter ( logger , self ) # TODO: Remove with Agent 5 # Set proxy settings self . proxies = self . _get_requests_proxy () if not self . init_config : self . _use_agent_proxy = True else : self . _use_agent_proxy = is_affirmative ( self . init_config . get ( 'use_agent_proxy' , True )) # TODO: Remove with Agent 5 self . default_integration_http_timeout = float ( self . agentConfig . get ( 'default_integration_http_timeout' , 9 )) self . _deprecations = { 'increment' : ( False , ( 'DEPRECATION NOTICE: `AgentCheck.increment`/`AgentCheck.decrement` are deprecated, please ' 'use `AgentCheck.gauge` or `AgentCheck.count` instead, with a different metric name' ), ), 'device_name' : ( False , ( 'DEPRECATION NOTICE: `device_name` is deprecated, please use a `device:` ' 'tag in the `tags` list instead' ), ), 'in_developer_mode' : ( False , 'DEPRECATION NOTICE: `in_developer_mode` is deprecated, please stop using it.' , ), 'no_proxy' : ( False , ( 'DEPRECATION NOTICE: The `no_proxy` config option has been renamed ' 'to `skip_proxy` and will be removed in a future release.' ), ), 'service_tag' : ( False , ( 'DEPRECATION NOTICE: The `service` tag is deprecated and has been renamed to ` %s `. ' 'Set `disable_legacy_service_tag` to `true` to disable this warning. ' 'The default will become `true` and cannot be changed in Agent version 8.' 
), ), } # type: Dict[str, Tuple[bool, str]] # Setup metric limits self . metric_limiter = self . _get_metric_limiter ( self . name , instance = self . instance ) # Lazily load and validate config self . _config_model_instance = None # type: Any self . _config_model_shared = None # type: Any # Functions that will be called exactly once (if successful) before the first check run self . check_initializations = deque ([ self . send_config_metadata ]) # type: Deque[Callable[[], None]] if not PY2 : self . check_initializations . append ( self . load_configuration_models ) count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a raw count metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a raw count metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) event ( self , event ) \u00b6 Send an event. An event is a dictionary with the following keys and data types: { \"timestamp\" : int , # the epoch timestamp for the event \"event_type\" : str , # the event name \"api_key\" : str , # the api key for your account \"msg_title\" : str , # the title of the event \"msg_text\" : str , # the text body of the event \"aggregation_key\" : str , # a key to use for aggregating events \"alert_type\" : str , # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\" : str , # (optional) the source type name \"host\" : str , # (optional) the name of the host \"tags\" : list , # (optional) a list of tags to associate with this event \"priority\" : str , # (optional) specifies the priority of the event (\"normal\" or \"low\") } event ( dict ) - the event to be sent Source code in def event ( self , event ): # type: (Event) -> None \"\"\"Send an event. 
An event is a dictionary with the following keys and data types: ```python { \"timestamp\": int, # the epoch timestamp for the event \"event_type\": str, # the event name \"api_key\": str, # the api key for your account \"msg_title\": str, # the title of the event \"msg_text\": str, # the text body of the event \"aggregation_key\": str, # a key to use for aggregating events \"alert_type\": str, # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\": str, # (optional) the source type name \"host\": str, # (optional) the name of the host \"tags\": list, # (optional) a list of tags to associate with this event \"priority\": str, # (optional) specifies the priority of the event (\"normal\" or \"low\") } ``` - **event** (_dict_) - the event to be sent \"\"\" # Enforce types of some fields, considerably facilitates handling in go bindings downstream for key , value in iteritems ( event ): if not isinstance ( value , ( text_type , binary_type )): continue try : event [ key ] = to_native_string ( value ) # type: ignore # ^ Mypy complains about dynamic key assignment -- arguably for good reason. # Ideally we should convert this to a dict literal so that submitted events only include known keys. except UnicodeError : self . log . warning ( 'Encoding error with field ` %s `, cannot submit event' , key ) return if event . get ( 'tags' ): event [ 'tags' ] = self . _normalize_tags_type ( event [ 'tags' ]) if event . get ( 'timestamp' ): event [ 'timestamp' ] = int ( event [ 'timestamp' ]) if event . get ( 'aggregation_key' ): event [ 'aggregation_key' ] = to_native_string ( event [ 'aggregation_key' ]) if self . __NAMESPACE__ : event . setdefault ( 'source_type_name' , self . __NAMESPACE__ ) aggregator . submit_event ( self , self . check_id , event ) gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a gauge metric. Parameters: name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a gauge metric. **Parameters:** - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . GAUGE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a histogram metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. 
device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTOGRAM , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a histogram based on rate metrics. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram based on rate metrics. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTORATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) metadata_entrypoint ( method ) classmethod \u00b6 Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: class MyCheck ( AgentCheck ): @AgentCheck . metadata_entrypoint def collect_metadata ( self ): ... Source code in @classmethod def metadata_entrypoint ( cls , method ): # type: (Callable[..., None]) -> Callable[..., None] \"\"\" Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: ```python class MyCheck(AgentCheck): @AgentCheck.metadata_entrypoint def collect_metadata(self): ... ``` \"\"\" @functools . wraps ( method ) def entrypoint ( self , * args , ** kwargs ): # type: (AgentCheck, *Any, **Any) -> None if not self . is_metadata_collection_enabled (): return # NOTE: error handling still at the discretion of the wrapped method. method ( self , * args , ** kwargs ) return entrypoint monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ) \u00b6 Sample an increasing counter metric. 
name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix flush_first_value ( bool ) - whether to sample the first value Source code in def monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ): # type: (str, float, Sequence[str], str, str, bool, bool) -> None \"\"\"Sample an increasing counter metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix - **flush_first_value** (_bool_) - whether to sample the first value \"\"\" self . _submit_metric ( aggregator . MONOTONIC_COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw , flush_first_value = flush_first_value , ) rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a point, with the rate calculated at the end of the check. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a point, with the rate calculated at the end of the check. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . RATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ) \u00b6 Send the status of a service. name ( str ) - the name of the service check status ( int ) - a constant describing the service status. tags ( List[str] ) - a list of tags to associate with this service check message ( str ) - additional information or a description of why this status occurred. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ): # type: (str, ServiceCheckStatus, Sequence[str], str, str, bool) -> None \"\"\"Send the status of a service. 
- **name** (_str_) - the name of the service check - **status** (_int_) - a constant describing the service status. - **tags** (_List[str]_) - a list of tags to associate with this service check - **message** (_str_) - additional information or a description of why this status occurred. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" tags = self . _normalize_tags_type ( tags or []) if hostname is None : hostname = '' if message is None : message = '' else : message = to_native_string ( message ) message = self . sanitize ( message ) aggregator . submit_service_check ( self , self . check_id , self . _format_namespace ( name , raw ), status , tags , hostname , message ) set_metadata ( self , name , value , ** options ) \u00b6 Updates the cached metadata name with value , which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if name has no transformer defined then the raw value will be submitted and therefore it must be a str :param options: keyword arguments to pass to any defined transformer Source code in def set_metadata ( self , name , value , ** options ): # type: (str, Any, **Any) -> None \"\"\"Updates the cached metadata ``name`` with ``value``, which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if ``name`` has no transformer defined then the raw ``value`` will be submitted and therefore it must be a ``str`` :param options: keyword arguments to pass to any defined transformer \"\"\" self . metadata_manager . submit ( name , value , options ) Stubs \u00b6 datadog_checks.base.stubs.aggregator.AggregatorStub \u00b6 This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions. assert_all_metrics_covered ( self ) \u00b6 Source code in def assert_all_metrics_covered ( self ): # use `condition` to avoid building the `msg` if not needed condition = self . metrics_asserted_pct >= 100.0 msg = '' if not condition : prefix = ' \\n\\t - ' msg = 'Some metrics are missing:' msg += ' \\n Asserted Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . _asserted ))) msg += ' \\n Missing Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . not_asserted ()))) assert condition , msg assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ) \u00b6 Source code in def assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ): candidates = [] for e in self . events : if exact_match and msg_text != e [ 'msg_text' ] or msg_text not in e [ 'msg_text' ]: continue if tags and set ( tags ) != set ( e [ 'tags' ]): continue for name , value in iteritems ( kwargs ): if e [ name ] != value : break else : candidates . append ( e ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . 
format ( msg_text , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ) \u00b6 Assert a metric was processed by this stub Source code in def assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ): \"\"\" Assert a metric was processed by this stub \"\"\" self . _asserted . add ( name ) expected_tags = normalize_tags ( tags , sort = True ) candidates = [] for metric in self . metrics ( name ): if value is not None and not self . is_aggregate ( metric . type ) and value != metric . value : continue if expected_tags and expected_tags != sorted ( metric . tags ): continue if hostname is not None and hostname != metric . hostname : continue if metric_type is not None and metric_type != metric . type : continue if device is not None and device != metric . device : continue candidates . append ( metric ) expected_metric = MetricStub ( name , metric_type , value , tags , hostname , device ) if value is not None and candidates and all ( self . is_aggregate ( m . type ) for m in candidates ): got = sum ( m . value for m in candidates ) msg = \"Expected count value for ' {} ': {} , got {} \" . format ( name , value , got ) condition = value == got elif count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition , msg = msg , expected_stub = expected_metric , submitted_elements = self . _metrics ) assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ) \u00b6 Assert a metric is tagged with tag Source code in def assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ): \"\"\" Assert a metric is tagged with tag \"\"\" self . _asserted . add ( metric_name ) candidates = [] for metric in self . metrics ( metric_name ): if tag in metric . tags : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ) \u00b6 Source code in def assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ): candidates = [] self . _asserted . add ( metric_name ) for metric in self . metrics ( metric_name ): tags = metric . tags gtags = [ t for t in tags if t . startswith ( tag_prefix )] if len ( gtags ) > 0 : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_no_duplicate_metrics ( self ) \u00b6 Assert no duplicate metrics have been submitted. 
Metrics are considered duplicate when all following fields match: metric name type (gauge, rate, etc) tags hostname Source code in def assert_no_duplicate_metrics ( self ): \"\"\" Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: - metric name - type (gauge, rate, etc) - tags - hostname \"\"\" # metric types that intended to be called multiple times are ignored ignored_types = [ self . COUNT , self . MONOTONIC_COUNT , self . COUNTER ] metric_stubs = [ m for metrics in self . _metrics . values () for m in metrics if m . type not in ignored_types ] def stub_to_key_fn ( stub ): return stub . name , stub . type , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'metric' , metric_stubs , stub_to_key_fn ) assert_no_duplicate_service_checks ( self ) \u00b6 Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname Source code in def assert_no_duplicate_service_checks ( self ): \"\"\" Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname \"\"\" service_check_stubs = [ m for metrics in self . _service_checks . values () for m in metrics ] def stub_to_key_fn ( stub ): return stub . name , stub . status , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'service_check' , service_check_stubs , stub_to_key_fn ) assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ) \u00b6 Assert a service check was processed by this stub Source code in def assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ): \"\"\" Assert a service check was processed by this stub \"\"\" tags = normalize_tags ( tags , sort = True ) candidates = [] for sc in self . service_checks ( name ): if status is not None and status != sc . status : continue if tags and tags != sorted ( sc . tags ): continue if hostname is not None and hostname != sc . hostname : continue if message is not None and message != sc . message : continue candidates . append ( sc ) expected_service_check = ServiceCheckStub ( None , name = name , status = status , tags = tags , hostname = hostname , message = message ) if count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition = condition , msg = msg , expected_stub = expected_service_check , submitted_elements = self . _service_checks ) reset ( self ) \u00b6 Set the stub to its initial state Source code in def reset ( self ): \"\"\" Set the stub to its initial state \"\"\" self . _metrics = defaultdict ( list ) self . _asserted = set () self . _service_checks = defaultdict ( list ) self . _events = [] self . _event_platform_events = defaultdict ( list ) datadog_checks.base.stubs.datadog_agent.DatadogAgentStub \u00b6 This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions. 
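As a usage illustration, here is a minimal sketch of a test that exercises these stubs, assuming the aggregator fixture exposed by the datadog_checks.dev pytest plugin and reusing the AwesomeCheck example from the About page:

```python
from datadog_checks.base import AgentCheck


class AwesomeCheck(AgentCheck):
    __NAMESPACE__ = 'awesome'

    def check(self, instance):
        self.gauge('test', 1.23, tags=['foo:bar'])


def test_check(aggregator):
    # The `aggregator` fixture is an AggregatorStub that is reset between tests.
    check = AwesomeCheck('awesome', {}, [{}])
    check.check({})

    # Assert the metric was submitted with the expected value and tags,
    # then verify that no other metrics were submitted.
    aggregator.assert_metric('awesome.test', value=1.23, tags=['foo:bar'])
    aggregator.assert_all_metrics_covered()
```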
assert_metadata ( self , check_id , data ) \u00b6 Source code in def assert_metadata ( self , check_id , data ): actual = {} for name in data : key = ( check_id , name ) if key in self . _metadata : actual [ name ] = self . _metadata [ key ] assert data == actual assert_metadata_count ( self , count ) \u00b6 Source code in def assert_metadata_count ( self , count ): assert len ( self . _metadata ) == count reset ( self ) \u00b6 Source code in def reset ( self ): self . _metadata . clear () self . _cache . clear () self . _config = self . get_default_config ()","title":"API"},{"location":"base/api/#api","text":"","title":"API"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck","text":"The base class for any Agent based integration. In general, you don't need to and you should not override anything from the base class except the check method but sometimes it might be useful for a Check to have its own constructor. When overriding __init__ you have to remember that, depending on the configuration, the Agent might create several different Check instances and the method would be called as many times. Agent 6,7 signature: AgentCheck(name, init_config, instances) # instances contain only 1 instance AgentCheck.check(instance) Agent 8 signature: AgentCheck(name, init_config, instance) # one instance AgentCheck.check() # no more instance argument for check method Note when loading a Custom check, the Agent will inspect the module searching for a subclass of AgentCheck . If such a class exists but has been derived in turn, it'll be ignored - you should never derive from an existing Check .","title":"AgentCheck"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.__init__","text":"name ( str ) - the name of the check init_config ( dict ) - the init_config section of the configuration. instance ( List[dict] ) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). Source code in def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None \"\"\" - **name** (_str_) - the name of the check - **init_config** (_dict_) - the `init_config` section of the configuration. - **instance** (_List[dict]_) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). \"\"\" # NOTE: these variable assignments exist to ease type checking when eventually assigned as attributes. name = kwargs . get ( 'name' , '' ) init_config = kwargs . get ( 'init_config' , {}) agentConfig = kwargs . get ( 'agentConfig' , {}) instances = kwargs . get ( 'instances' , []) if len ( args ) > 0 : name = args [ 0 ] if len ( args ) > 1 : init_config = args [ 1 ] if len ( args ) > 2 : # agent pass instances as tuple but in test we are usually using list, so we are testing for both if len ( args ) > 3 or not isinstance ( args [ 2 ], ( list , tuple )) or 'instances' in kwargs : # old-style init: the 3rd argument is `agentConfig` agentConfig = args [ 2 ] if len ( args ) > 3 : instances = args [ 3 ] else : # new-style init: the 3rd argument is `instances` instances = args [ 2 ] # NOTE: Agent 6+ should pass exactly one instance... But we are not abiding by that rule on our side # everywhere just yet. It's complicated... See: https://github.com/DataDog/integrations-core/pull/5573 instance = instances [ 0 ] if instances else None self . check_id = '' self . name = name # type: str self . 
init_config = init_config # type: InitConfigType self . agentConfig = agentConfig # type: AgentConfigType self . instance = instance # type: InstanceType self . instances = instances # type: List[InstanceType] self . warnings = [] # type: List[str] # `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead self . hostname = datadog_agent . get_hostname () # type: str logger = logging . getLogger ( ' {} . {} ' . format ( __name__ , self . name )) self . log = CheckLoggingAdapter ( logger , self ) # TODO: Remove with Agent 5 # Set proxy settings self . proxies = self . _get_requests_proxy () if not self . init_config : self . _use_agent_proxy = True else : self . _use_agent_proxy = is_affirmative ( self . init_config . get ( 'use_agent_proxy' , True )) # TODO: Remove with Agent 5 self . default_integration_http_timeout = float ( self . agentConfig . get ( 'default_integration_http_timeout' , 9 )) self . _deprecations = { 'increment' : ( False , ( 'DEPRECATION NOTICE: `AgentCheck.increment`/`AgentCheck.decrement` are deprecated, please ' 'use `AgentCheck.gauge` or `AgentCheck.count` instead, with a different metric name' ), ), 'device_name' : ( False , ( 'DEPRECATION NOTICE: `device_name` is deprecated, please use a `device:` ' 'tag in the `tags` list instead' ), ), 'in_developer_mode' : ( False , 'DEPRECATION NOTICE: `in_developer_mode` is deprecated, please stop using it.' , ), 'no_proxy' : ( False , ( 'DEPRECATION NOTICE: The `no_proxy` config option has been renamed ' 'to `skip_proxy` and will be removed in a future release.' ), ), 'service_tag' : ( False , ( 'DEPRECATION NOTICE: The `service` tag is deprecated and has been renamed to ` %s `. ' 'Set `disable_legacy_service_tag` to `true` to disable this warning. ' 'The default will become `true` and cannot be changed in Agent version 8.' ), ), } # type: Dict[str, Tuple[bool, str]] # Setup metric limits self . metric_limiter = self . _get_metric_limiter ( self . name , instance = self . instance ) # Lazily load and validate config self . _config_model_instance = None # type: Any self . _config_model_shared = None # type: Any # Functions that will be called exactly once (if successful) before the first check run self . check_initializations = deque ([ self . send_config_metadata ]) # type: Deque[Callable[[], None]] if not PY2 : self . check_initializations . append ( self . load_configuration_models )","title":"__init__()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.count","text":"Sample a raw count metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a raw count metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. 
- **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"count()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.event","text":"Send an event. An event is a dictionary with the following keys and data types: { \"timestamp\" : int , # the epoch timestamp for the event \"event_type\" : str , # the event name \"api_key\" : str , # the api key for your account \"msg_title\" : str , # the title of the event \"msg_text\" : str , # the text body of the event \"aggregation_key\" : str , # a key to use for aggregating events \"alert_type\" : str , # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\" : str , # (optional) the source type name \"host\" : str , # (optional) the name of the host \"tags\" : list , # (optional) a list of tags to associate with this event \"priority\" : str , # (optional) specifies the priority of the event (\"normal\" or \"low\") } event ( dict ) - the event to be sent Source code in def event ( self , event ): # type: (Event) -> None \"\"\"Send an event. An event is a dictionary with the following keys and data types: ```python { \"timestamp\": int, # the epoch timestamp for the event \"event_type\": str, # the event name \"api_key\": str, # the api key for your account \"msg_title\": str, # the title of the event \"msg_text\": str, # the text body of the event \"aggregation_key\": str, # a key to use for aggregating events \"alert_type\": str, # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\": str, # (optional) the source type name \"host\": str, # (optional) the name of the host \"tags\": list, # (optional) a list of tags to associate with this event \"priority\": str, # (optional) specifies the priority of the event (\"normal\" or \"low\") } ``` - **event** (_dict_) - the event to be sent \"\"\" # Enforce types of some fields, considerably facilitates handling in go bindings downstream for key , value in iteritems ( event ): if not isinstance ( value , ( text_type , binary_type )): continue try : event [ key ] = to_native_string ( value ) # type: ignore # ^ Mypy complains about dynamic key assignment -- arguably for good reason. # Ideally we should convert this to a dict literal so that submitted events only include known keys. except UnicodeError : self . log . warning ( 'Encoding error with field ` %s `, cannot submit event' , key ) return if event . get ( 'tags' ): event [ 'tags' ] = self . _normalize_tags_type ( event [ 'tags' ]) if event . get ( 'timestamp' ): event [ 'timestamp' ] = int ( event [ 'timestamp' ]) if event . get ( 'aggregation_key' ): event [ 'aggregation_key' ] = to_native_string ( event [ 'aggregation_key' ]) if self . __NAMESPACE__ : event . setdefault ( 'source_type_name' , self . __NAMESPACE__ ) aggregator . submit_event ( self , self . check_id , event )","title":"event()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.gauge","text":"Sample a gauge metric. Parameters: name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. 
raw ( bool ) - whether to ignore any defined namespace prefix Source code in def gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a gauge metric. **Parameters:** - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . GAUGE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"gauge()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.histogram","text":"Sample a histogram metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTOGRAM , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"histogram()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.historate","text":"Sample a histogram based on rate metrics. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram based on rate metrics. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . 
HISTORATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"historate()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.metadata_entrypoint","text":"Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: class MyCheck ( AgentCheck ): @AgentCheck . metadata_entrypoint def collect_metadata ( self ): ... Source code in @classmethod def metadata_entrypoint ( cls , method ): # type: (Callable[..., None]) -> Callable[..., None] \"\"\" Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: ```python class MyCheck(AgentCheck): @AgentCheck.metadata_entrypoint def collect_metadata(self): ... ``` \"\"\" @functools . wraps ( method ) def entrypoint ( self , * args , ** kwargs ): # type: (AgentCheck, *Any, **Any) -> None if not self . is_metadata_collection_enabled (): return # NOTE: error handling still at the discretion of the wrapped method. method ( self , * args , ** kwargs ) return entrypoint","title":"metadata_entrypoint()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.monotonic_count","text":"Sample an increasing counter metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix flush_first_value ( bool ) - whether to sample the first value Source code in def monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ): # type: (str, float, Sequence[str], str, str, bool, bool) -> None \"\"\"Sample an increasing counter metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix - **flush_first_value** (_bool_) - whether to sample the first value \"\"\" self . _submit_metric ( aggregator . MONOTONIC_COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw , flush_first_value = flush_first_value , )","title":"monotonic_count()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.rate","text":"Sample a point, with the rate calculated at the end of the check. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a point, with the rate calculated at the end of the check. 
- **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . RATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"rate()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.service_check","text":"Send the status of a service. name ( str ) - the name of the service check status ( int ) - a constant describing the service status. tags ( List[str] ) - a list of tags to associate with this service check message ( str ) - additional information or a description of why this status occurred. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ): # type: (str, ServiceCheckStatus, Sequence[str], str, str, bool) -> None \"\"\"Send the status of a service. - **name** (_str_) - the name of the service check - **status** (_int_) - a constant describing the service status. - **tags** (_List[str]_) - a list of tags to associate with this service check - **message** (_str_) - additional information or a description of why this status occurred. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" tags = self . _normalize_tags_type ( tags or []) if hostname is None : hostname = '' if message is None : message = '' else : message = to_native_string ( message ) message = self . sanitize ( message ) aggregator . submit_service_check ( self , self . check_id , self . _format_namespace ( name , raw ), status , tags , hostname , message )","title":"service_check()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.set_metadata","text":"Updates the cached metadata name with value , which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if name has no transformer defined then the raw value will be submitted and therefore it must be a str :param options: keyword arguments to pass to any defined transformer Source code in def set_metadata ( self , name , value , ** options ): # type: (str, Any, **Any) -> None \"\"\"Updates the cached metadata ``name`` with ``value``, which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if ``name`` has no transformer defined then the raw ``value`` will be submitted and therefore it must be a ``str`` :param options: keyword arguments to pass to any defined transformer \"\"\" self . metadata_manager . submit ( name , value , options )","title":"set_metadata()"},{"location":"base/api/#stubs","text":"","title":"Stubs"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub","text":"This implements the methods defined by the Agent's C bindings which in turn call the Go backend . 
It also provides utility methods for test assertions.","title":"AggregatorStub"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_all_metrics_covered","text":"Source code in def assert_all_metrics_covered ( self ): # use `condition` to avoid building the `msg` if not needed condition = self . metrics_asserted_pct >= 100.0 msg = '' if not condition : prefix = ' \\n\\t - ' msg = 'Some metrics are missing:' msg += ' \\n Asserted Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . _asserted ))) msg += ' \\n Missing Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . not_asserted ()))) assert condition , msg","title":"assert_all_metrics_covered()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_event","text":"Source code in def assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ): candidates = [] for e in self . events : if exact_match and msg_text != e [ 'msg_text' ] or msg_text not in e [ 'msg_text' ]: continue if tags and set ( tags ) != set ( e [ 'tags' ]): continue for name , value in iteritems ( kwargs ): if e [ name ] != value : break else : candidates . append ( e ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( msg_text , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_event()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric","text":"Assert a metric was processed by this stub Source code in def assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ): \"\"\" Assert a metric was processed by this stub \"\"\" self . _asserted . add ( name ) expected_tags = normalize_tags ( tags , sort = True ) candidates = [] for metric in self . metrics ( name ): if value is not None and not self . is_aggregate ( metric . type ) and value != metric . value : continue if expected_tags and expected_tags != sorted ( metric . tags ): continue if hostname is not None and hostname != metric . hostname : continue if metric_type is not None and metric_type != metric . type : continue if device is not None and device != metric . device : continue candidates . append ( metric ) expected_metric = MetricStub ( name , metric_type , value , tags , hostname , device ) if value is not None and candidates and all ( self . is_aggregate ( m . type ) for m in candidates ): got = sum ( m . value for m in candidates ) msg = \"Expected count value for ' {} ': {} , got {} \" . format ( name , value , got ) condition = value == got elif count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition , msg = msg , expected_stub = expected_metric , submitted_elements = self . _metrics )","title":"assert_metric()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric_has_tag","text":"Assert a metric is tagged with tag Source code in def assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ): \"\"\" Assert a metric is tagged with tag \"\"\" self . _asserted . 
add ( metric_name ) candidates = [] for metric in self . metrics ( metric_name ): if tag in metric . tags : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_metric_has_tag()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric_has_tag_prefix","text":"Source code in def assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ): candidates = [] self . _asserted . add ( metric_name ) for metric in self . metrics ( metric_name ): tags = metric . tags gtags = [ t for t in tags if t . startswith ( tag_prefix )] if len ( gtags ) > 0 : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_metric_has_tag_prefix()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_no_duplicate_metrics","text":"Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: metric name type (gauge, rate, etc) tags hostname Source code in def assert_no_duplicate_metrics ( self ): \"\"\" Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: - metric name - type (gauge, rate, etc) - tags - hostname \"\"\" # metric types that intended to be called multiple times are ignored ignored_types = [ self . COUNT , self . MONOTONIC_COUNT , self . COUNTER ] metric_stubs = [ m for metrics in self . _metrics . values () for m in metrics if m . type not in ignored_types ] def stub_to_key_fn ( stub ): return stub . name , stub . type , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'metric' , metric_stubs , stub_to_key_fn )","title":"assert_no_duplicate_metrics()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_no_duplicate_service_checks","text":"Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname Source code in def assert_no_duplicate_service_checks ( self ): \"\"\" Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname \"\"\" service_check_stubs = [ m for metrics in self . _service_checks . values () for m in metrics ] def stub_to_key_fn ( stub ): return stub . name , stub . status , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'service_check' , service_check_stubs , stub_to_key_fn )","title":"assert_no_duplicate_service_checks()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_service_check","text":"Assert a service check was processed by this stub Source code in def assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ): \"\"\" Assert a service check was processed by this stub \"\"\" tags = normalize_tags ( tags , sort = True ) candidates = [] for sc in self . 
service_checks ( name ): if status is not None and status != sc . status : continue if tags and tags != sorted ( sc . tags ): continue if hostname is not None and hostname != sc . hostname : continue if message is not None and message != sc . message : continue candidates . append ( sc ) expected_service_check = ServiceCheckStub ( None , name = name , status = status , tags = tags , hostname = hostname , message = message ) if count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition = condition , msg = msg , expected_stub = expected_service_check , submitted_elements = self . _service_checks )","title":"assert_service_check()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.reset","text":"Set the stub to its initial state Source code in def reset ( self ): \"\"\" Set the stub to its initial state \"\"\" self . _metrics = defaultdict ( list ) self . _asserted = set () self . _service_checks = defaultdict ( list ) self . _events = [] self . _event_platform_events = defaultdict ( list )","title":"reset()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub","text":"This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions.","title":"DatadogAgentStub"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.assert_metadata","text":"Source code in def assert_metadata ( self , check_id , data ): actual = {} for name in data : key = ( check_id , name ) if key in self . _metadata : actual [ name ] = self . _metadata [ key ] assert data == actual","title":"assert_metadata()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.assert_metadata_count","text":"Source code in def assert_metadata_count ( self , count ): assert len ( self . _metadata ) == count","title":"assert_metadata_count()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.reset","text":"Source code in def reset ( self ): self . _metadata . clear () self . _cache . clear () self . _config = self . get_default_config ()","title":"reset()"},{"location":"base/basics/","text":"Basics \u00b6 The AgentCheck base class contains the logic that all Checks inherit. In addition to the integrations inheriting from AgentCheck, other classes that inherit from AgentCheck include: PDHBaseCheck OpenMetricsBaseCheck KubeLeaderElectionBaseCheck Getting Started \u00b6 The Datadog Agent looks for __version__ and a subclass of AgentCheck at the root of every Check package. 
Below is an example of the __init__.py file for a hypothetical Awesome Check: from .__about__ import __version__ from .check import AwesomeCheck __all__ = [ '__version__' , 'AwesomeCheck' ] The version is used in the Agent's status output (if no __version__ is found, it will default to 0.0.0 ): ========= Collector ========= Running Checks ============== AwesomeCheck (0.0.1) ------------------- Instance ID: 1234 [OK] Configuration Source: file:/etc/datadog-agent/conf.d/awesomecheck.d/awesomecheck.yaml Total Runs: 12 Metric Samples: Last Run: 242, Total: 2,904 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 0, Total: 0 Average Execution Time : 49ms Last Execution Date : 2020-10-26 19:09:22.000000 UTC Last Successful Execution Date : 2020-10-26 19:09:22.000000 UTC ... Checks \u00b6 AgentCheck contains functions that you use to execute Checks and submit data to Datadog. Metrics \u00b6 This list enumerates what is collected from your system by each integration. For more information on metrics, see the Metric Types documentation. You can find the metrics for each integration in that integration's metadata.csv file. You can also set up custom metrics , so if the integration doesn\u2019t offer a metric out of the box, you can usually add it. Gauge \u00b6 The gauge metric submission type represents a snapshot of events in one time interval. This representative snapshot value is the last value submitted to the Agent during a time interval. A gauge can be used to take a measure of something reporting continuously\u2014like the available disk space or memory used. For more information, see the API documentation Count \u00b6 The count metric submission type represents the total number of event occurrences in one time interval. A count can be used to track the total number of connections made to a database or the total number of requests to an endpoint. This number of events can increase or decrease over time\u2014it is not monotonically increasing. For more information, see the API documentation . Monotonic Count \u00b6 Similar to Count, Monotonic Count represents the total number of event occurrences in one time interval. However, this value can ONLY increment. For more information, see the API documentation . Rate \u00b6 The rate metric submission type represents the total number of event occurrences per second in one time interval. A rate can be used to track how often something is happening\u2014like the frequency of connections made to a database or the flow of requests made to an endpoint. For more information, see the API documentation . Histogram \u00b6 The histogram metric submission type represents the statistical distribution of a set of values calculated Agent-side in one time interval. Datadog\u2019s histogram metric type is an extension of the StatsD timing metric type: the Agent aggregates the values that are sent in a defined time interval and produces different metrics which represent the set of values. For more information, see the API documentation . Historate \u00b6 Similar to the histogram metric, the historate represents statistical distribution over one time interval, although this is based on rate metrics. For more information, see the API documentation . Service Checks \u00b6 Service checks are a type of monitor used to track the uptime status of the service. For more information, see the Service checks guide. For more information, see the API documentation . 
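As a quick reference for the submission types above, here is a minimal sketch of a check method. The check name, metric names, values, and tags are illustrative only and are not part of any real integration; the submission methods themselves (gauge, count, monotonic_count, rate, histogram, historate, service_check) are the ones AgentCheck provides.

```python
from datadog_checks.base import AgentCheck


class AwesomeCheck(AgentCheck):
    # Illustrative sketch: names and values below are made up for this example.
    __NAMESPACE__ = 'awesome'

    def check(self, instance):
        tags = ['env:test']

        # Each metric submission type has a dedicated method on AgentCheck.
        self.gauge('connections.active', 42, tags=tags)          # snapshot value, last one wins per interval
        self.count('requests.served', 3, tags=tags)              # non-monotonic count for the interval
        self.monotonic_count('requests.total', 1053, tags=tags)  # raw counter that only ever increases
        self.rate('bytes.sent', 2048, tags=tags)                 # normalized per second by the Agent
        self.histogram('request.duration', 0.25, tags=tags)      # statistical distribution of values
        self.historate('query.duration', 0.8, tags=tags)         # distribution based on rates

        # Service checks submit a status constant: OK, WARNING, CRITICAL, or UNKNOWN.
        self.service_check('can_connect', AgentCheck.OK, tags=tags, message='connection established')
```

Because __NAMESPACE__ is set here, these submissions would arrive prefixed, for example as awesome.connections.active, unless raw=True is passed (see the Namespacing section below).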
Events \u00b6 Events are informational messages about your system that are consumed by the events stream so that you can build monitors on them. For more information, see the API documentation . Namespacing \u00b6 Within every integration, you can specify the value of __NAMESPACE__ : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' ... This is an optional addition, but it makes submissions easier since it prefixes every metric with the __NAMESPACE__ automatically. In this case it would append awesome. to each metric submitted to Datadog. If you wish to ignore the namespace for any reason, you can append an optional Boolean raw=True to each submission: self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ], raw = True ) ... You submitted a gauge metric named test with a value of 1.23 tagged by foo:bar ignoring the namespace. Check Initializations \u00b6 In the AgentCheck class, there is a useful property called check_initializations , which you can use to execute functions that are called once before the first check run. You can fill up check_initializations with instructions in the __init__ function of an integration. For example, you could use it to parse configuration information before running a check. Listed below is an example with Airflow: class AirflowCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AirflowCheck , self ) . __init__ ( name , init_config , instances ) self . _url = self . instance . get ( 'url' , '' ) self . _tags = self . instance . get ( 'tags' , []) # The Agent only makes one attempt to instantiate each AgentCheck so any errors occurring # in `__init__` are logged just once, making it difficult to spot. Therefore, # potential configuration errors are emitted as part of the check run phase. # The configuration is only parsed once if it succeed, otherwise it's retried. self . check_initializations . append ( self . _parse_config ) ...","title":"Basics"},{"location":"base/basics/#basics","text":"The AgentCheck base class contains the logic that all Checks inherit. In addition to the integrations inheriting from AgentCheck, other classes that inherit from AgentCheck include: PDHBaseCheck OpenMetricsBaseCheck KubeLeaderElectionBaseCheck","title":"Basics"},{"location":"base/basics/#getting-started","text":"The Datadog Agent looks for __version__ and a subclass of AgentCheck at the root of every Check package. Below is an example of the __init__.py file for a hypothetical Awesome Check: from .__about__ import __version__ from .check import AwesomeCheck __all__ = [ '__version__' , 'AwesomeCheck' ] The version is used in the Agent's status output (if no __version__ is found, it will default to 0.0.0 ): ========= Collector ========= Running Checks ============== AwesomeCheck (0.0.1) ------------------- Instance ID: 1234 [OK] Configuration Source: file:/etc/datadog-agent/conf.d/awesomecheck.d/awesomecheck.yaml Total Runs: 12 Metric Samples: Last Run: 242, Total: 2,904 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 0, Total: 0 Average Execution Time : 49ms Last Execution Date : 2020-10-26 19:09:22.000000 UTC Last Successful Execution Date : 2020-10-26 19:09:22.000000 UTC ...","title":"Getting Started"},{"location":"base/basics/#checks","text":"AgentCheck contains functions that you use to execute Checks and submit data to Datadog.","title":"Checks"},{"location":"base/basics/#metrics","text":"This list enumerates what is collected from your system by each integration. 
For more information on metrics, see the Metric Types documentation. You can find the metrics for each integration in that integration's metadata.csv file. You can also set up custom metrics , so if the integration doesn\u2019t offer a metric out of the box, you can usually add it.","title":"Metrics"},{"location":"base/basics/#gauge","text":"The gauge metric submission type represents a snapshot of events in one time interval. This representative snapshot value is the last value submitted to the Agent during a time interval. A gauge can be used to take a measure of something reporting continuously\u2014like the available disk space or memory used. For more information, see the API documentation","title":"Gauge"},{"location":"base/basics/#count","text":"The count metric submission type represents the total number of event occurrences in one time interval. A count can be used to track the total number of connections made to a database or the total number of requests to an endpoint. This number of events can increase or decrease over time\u2014it is not monotonically increasing. For more information, see the API documentation .","title":"Count"},{"location":"base/basics/#monotonic-count","text":"Similar to Count, Monotonic Count represents the total number of event occurrences in one time interval. However, this value can ONLY increment. For more information, see the API documentation .","title":"Monotonic Count"},{"location":"base/basics/#rate","text":"The rate metric submission type represents the total number of event occurrences per second in one time interval. A rate can be used to track how often something is happening\u2014like the frequency of connections made to a database or the flow of requests made to an endpoint. For more information, see the API documentation .","title":"Rate"},{"location":"base/basics/#histogram","text":"The histogram metric submission type represents the statistical distribution of a set of values calculated Agent-side in one time interval. Datadog\u2019s histogram metric type is an extension of the StatsD timing metric type: the Agent aggregates the values that are sent in a defined time interval and produces different metrics which represent the set of values. For more information, see the API documentation .","title":"Histogram"},{"location":"base/basics/#historate","text":"Similar to the histogram metric, the historate represents statistical distribution over one time interval, although this is based on rate metrics. For more information, see the API documentation .","title":"Historate"},{"location":"base/basics/#service-checks","text":"Service checks are a type of monitor used to track the uptime status of the service. For more information, see the Service checks guide. For more information, see the API documentation .","title":"Service Checks"},{"location":"base/basics/#events","text":"Events are informational messages about your system that are consumed by the events stream so that you can build monitors on them. For more information, see the API documentation .","title":"Events"},{"location":"base/basics/#namespacing","text":"Within every integration, you can specify the value of __NAMESPACE__ : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' ... This is an optional addition, but it makes submissions easier since it prefixes every metric with the __NAMESPACE__ automatically. In this case it would append awesome. to each metric submitted to Datadog. 
If you wish to ignore the namespace for any reason, you can append an optional Boolean raw=True to each submission: self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ], raw = True ) ... You submitted a gauge metric named test with a value of 1.23 tagged by foo:bar ignoring the namespace.","title":"Namespacing"},{"location":"base/basics/#check-initializations","text":"In the AgentCheck class, there is a useful property called check_initializations , which you can use to execute functions that are called once before the first check run. You can fill up check_initializations with instructions in the __init__ function of an integration. For example, you could use it to parse configuration information before running a check. Listed below is an example with Airflow: class AirflowCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AirflowCheck , self ) . __init__ ( name , init_config , instances ) self . _url = self . instance . get ( 'url' , '' ) self . _tags = self . instance . get ( 'tags' , []) # The Agent only makes one attempt to instantiate each AgentCheck so any errors occurring # in `__init__` are logged just once, making it difficult to spot. Therefore, # potential configuration errors are emitted as part of the check run phase. # The configuration is only parsed once if it succeed, otherwise it's retried. self . check_initializations . append ( self . _parse_config ) ...","title":"Check Initializations"},{"location":"base/databases/","text":"Databases \u00b6 No matter the database you wish to monitor, the base package provides a standard way to define and collect data from arbitrary queries. The core premise is that you define a function that accepts a query (usually a str ) and it returns a sequence of equal length results. Interface \u00b6 All the functionality is exposed by the Query and QueryManager classes. datadog_checks.base.utils.db.query.Query \u00b6 This class accepts a single dict argument which is necessary to run the query. The representation is based on our custom_queries format originally designed and implemented in !1528 . It is now part of all our database integrations and other products have since adopted this format. __init__ ( self , query_data ) special \u00b6 Source code in def __init__ ( self , query_data ): # type: (Dict[str, Any]) -> Query self . query_data = deepcopy ( query_data or {}) # type: Dict[str, Any] self . name = None # type: str self . query = None # type: str self . columns = None # type: List[str] self . extras = None # type: List[Dict[str, str]] self . tags = None # type: List[str] compile ( self , column_transformers , extra_transformers ) \u00b6 This idempotent method will be called by QueryManager.compile_queries so you should never need to call it directly. Source code in def compile ( self , column_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] extra_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] ): # type: (...) -> None \"\"\" This idempotent method will be called by `QueryManager.compile_queries` so you should never need to call it directly. \"\"\" # Check for previous compilation if self . name is not None : return query_name = self . query_data . get ( 'name' ) if not query_name : raise ValueError ( 'query field `name` is required' ) elif not isinstance ( query_name , str ): raise ValueError ( 'query field `name` must be a string' ) query = self . query_data . 
get ( 'query' ) if not query : raise ValueError ( 'field `query` for {} is required' . format ( query_name )) elif not isinstance ( query , str ): raise ValueError ( 'field `query` for {} must be a string' . format ( query_name )) columns = self . query_data . get ( 'columns' ) if not columns : raise ValueError ( 'field `columns` for {} is required' . format ( query_name )) elif not isinstance ( columns , list ): raise ValueError ( 'field `columns` for {} must be a list' . format ( query_name )) tags = self . query_data . get ( 'tags' , []) if tags is not None and not isinstance ( tags , list ): raise ValueError ( 'field `tags` for {} must be a list' . format ( query_name )) # Keep track of all defined names sources = {} column_data = [] for i , column in enumerate ( columns , 1 ): # Columns can be ignored via configuration. if not column : column_data . append (( None , None )) continue elif not isinstance ( column , dict ): raise ValueError ( 'column # {} of {} is not a mapping' . format ( i , query_name )) column_name = column . get ( 'name' ) if not column_name : raise ValueError ( 'field `name` for column # {} of {} is required' . format ( i , query_name )) elif not isinstance ( column_name , str ): raise ValueError ( 'field `name` for column # {} of {} must be a string' . format ( i , query_name )) elif column_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( column_name , query_name , sources [ column_name ][ 'type' ], sources [ column_name ][ 'index' ] ) ) sources [ column_name ] = { 'type' : 'column' , 'index' : i } column_type = column . get ( 'type' ) if not column_type : raise ValueError ( 'field `type` for column {} of {} is required' . format ( column_name , query_name )) elif not isinstance ( column_type , str ): raise ValueError ( 'field `type` for column {} of {} must be a string' . format ( column_name , query_name )) elif column_type == 'source' : column_data . append (( column_name , ( None , None ))) continue elif column_type not in column_transformers : raise ValueError ( 'unknown type ` {} ` for column {} of {} ' . format ( column_type , column_name , query_name )) modifiers = { key : value for key , value in column . items () if key not in ( 'name' , 'type' )} try : transformer = column_transformers [ column_type ]( column_transformers , column_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for column {} of {} : {} ' . format ( column_type , column_name , query_name , e ) # Prepend helpful error text. # # When an exception is raised in the context of another one, both will be printed. To avoid # this we set the context to None. https://www.python.org/dev/peps/pep-0409/ raise_from ( type ( e )( error ), None ) else : if column_type in ( 'tag' , 'tag_list' ): column_data . append (( column_name , ( column_type , transformer ))) else : # All these would actually submit data. As that is the default case, we represent it as # a reference to None since if we use e.g. `value` it would never be checked anyway. column_data . append (( column_name , ( None , transformer ))) submission_transformers = column_transformers . copy () submission_transformers . pop ( 'tag' ) submission_transformers . pop ( 'tag_list' ) extras = self . query_data . get ( 'extras' , []) if not isinstance ( extras , list ): raise ValueError ( 'field `extras` for {} must be a list' . 
format ( query_name )) extra_data = [] for i , extra in enumerate ( extras , 1 ): if not isinstance ( extra , dict ): raise ValueError ( 'extra # {} of {} is not a mapping' . format ( i , query_name )) extra_name = extra . get ( 'name' ) if not extra_name : raise ValueError ( 'field `name` for extra # {} of {} is required' . format ( i , query_name )) elif not isinstance ( extra_name , str ): raise ValueError ( 'field `name` for extra # {} of {} must be a string' . format ( i , query_name )) elif extra_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( extra_name , query_name , sources [ extra_name ][ 'type' ], sources [ extra_name ][ 'index' ] ) ) sources [ extra_name ] = { 'type' : 'extra' , 'index' : i } extra_type = extra . get ( 'type' ) if not extra_type : if 'expression' in extra : extra_type = 'expression' else : raise ValueError ( 'field `type` for extra {} of {} is required' . format ( extra_name , query_name )) elif not isinstance ( extra_type , str ): raise ValueError ( 'field `type` for extra {} of {} must be a string' . format ( extra_name , query_name )) elif extra_type not in extra_transformers and extra_type not in submission_transformers : raise ValueError ( 'unknown type ` {} ` for extra {} of {} ' . format ( extra_type , extra_name , query_name )) transformer_factory = extra_transformers . get ( extra_type , submission_transformers . get ( extra_type )) extra_source = extra . get ( 'source' ) if extra_type in submission_transformers : if not extra_source : raise ValueError ( 'field `source` for extra {} of {} is required' . format ( extra_name , query_name )) modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' , 'source' )} else : modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' )} modifiers [ 'sources' ] = sources try : transformer = transformer_factory ( submission_transformers , extra_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for extra {} of {} : {} ' . format ( extra_type , extra_name , query_name , e ) raise_from ( type ( e )( error ), None ) else : if extra_type in submission_transformers : transformer = create_extra_transformer ( transformer , extra_source ) extra_data . append (( extra_name , transformer )) self . name = query_name self . query = query self . columns = tuple ( column_data ) self . extras = tuple ( extra_data ) self . tags = tags del self . query_data datadog_checks.base.utils.db.core.QueryManager \u00b6 This class is in charge of running any number of Query instances for a single Check instance. You will most often see it created during Check initialization like this: self . _query_manager = QueryManager ( self , self . execute_query , queries = [ queries . SomeQuery1 , queries . SomeQuery2 , queries . SomeQuery3 , queries . SomeQuery4 , queries . SomeQuery5 , ], tags = self . instance . get ( 'tags' , []), error_handler = self . _error_sanitizer , ) self . check_initializations . append ( self . _query_manager . 
compile_queries ) __init__ ( self , check , executor , queries = None , tags = None , error_handler = None , hostname = None ) special \u00b6 check ( AgentCheck ) - an instance of a Check executor ( callable ) - a callable accepting a str query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set queries ( List[Query] ) - a list of Query instances tags ( List[str] ) - a list of tags to associate with every submission error_handler ( callable ) - a callable accepting a str error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit Source code in def __init__ ( self , check , # type: AgentCheck executor , # type: Callable[[str], Union[Sequence, Iterable]] queries = None , # type: List[str] tags = None , # type: List[str] error_handler = None , # type: Callable[[str], str] hostname = None , # type: str ): # type: (...) -> QueryManager \"\"\" - **check** (_AgentCheck_) - an instance of a Check - **executor** (_callable_) - a callable accepting a `str` query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set - **queries** (_List[Query]_) - a list of `Query` instances - **tags** (_List[str]_) - a list of tags to associate with every submission - **error_handler** (_callable_) - a callable accepting a `str` error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit \"\"\" self . check = check # type: AgentCheck self . executor = executor # type: Callable[[str], Union[Sequence, Iterable]] self . tags = tags or [] self . error_handler = error_handler self . queries = [ Query ( payload ) for payload in queries or []] # type: List[Query] self . hostname = hostname # type: str custom_queries = list ( self . check . instance . get ( 'custom_queries' , [])) # type: List[str] use_global_custom_queries = self . check . instance . get ( 'use_global_custom_queries' , True ) # type: str # Handle overrides if use_global_custom_queries == 'extend' : custom_queries . extend ( self . check . init_config . get ( 'global_custom_queries' , [])) elif ( not custom_queries and 'global_custom_queries' in self . check . init_config and is_affirmative ( use_global_custom_queries ) ): custom_queries = self . check . init_config . get ( 'global_custom_queries' , []) # Deduplicate for i , custom_query in enumerate ( iter_unique ( custom_queries ), 1 ): query = Query ( custom_query ) query . query_data . setdefault ( 'name' , 'custom query # {} ' . format ( i )) self . queries . append ( query ) compile_queries ( self ) \u00b6 This method compiles every Query object. Source code in def compile_queries ( self ): \"\"\"This method compiles every `Query` object.\"\"\" column_transformers = COLUMN_TRANSFORMERS . copy () for submission_method , transformer_name in SUBMISSION_METHODS . items (): method = getattr ( self . check , submission_method ) # Save each method in the initializer -> callable format column_transformers [ transformer_name ] = create_submission_transformer ( method ) for query in self . queries : query . compile ( column_transformers , EXTRA_TRANSFORMERS . copy ()) execute ( self , extra_tags = None ) \u00b6 This method is what you call every check run. Source code in def execute ( self , extra_tags = None ): \"\"\"This method is what you call every check run.\"\"\" logger = self . check . 
log if extra_tags : global_tags = list ( extra_tags ) global_tags . extend ( self . tags ) else : global_tags = self . tags for query in self . queries : query_name = query . name query_columns = query . columns query_extras = query . extras query_tags = query . tags num_columns = len ( query_columns ) try : rows = self . execute_query ( query . query ) except Exception as e : if self . error_handler : logger . error ( 'Error querying %s : %s ' , query_name , self . error_handler ( str ( e ))) else : logger . error ( 'Error querying %s : %s ' , query_name , e ) continue for row in rows : if not row : logger . debug ( 'Query %s returned an empty result' , query_name ) continue if num_columns != len ( row ): logger . error ( 'Query %s expected %d column %s , got %d ' , query_name , num_columns , 's' if num_columns > 1 else '' , len ( row ), ) continue sources = {} submission_queue = [] tags = list ( global_tags ) tags . extend ( query_tags ) for ( column_name , transformer ), value in zip ( query_columns , row ): # Columns can be ignored via configuration if not column_name : continue sources [ column_name ] = value column_type , transformer = transformer # The transformer can be None for `source` types. Those such columns do not submit # anything but are collected into the row values for other columns to reference. if transformer is None : continue elif column_type == 'tag' : tags . append ( transformer ( None , value )) elif column_type == 'tag_list' : tags . extend ( transformer ( None , value )) else : submission_queue . append (( transformer , value )) for transformer , value in submission_queue : transformer ( sources , value , tags = tags , hostname = self . hostname ) for name , transformer in query_extras : try : result = transformer ( sources , tags = tags , hostname = self . hostname ) except Exception as e : logger . error ( 'Error transforming %s : %s ' , name , e ) continue else : if result is not None : sources [ name ] = result execute_query ( self , query ) \u00b6 Called by execute , this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. Source code in def execute_query ( self , query ): \"\"\" Called by `execute`, this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. \"\"\" rows = self . executor ( query ) if rows is None : return iter ([]) else : rows = iter ( rows ) # Ensure we trigger query execution try : first_row = next ( rows ) except StopIteration : return iter ([]) return chain (( first_row ,), rows ) Transformers \u00b6 datadog_checks.base.utils.db.transform.ColumnTransformers \u00b6 match ( transformers , column_name , ** modifiers ) \u00b6 This is used for querying unstructured data. For example, say you want to collect the fields named foo and bar . Typically, they would be stored like: foo bar 4 2 and would be queried like: SELECT foo , bar FROM ... Often, you will instead find data stored in the following format: metric value foo 4 bar 2 and would be queried like: SELECT metric , value FROM ... In this case, the metric column stores the name with which to match on and its value is stored in a separate column. The required items modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the columns top level field. 
You must also define a source modifier either for this transformer itself or in the values of items (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: query : SELECT source1, source2, metric FROM TABLE columns : - name : value1 type : source - name : value2 type : source - name : metric_name type : match source : value1 items : foo : name : test.foo type : gauge source : value2 bar : name : test.bar type : monotonic_gauge and the result set is: source1 source2 metric 1 2 foo 3 4 baz 5 6 bar Here's what would be submitted: foo - test.foo as a gauge with a value of 2 bar - test.bar.total as a gauge and test.bar.count as a monotonic_count , both with a value of 5 baz - nothing since it was not defined as a match item Source code in def get_match ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" This is used for querying unstructured data. For example, say you want to collect the fields named `foo` and `bar`. Typically, they would be stored like: | foo | bar | | --- | --- | | 4 | 2 | and would be queried like: ```sql SELECT foo, bar FROM ... ``` Often, you will instead find data stored in the following format: | metric | value | | ------ | ----- | | foo | 4 | | bar | 2 | and would be queried like: ```sql SELECT metric, value FROM ... ``` In this case, the `metric` column stores the name with which to match on and its `value` is stored in a separate column. The required `items` modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the `columns` top level field. You must also define a `source` modifier either for this transformer itself or in the values of `items` (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: ```yaml query: SELECT source1, source2, metric FROM TABLE columns: - name: value1 type: source - name: value2 type: source - name: metric_name type: match source: value1 items: foo: name: test.foo type: gauge source: value2 bar: name: test.bar type: monotonic_gauge ``` and the result set is: | source1 | source2 | metric | | ------- | ------- | ------ | | 1 | 2 | foo | | 3 | 4 | baz | | 5 | 6 | bar | Here's what would be submitted: - `foo` - `test.foo` as a `gauge` with a value of `2` - `bar` - `test.bar.total` as a `gauge` and `test.bar.count` as a `monotonic_count`, both with a value of `5` - `baz` - nothing since it was not defined as a match item \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables compiled_items = _compile_match_items ( transformers , modifiers ) def match ( sources , value , ** kwargs ): if value in compiled_items : source , transformer = compiled_items [ value ] transformer ( sources , sources [ source ], ** kwargs ) return match monotonic_gauge ( transformers , column_name , ** modifiers ) \u00b6 Send the result as both a gauge suffixed by .total and a monotonic_count suffixed by .count . Source code in def get_monotonic_gauge ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as both a `gauge` suffixed by `.total` and a `monotonic_count` suffixed by `.count`. \"\"\" gauge = transformers [ 'gauge' ]( transformers , ' {} .total' . format ( column_name ), ** modifiers ) monotonic_count = transformers [ 'monotonic_count' ]( transformers , ' {} .count' . 
format ( column_name ), ** modifiers ) def monotonic_gauge ( _ , value , ** kwargs ): gauge ( _ , value , ** kwargs ) monotonic_count ( _ , value , ** kwargs ) return monotonic_gauge service_check ( transformers , column_name , ** modifiers ) \u00b6 Submit a service check. The required modifier status_map is a mapping of values to statuses. Valid statuses include: OK WARNING CRITICAL UNKNOWN Any encountered values that are not defined will be sent as UNKNOWN . Source code in def get_service_check ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Submit a service check. The required modifier `status_map` is a mapping of values to statuses. Valid statuses include: - `OK` - `WARNING` - `CRITICAL` - `UNKNOWN` Any encountered values that are not defined will be sent as `UNKNOWN`. \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables status_map = _compile_service_check_statuses ( modifiers ) service_check_method = transformers [ '__service_check' ]( transformers , column_name , ** modifiers ) def service_check ( _ , value , ** kwargs ): service_check_method ( _ , status_map . get ( value , ServiceCheck . UNKNOWN ), ** kwargs ) return service_check tag ( transformers , column_name , ** modifiers ) \u00b6 Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column env and the column returned the value prod1 , all submissions from that row will be tagged by env:prod1 . This also accepts an optional modifier called boolean that when set to true will transform the result to the string true or false . So for example if you named the column alive and the result was the number 0 the tag will be alive:false . Source code in def get_tag ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> str \"\"\" Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column `env` and the column returned the value `prod1`, all submissions from that row will be tagged by `env:prod1`. This also accepts an optional modifier called `boolean` that when set to `true` will transform the result to the string `true` or `false`. So for example if you named the column `alive` and the result was the number `0` the tag will be `alive:false`. \"\"\" template = ' {} :{{}}' . format ( column_name ) boolean = is_affirmative ( modifiers . pop ( 'boolean' , None )) def tag ( _ , value , ** kwargs ): if boolean : value = str ( is_affirmative ( value )) . lower () return template . format ( value ) return tag tag_list ( transformers , column_name , ** modifiers ) \u00b6 Convert a column to a list of tags that will be used in every submission. Tag name is determined by column_name . The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named server_tag and the column returned the value 'us,primary' , then all submissions for that row will be tagged by server_tag:us and server_tag:primary . Source code in def get_tag_list ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], List[str]] \"\"\" Convert a column to a list of tags that will be used in every submission. Tag name is determined by `column_name`. The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. 
For example, if the column is named `server_tag` and the column returned the value `'us,primary'`, then all submissions for that row will be tagged by `server_tag:us` and `server_tag:primary`. \"\"\" template = ' %s : {} ' % column_name def tag_list ( _ , value , ** kwargs ): if isinstance ( value , str ): value = [ v . strip () for v in value . split ( ',' )] return [ template . format ( v ) for v in value ] return tag_list temporal_percent ( transformers , column_name , ** modifiers ) \u00b6 Send the result as percentage of time since the last check run as a rate . For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called scale that indicates what unit of time the result should be considered. Valid values are: second millisecond microsecond nanosecond You may also define the unit as an integer number of parts compared to seconds e.g. millisecond is equivalent to 1000 . Source code in def get_temporal_percent ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as percentage of time since the last check run as a `rate`. For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called `scale` that indicates what unit of time the result should be considered. Valid values are: - `second` - `millisecond` - `microsecond` - `nanosecond` You may also define the unit as an integer number of parts compared to seconds e.g. `millisecond` is equivalent to `1000`. \"\"\" scale = modifiers . pop ( 'scale' , None ) if scale is None : raise ValueError ( 'the `scale` parameter is required' ) if isinstance ( scale , str ): scale = constants . TIME_UNITS . get ( scale . lower ()) if scale is None : raise ValueError ( 'the `scale` parameter must be one of: {} ' . format ( ' | ' . join ( sorted ( constants . TIME_UNITS ))) ) elif not isinstance ( scale , int ): raise ValueError ( 'the `scale` parameter must be an integer representing parts of a second e.g. 1000 for millisecond' ) rate = transformers [ 'rate' ]( transformers , column_name , ** modifiers ) def temporal_percent ( _ , value , ** kwargs ): rate ( _ , total_time_to_temporal_percent ( float ( value ), scale = scale ), ** kwargs ) return temporal_percent time_elapsed ( transformers , column_name , ** modifiers ) \u00b6 Send the number of seconds elapsed from a time in the past as a gauge . For example, if the result is an instance of datetime.datetime representing 5 seconds ago, then this would submit with a value of 5 . The optional modifier format indicates what format the result is in. By default it is native , assuming the underlying library provides timestamps as datetime objects. If the value is a UNIX timestamp you can set the format modifier to unix_time . If the value is a string representation of a date, you must provide the expected timestamp format using the supported codes . 
Examples: columns : - name : time_since_x type : time_elapsed format : native # default value and can be omitted - name : time_since_y type : time_elapsed format : unix_time - name : time_since_z type : time_elapsed format : \"%d/%m/%Y %H:%M:%S\" Note The code %z (lower case) is not supported on Windows. Source code in def get_time_elapsed ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the number of seconds elapsed from a time in the past as a `gauge`. For example, if the result is an instance of [datetime.datetime](https://docs.python.org/3/library/datetime.html#datetime.datetime) representing 5 seconds ago, then this would submit with a value of `5`. The optional modifier `format` indicates what format the result is in. By default it is `native`, assuming the underlying library provides timestamps as `datetime` objects. If the value is a UNIX timestamp you can set the `format` modifier to `unix_time`. If the value is a string representation of a date, you must provide the expected timestamp format using the [supported codes](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes). Example: ```yaml columns: - name: time_since_x type: time_elapsed format: native # default value and can be omitted - name: time_since_y type: time_elapsed format: unix_time - name: time_since_z type: time_elapsed format: \"%d/%m/%Y %H:%M:%S\" ``` !!! note The code `%z` (lower case) is not supported on Windows. \"\"\" time_format = modifiers . pop ( 'format' , 'native' ) if not isinstance ( time_format , str ): raise ValueError ( 'the `format` parameter must be a string' ) gauge = transformers [ 'gauge' ]( transformers , column_name , ** modifiers ) if time_format == 'native' : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( value ) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) elif time_format == 'unix_time' : def time_elapsed ( _ , value , ** kwargs ): gauge ( _ , time . time () - value , ** kwargs ) else : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( datetime . strptime ( value , time_format )) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) return time_elapsed datadog_checks.base.utils.db.transform.ExtraTransformers \u00b6 Every column transformer (except tag ) is supported at this level, the only difference being one must set a source to retrieve the desired value. So for example here: columns : - name : foo.bar type : rate extras : - name : foo.current type : gauge source : foo.bar the metric foo.current will be sent as a gauge will the value of foo.bar . expression ( transformers , name , ** modifiers ) \u00b6 This allows the evaluation of a limited subset of Python syntax and built-in functions. columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free expression : disk.total - disk.used submit_type : gauge For brevity, if the expression attribute exists and type does not then it is assumed the type is expression . The submit_type can be any transformer and any extra options are passed down to it. 
The result of every expression is stored, so in lieu of a submit_type the above example could also be written as: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : free expression : disk.total - disk.used - name : disk.free type : gauge source : free The order matters though, so for example the following will fail: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free type : gauge source : free - name : free expression : disk.total - disk.used since the source free does not yet exist. Source code in def get_expression ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], Any] \"\"\" This allows the evaluation of a limited subset of Python syntax and built-in functions. ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free expression: disk.total - disk.used submit_type: gauge ``` For brevity, if the `expression` attribute exists and `type` does not then it is assumed the type is `expression`. The `submit_type` can be any transformer and any extra options are passed down to it. The result of every expression is stored, so in lieu of a `submit_type` the above example could also be written as: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: free expression: disk.total - disk.used - name: disk.free type: gauge source: free ``` The order matters though, so for example the following will fail: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free type: gauge source: free - name: free expression: disk.total - disk.used ``` since the source `free` does not yet exist. \"\"\" available_sources = modifiers . pop ( 'sources' ) expression = modifiers . pop ( 'expression' , None ) if expression is None : raise ValueError ( 'the `expression` parameter is required' ) elif not isinstance ( expression , str ): raise ValueError ( 'the `expression` parameter must be a string' ) elif not expression : raise ValueError ( 'the `expression` parameter must not be empty' ) if not modifiers . pop ( 'verbose' , False ): # Sort the sources in reverse order of length to prevent greedy matching available_sources = sorted ( available_sources , key = lambda s : - len ( s )) # Escape special characters, mostly for the possible dots in metric names available_sources = list ( map ( re . escape , available_sources )) # Finally, utilize the order by relying on the guarantees provided by the alternation operator available_sources = '|' . join ( available_sources ) expression = re . sub ( SOURCE_PATTERN . format ( available_sources ), # Replace by the particular source that matched lambda match_obj : 'SOURCES[\" {} \"]' . format ( match_obj . group ( 1 )), expression , ) expression = compile ( expression , filename = name , mode = 'eval' ) del available_sources if 'submit_type' in modifiers : if modifiers [ 'submit_type' ] not in transformers : raise ValueError ( 'unknown submit_type ` {} `' . format ( modifiers [ 'submit_type' ])) submit_method = transformers [ modifiers . 
pop ( 'submit_type' )]( transformers , name , ** modifiers ) submit_method = create_extra_transformer ( submit_method ) def execute_expression ( sources , ** kwargs ): result = eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) submit_method ( sources , result , ** kwargs ) return result else : def execute_expression ( sources , ** kwargs ): return eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) return execute_expression percent ( transformers , name , ** modifiers ) \u00b6 Send a percentage based on 2 sources as a gauge . The required modifiers are part and total . For example, if you have this configuration: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.utilized type : percent part : disk.used total : disk.total then the extra metric disk.utilized would be sent as a gauge calculated as disk.used / disk.total * 100 . If the source of total is 0 , then the submitted value will always be sent as 0 too. Source code in def get_percent ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send a percentage based on 2 sources as a `gauge`. The required modifiers are `part` and `total`. For example, if you have this configuration: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.utilized type: percent part: disk.used total: disk.total ``` then the extra metric `disk.utilized` would be sent as a `gauge` calculated as `disk.used / disk.total * 100`. If the source of `total` is `0`, then the submitted value will always be sent as `0` too. \"\"\" available_sources = modifiers . pop ( 'sources' ) part = modifiers . pop ( 'part' , None ) if part is None : raise ValueError ( 'the `part` parameter is required' ) elif not isinstance ( part , str ): raise ValueError ( 'the `part` parameter must be a string' ) elif part not in available_sources : raise ValueError ( 'the `part` parameter ` {} ` is not an available source' . format ( part )) total = modifiers . pop ( 'total' , None ) if total is None : raise ValueError ( 'the `total` parameter is required' ) elif not isinstance ( total , str ): raise ValueError ( 'the `total` parameter must be a string' ) elif total not in available_sources : raise ValueError ( 'the `total` parameter ` {} ` is not an available source' . format ( total )) del available_sources gauge = transformers [ 'gauge' ]( transformers , name , ** modifiers ) gauge = create_extra_transformer ( gauge ) def percent ( sources , ** kwargs ): gauge ( sources , compute_percent ( sources [ part ], sources [ total ]), ** kwargs ) return percent","title":"Databases"},{"location":"base/databases/#databases","text":"No matter the database you wish to monitor, the base package provides a standard way to define and collect data from arbitrary queries. The core premise is that you define a function that accepts a query (usually a str ) and it returns a sequence of equal length results.","title":"Databases"},{"location":"base/databases/#interface","text":"All the functionality is exposed by the Query and QueryManager classes.","title":"Interface"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query","text":"This class accepts a single dict argument which is necessary to run the query. The representation is based on our custom_queries format originally designed and implemented in !1528 . 
It is now part of all our database integrations and other products have since adopted this format.","title":"Query"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query.__init__","text":"Source code in def __init__ ( self , query_data ): # type: (Dict[str, Any]) -> Query self . query_data = deepcopy ( query_data or {}) # type: Dict[str, Any] self . name = None # type: str self . query = None # type: str self . columns = None # type: List[str] self . extras = None # type: List[Dict[str, str]] self . tags = None # type: List[str]","title":"__init__()"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query.compile","text":"This idempotent method will be called by QueryManager.compile_queries so you should never need to call it directly. Source code in def compile ( self , column_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] extra_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] ): # type: (...) -> None \"\"\" This idempotent method will be called by `QueryManager.compile_queries` so you should never need to call it directly. \"\"\" # Check for previous compilation if self . name is not None : return query_name = self . query_data . get ( 'name' ) if not query_name : raise ValueError ( 'query field `name` is required' ) elif not isinstance ( query_name , str ): raise ValueError ( 'query field `name` must be a string' ) query = self . query_data . get ( 'query' ) if not query : raise ValueError ( 'field `query` for {} is required' . format ( query_name )) elif not isinstance ( query , str ): raise ValueError ( 'field `query` for {} must be a string' . format ( query_name )) columns = self . query_data . get ( 'columns' ) if not columns : raise ValueError ( 'field `columns` for {} is required' . format ( query_name )) elif not isinstance ( columns , list ): raise ValueError ( 'field `columns` for {} must be a list' . format ( query_name )) tags = self . query_data . get ( 'tags' , []) if tags is not None and not isinstance ( tags , list ): raise ValueError ( 'field `tags` for {} must be a list' . format ( query_name )) # Keep track of all defined names sources = {} column_data = [] for i , column in enumerate ( columns , 1 ): # Columns can be ignored via configuration. if not column : column_data . append (( None , None )) continue elif not isinstance ( column , dict ): raise ValueError ( 'column # {} of {} is not a mapping' . format ( i , query_name )) column_name = column . get ( 'name' ) if not column_name : raise ValueError ( 'field `name` for column # {} of {} is required' . format ( i , query_name )) elif not isinstance ( column_name , str ): raise ValueError ( 'field `name` for column # {} of {} must be a string' . format ( i , query_name )) elif column_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( column_name , query_name , sources [ column_name ][ 'type' ], sources [ column_name ][ 'index' ] ) ) sources [ column_name ] = { 'type' : 'column' , 'index' : i } column_type = column . get ( 'type' ) if not column_type : raise ValueError ( 'field `type` for column {} of {} is required' . format ( column_name , query_name )) elif not isinstance ( column_type , str ): raise ValueError ( 'field `type` for column {} of {} must be a string' . format ( column_name , query_name )) elif column_type == 'source' : column_data . 
append (( column_name , ( None , None ))) continue elif column_type not in column_transformers : raise ValueError ( 'unknown type ` {} ` for column {} of {} ' . format ( column_type , column_name , query_name )) modifiers = { key : value for key , value in column . items () if key not in ( 'name' , 'type' )} try : transformer = column_transformers [ column_type ]( column_transformers , column_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for column {} of {} : {} ' . format ( column_type , column_name , query_name , e ) # Prepend helpful error text. # # When an exception is raised in the context of another one, both will be printed. To avoid # this we set the context to None. https://www.python.org/dev/peps/pep-0409/ raise_from ( type ( e )( error ), None ) else : if column_type in ( 'tag' , 'tag_list' ): column_data . append (( column_name , ( column_type , transformer ))) else : # All these would actually submit data. As that is the default case, we represent it as # a reference to None since if we use e.g. `value` it would never be checked anyway. column_data . append (( column_name , ( None , transformer ))) submission_transformers = column_transformers . copy () submission_transformers . pop ( 'tag' ) submission_transformers . pop ( 'tag_list' ) extras = self . query_data . get ( 'extras' , []) if not isinstance ( extras , list ): raise ValueError ( 'field `extras` for {} must be a list' . format ( query_name )) extra_data = [] for i , extra in enumerate ( extras , 1 ): if not isinstance ( extra , dict ): raise ValueError ( 'extra # {} of {} is not a mapping' . format ( i , query_name )) extra_name = extra . get ( 'name' ) if not extra_name : raise ValueError ( 'field `name` for extra # {} of {} is required' . format ( i , query_name )) elif not isinstance ( extra_name , str ): raise ValueError ( 'field `name` for extra # {} of {} must be a string' . format ( i , query_name )) elif extra_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( extra_name , query_name , sources [ extra_name ][ 'type' ], sources [ extra_name ][ 'index' ] ) ) sources [ extra_name ] = { 'type' : 'extra' , 'index' : i } extra_type = extra . get ( 'type' ) if not extra_type : if 'expression' in extra : extra_type = 'expression' else : raise ValueError ( 'field `type` for extra {} of {} is required' . format ( extra_name , query_name )) elif not isinstance ( extra_type , str ): raise ValueError ( 'field `type` for extra {} of {} must be a string' . format ( extra_name , query_name )) elif extra_type not in extra_transformers and extra_type not in submission_transformers : raise ValueError ( 'unknown type ` {} ` for extra {} of {} ' . format ( extra_type , extra_name , query_name )) transformer_factory = extra_transformers . get ( extra_type , submission_transformers . get ( extra_type )) extra_source = extra . get ( 'source' ) if extra_type in submission_transformers : if not extra_source : raise ValueError ( 'field `source` for extra {} of {} is required' . format ( extra_name , query_name )) modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' , 'source' )} else : modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' )} modifiers [ 'sources' ] = sources try : transformer = transformer_factory ( submission_transformers , extra_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for extra {} of {} : {} ' . 
format ( extra_type , extra_name , query_name , e ) raise_from ( type ( e )( error ), None ) else : if extra_type in submission_transformers : transformer = create_extra_transformer ( transformer , extra_source ) extra_data . append (( extra_name , transformer )) self . name = query_name self . query = query self . columns = tuple ( column_data ) self . extras = tuple ( extra_data ) self . tags = tags del self . query_data","title":"compile()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager","text":"This class is in charge of running any number of Query instances for a single Check instance. You will most often see it created during Check initialization like this: self . _query_manager = QueryManager ( self , self . execute_query , queries = [ queries . SomeQuery1 , queries . SomeQuery2 , queries . SomeQuery3 , queries . SomeQuery4 , queries . SomeQuery5 , ], tags = self . instance . get ( 'tags' , []), error_handler = self . _error_sanitizer , ) self . check_initializations . append ( self . _query_manager . compile_queries )","title":"QueryManager"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.__init__","text":"check ( AgentCheck ) - an instance of a Check executor ( callable ) - a callable accepting a str query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set queries ( List[Query] ) - a list of Query instances tags ( List[str] ) - a list of tags to associate with every submission error_handler ( callable ) - a callable accepting a str error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit Source code in def __init__ ( self , check , # type: AgentCheck executor , # type: Callable[[str], Union[Sequence, Iterable]] queries = None , # type: List[str] tags = None , # type: List[str] error_handler = None , # type: Callable[[str], str] hostname = None , # type: str ): # type: (...) -> QueryManager \"\"\" - **check** (_AgentCheck_) - an instance of a Check - **executor** (_callable_) - a callable accepting a `str` query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set - **queries** (_List[Query]_) - a list of `Query` instances - **tags** (_List[str]_) - a list of tags to associate with every submission - **error_handler** (_callable_) - a callable accepting a `str` error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit \"\"\" self . check = check # type: AgentCheck self . executor = executor # type: Callable[[str], Union[Sequence, Iterable]] self . tags = tags or [] self . error_handler = error_handler self . queries = [ Query ( payload ) for payload in queries or []] # type: List[Query] self . hostname = hostname # type: str custom_queries = list ( self . check . instance . get ( 'custom_queries' , [])) # type: List[str] use_global_custom_queries = self . check . instance . get ( 'use_global_custom_queries' , True ) # type: str # Handle overrides if use_global_custom_queries == 'extend' : custom_queries . extend ( self . check . init_config . get ( 'global_custom_queries' , [])) elif ( not custom_queries and 'global_custom_queries' in self . check . init_config and is_affirmative ( use_global_custom_queries ) ): custom_queries = self . check . init_config . 
get ( 'global_custom_queries' , []) # Deduplicate for i , custom_query in enumerate ( iter_unique ( custom_queries ), 1 ): query = Query ( custom_query ) query . query_data . setdefault ( 'name' , 'custom query # {} ' . format ( i )) self . queries . append ( query )","title":"__init__()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.compile_queries","text":"This method compiles every Query object. Source code in def compile_queries ( self ): \"\"\"This method compiles every `Query` object.\"\"\" column_transformers = COLUMN_TRANSFORMERS . copy () for submission_method , transformer_name in SUBMISSION_METHODS . items (): method = getattr ( self . check , submission_method ) # Save each method in the initializer -> callable format column_transformers [ transformer_name ] = create_submission_transformer ( method ) for query in self . queries : query . compile ( column_transformers , EXTRA_TRANSFORMERS . copy ())","title":"compile_queries()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.execute","text":"This method is what you call every check run. Source code in def execute ( self , extra_tags = None ): \"\"\"This method is what you call every check run.\"\"\" logger = self . check . log if extra_tags : global_tags = list ( extra_tags ) global_tags . extend ( self . tags ) else : global_tags = self . tags for query in self . queries : query_name = query . name query_columns = query . columns query_extras = query . extras query_tags = query . tags num_columns = len ( query_columns ) try : rows = self . execute_query ( query . query ) except Exception as e : if self . error_handler : logger . error ( 'Error querying %s : %s ' , query_name , self . error_handler ( str ( e ))) else : logger . error ( 'Error querying %s : %s ' , query_name , e ) continue for row in rows : if not row : logger . debug ( 'Query %s returned an empty result' , query_name ) continue if num_columns != len ( row ): logger . error ( 'Query %s expected %d column %s , got %d ' , query_name , num_columns , 's' if num_columns > 1 else '' , len ( row ), ) continue sources = {} submission_queue = [] tags = list ( global_tags ) tags . extend ( query_tags ) for ( column_name , transformer ), value in zip ( query_columns , row ): # Columns can be ignored via configuration if not column_name : continue sources [ column_name ] = value column_type , transformer = transformer # The transformer can be None for `source` types. Those such columns do not submit # anything but are collected into the row values for other columns to reference. if transformer is None : continue elif column_type == 'tag' : tags . append ( transformer ( None , value )) elif column_type == 'tag_list' : tags . extend ( transformer ( None , value )) else : submission_queue . append (( transformer , value )) for transformer , value in submission_queue : transformer ( sources , value , tags = tags , hostname = self . hostname ) for name , transformer in query_extras : try : result = transformer ( sources , tags = tags , hostname = self . hostname ) except Exception as e : logger . error ( 'Error transforming %s : %s ' , name , e ) continue else : if result is not None : sources [ name ] = result","title":"execute()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.execute_query","text":"Called by execute , this triggers query execution to check for errors immediately in a way that is compatible with any library. 
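To see why triggering execution matters, consider a lazy executor such as the hypothetical sketch below: because it is a generator function, nothing reaches the database until the first row is requested, so advancing the iterator here is what surfaces connection and syntax errors inside the error handling of `execute`:

```python
# Hypothetical executor wrapping a DB-API style cursor. Calling execute(query)
# runs none of the body (it only creates a generator); cursor.execute fires
# when the first row is pulled, which is exactly what execute_query does.
def make_executor(cursor):
    def execute(query):
        cursor.execute(query)  # raises only once iteration starts
        for row in cursor:
            yield row
    return execute
```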
If there are no errors, this is guaranteed to return an iterator over the result set. Source code in def execute_query ( self , query ): \"\"\" Called by `execute`, this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. \"\"\" rows = self . executor ( query ) if rows is None : return iter ([]) else : rows = iter ( rows ) # Ensure we trigger query execution try : first_row = next ( rows ) except StopIteration : return iter ([]) return chain (( first_row ,), rows )","title":"execute_query()"},{"location":"base/databases/#transformers","text":"","title":"Transformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers","text":"","title":"ColumnTransformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.match","text":"This is used for querying unstructured data. For example, say you want to collect the fields named foo and bar . Typically, they would be stored like: foo bar 4 2 and would be queried like: SELECT foo , bar FROM ... Often, you will instead find data stored in the following format: metric value foo 4 bar 2 and would be queried like: SELECT metric , value FROM ... In this case, the metric column stores the name with which to match on and its value is stored in a separate column. The required items modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the columns top level field. You must also define a source modifier either for this transformer itself or in the values of items (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: query : SELECT source1, source2, metric FROM TABLE columns : - name : value1 type : source - name : value2 type : source - name : metric_name type : match source : value1 items : foo : name : test.foo type : gauge source : value2 bar : name : test.bar type : monotonic_gauge and the result set is: source1 source2 metric 1 2 foo 3 4 baz 5 6 bar Here's what would be submitted: foo - test.foo as a gauge with a value of 2 bar - test.bar.total as a gauge and test.bar.count as a monotonic_count , both with a value of 5 baz - nothing since it was not defined as a match item Source code in def get_match ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" This is used for querying unstructured data. For example, say you want to collect the fields named `foo` and `bar`. Typically, they would be stored like: | foo | bar | | --- | --- | | 4 | 2 | and would be queried like: ```sql SELECT foo, bar FROM ... ``` Often, you will instead find data stored in the following format: | metric | value | | ------ | ----- | | foo | 4 | | bar | 2 | and would be queried like: ```sql SELECT metric, value FROM ... ``` In this case, the `metric` column stores the name with which to match on and its `value` is stored in a separate column. The required `items` modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the `columns` top level field. You must also define a `source` modifier either for this transformer itself or in the values of `items` (which will take precedence). The source will be treated as the value of the match. 
Say this is your configuration: ```yaml query: SELECT source1, source2, metric FROM TABLE columns: - name: value1 type: source - name: value2 type: source - name: metric_name type: match source: value1 items: foo: name: test.foo type: gauge source: value2 bar: name: test.bar type: monotonic_gauge ``` and the result set is: | source1 | source2 | metric | | ------- | ------- | ------ | | 1 | 2 | foo | | 3 | 4 | baz | | 5 | 6 | bar | Here's what would be submitted: - `foo` - `test.foo` as a `gauge` with a value of `2` - `bar` - `test.bar.total` as a `gauge` and `test.bar.count` as a `monotonic_count`, both with a value of `5` - `baz` - nothing since it was not defined as a match item \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables compiled_items = _compile_match_items ( transformers , modifiers ) def match ( sources , value , ** kwargs ): if value in compiled_items : source , transformer = compiled_items [ value ] transformer ( sources , sources [ source ], ** kwargs ) return match","title":"match()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.monotonic_gauge","text":"Send the result as both a gauge suffixed by .total and a monotonic_count suffixed by .count . Source code in def get_monotonic_gauge ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as both a `gauge` suffixed by `.total` and a `monotonic_count` suffixed by `.count`. \"\"\" gauge = transformers [ 'gauge' ]( transformers , ' {} .total' . format ( column_name ), ** modifiers ) monotonic_count = transformers [ 'monotonic_count' ]( transformers , ' {} .count' . format ( column_name ), ** modifiers ) def monotonic_gauge ( _ , value , ** kwargs ): gauge ( _ , value , ** kwargs ) monotonic_count ( _ , value , ** kwargs ) return monotonic_gauge","title":"monotonic_gauge()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.service_check","text":"Submit a service check. The required modifier status_map is a mapping of values to statuses. Valid statuses include: OK WARNING CRITICAL UNKNOWN Any encountered values that are not defined will be sent as UNKNOWN . Source code in def get_service_check ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Submit a service check. The required modifier `status_map` is a mapping of values to statuses. Valid statuses include: - `OK` - `WARNING` - `CRITICAL` - `UNKNOWN` Any encountered values that are not defined will be sent as `UNKNOWN`. \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables status_map = _compile_service_check_statuses ( modifiers ) service_check_method = transformers [ '__service_check' ]( transformers , column_name , ** modifiers ) def service_check ( _ , value , ** kwargs ): service_check_method ( _ , status_map . get ( value , ServiceCheck . UNKNOWN ), ** kwargs ) return service_check","title":"service_check()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.tag","text":"Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column env and the column returned the value prod1 , all submissions from that row will be tagged by env:prod1 . This also accepts an optional modifier called boolean that when set to true will transform the result to the string true or false . 
So for example if you named the column alive and the result was the number 0 the tag will be alive:false . Source code in def get_tag ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> str \"\"\" Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column `env` and the column returned the value `prod1`, all submissions from that row will be tagged by `env:prod1`. This also accepts an optional modifier called `boolean` that when set to `true` will transform the result to the string `true` or `false`. So for example if you named the column `alive` and the result was the number `0` the tag will be `alive:false`. \"\"\" template = ' {} :{{}}' . format ( column_name ) boolean = is_affirmative ( modifiers . pop ( 'boolean' , None )) def tag ( _ , value , ** kwargs ): if boolean : value = str ( is_affirmative ( value )) . lower () return template . format ( value ) return tag","title":"tag()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.tag_list","text":"Convert a column to a list of tags that will be used in every submission. Tag name is determined by column_name . The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named server_tag and the column returned the value 'us,primary' , then all submissions for that row will be tagged by server_tag:us and server_tag:primary . Source code in def get_tag_list ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], List[str]] \"\"\" Convert a column to a list of tags that will be used in every submission. Tag name is determined by `column_name`. The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named `server_tag` and the column returned the value `'us,primary'`, then all submissions for that row will be tagged by `server_tag:us` and `server_tag:primary`. \"\"\" template = ' %s : {} ' % column_name def tag_list ( _ , value , ** kwargs ): if isinstance ( value , str ): value = [ v . strip () for v in value . split ( ',' )] return [ template . format ( v ) for v in value ] return tag_list","title":"tag_list()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.temporal_percent","text":"Send the result as percentage of time since the last check run as a rate . For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called scale that indicates what unit of time the result should be considered. Valid values are: second millisecond microsecond nanosecond You may also define the unit as an integer number of parts compared to seconds e.g. millisecond is equivalent to 1000 . Source code in def get_temporal_percent ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as percentage of time since the last check run as a `rate`. For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. 
That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called `scale` that indicates what unit of time the result should be considered. Valid values are: - `second` - `millisecond` - `microsecond` - `nanosecond` You may also define the unit as an integer number of parts compared to seconds e.g. `millisecond` is equivalent to `1000`. \"\"\" scale = modifiers . pop ( 'scale' , None ) if scale is None : raise ValueError ( 'the `scale` parameter is required' ) if isinstance ( scale , str ): scale = constants . TIME_UNITS . get ( scale . lower ()) if scale is None : raise ValueError ( 'the `scale` parameter must be one of: {} ' . format ( ' | ' . join ( sorted ( constants . TIME_UNITS ))) ) elif not isinstance ( scale , int ): raise ValueError ( 'the `scale` parameter must be an integer representing parts of a second e.g. 1000 for millisecond' ) rate = transformers [ 'rate' ]( transformers , column_name , ** modifiers ) def temporal_percent ( _ , value , ** kwargs ): rate ( _ , total_time_to_temporal_percent ( float ( value ), scale = scale ), ** kwargs ) return temporal_percent","title":"temporal_percent()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.time_elapsed","text":"Send the number of seconds elapsed from a time in the past as a gauge . For example, if the result is an instance of datetime.datetime representing 5 seconds ago, then this would submit with a value of 5 . The optional modifier format indicates what format the result is in. By default it is native , assuming the underlying library provides timestamps as datetime objects. If the value is a UNIX timestamp you can set the format modifier to unix_time . If the value is a string representation of a date, you must provide the expected timestamp format using the supported codes . Examples: columns : - name : time_since_x type : time_elapsed format : native # default value and can be omitted - name : time_since_y type : time_elapsed format : unix_time - name : time_since_z type : time_elapsed format : \"%d/%m/%Y %H:%M:%S\" Note The code %z (lower case) is not supported on Windows. Source code in def get_time_elapsed ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the number of seconds elapsed from a time in the past as a `gauge`. For example, if the result is an instance of [datetime.datetime](https://docs.python.org/3/library/datetime.html#datetime.datetime) representing 5 seconds ago, then this would submit with a value of `5`. The optional modifier `format` indicates what format the result is in. By default it is `native`, assuming the underlying library provides timestamps as `datetime` objects. If the value is a UNIX timestamp you can set the `format` modifier to `unix_time`. If the value is a string representation of a date, you must provide the expected timestamp format using the [supported codes](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes). Example: ```yaml columns: - name: time_since_x type: time_elapsed format: native # default value and can be omitted - name: time_since_y type: time_elapsed format: unix_time - name: time_since_z type: time_elapsed format: \"%d/%m/%Y %H:%M:%S\" ``` !!! note The code `%z` (lower case) is not supported on Windows. \"\"\" time_format = modifiers . 
pop ( 'format' , 'native' ) if not isinstance ( time_format , str ): raise ValueError ( 'the `format` parameter must be a string' ) gauge = transformers [ 'gauge' ]( transformers , column_name , ** modifiers ) if time_format == 'native' : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( value ) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) elif time_format == 'unix_time' : def time_elapsed ( _ , value , ** kwargs ): gauge ( _ , time . time () - value , ** kwargs ) else : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( datetime . strptime ( value , time_format )) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) return time_elapsed","title":"time_elapsed()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers","text":"Every column transformer (except tag ) is supported at this level, the only difference being one must set a source to retrieve the desired value. So for example here: columns : - name : foo.bar type : rate extras : - name : foo.current type : gauge source : foo.bar the metric foo.current will be sent as a gauge will the value of foo.bar .","title":"ExtraTransformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers.expression","text":"This allows the evaluation of a limited subset of Python syntax and built-in functions. columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free expression : disk.total - disk.used submit_type : gauge For brevity, if the expression attribute exists and type does not then it is assumed the type is expression . The submit_type can be any transformer and any extra options are passed down to it. The result of every expression is stored, so in lieu of a submit_type the above example could also be written as: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : free expression : disk.total - disk.used - name : disk.free type : gauge source : free The order matters though, so for example the following will fail: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free type : gauge source : free - name : free expression : disk.total - disk.used since the source free does not yet exist. Source code in def get_expression ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], Any] \"\"\" This allows the evaluation of a limited subset of Python syntax and built-in functions. ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free expression: disk.total - disk.used submit_type: gauge ``` For brevity, if the `expression` attribute exists and `type` does not then it is assumed the type is `expression`. The `submit_type` can be any transformer and any extra options are passed down to it. 
The result of every expression is stored, so in lieu of a `submit_type` the above example could also be written as: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: free expression: disk.total - disk.used - name: disk.free type: gauge source: free ``` The order matters though, so for example the following will fail: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free type: gauge source: free - name: free expression: disk.total - disk.used ``` since the source `free` does not yet exist. \"\"\" available_sources = modifiers . pop ( 'sources' ) expression = modifiers . pop ( 'expression' , None ) if expression is None : raise ValueError ( 'the `expression` parameter is required' ) elif not isinstance ( expression , str ): raise ValueError ( 'the `expression` parameter must be a string' ) elif not expression : raise ValueError ( 'the `expression` parameter must not be empty' ) if not modifiers . pop ( 'verbose' , False ): # Sort the sources in reverse order of length to prevent greedy matching available_sources = sorted ( available_sources , key = lambda s : - len ( s )) # Escape special characters, mostly for the possible dots in metric names available_sources = list ( map ( re . escape , available_sources )) # Finally, utilize the order by relying on the guarantees provided by the alternation operator available_sources = '|' . join ( available_sources ) expression = re . sub ( SOURCE_PATTERN . format ( available_sources ), # Replace by the particular source that matched lambda match_obj : 'SOURCES[\" {} \"]' . format ( match_obj . group ( 1 )), expression , ) expression = compile ( expression , filename = name , mode = 'eval' ) del available_sources if 'submit_type' in modifiers : if modifiers [ 'submit_type' ] not in transformers : raise ValueError ( 'unknown submit_type ` {} `' . format ( modifiers [ 'submit_type' ])) submit_method = transformers [ modifiers . pop ( 'submit_type' )]( transformers , name , ** modifiers ) submit_method = create_extra_transformer ( submit_method ) def execute_expression ( sources , ** kwargs ): result = eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) submit_method ( sources , result , ** kwargs ) return result else : def execute_expression ( sources , ** kwargs ): return eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) return execute_expression","title":"expression()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers.percent","text":"Send a percentage based on 2 sources as a gauge . The required modifiers are part and total . For example, if you have this configuration: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.utilized type : percent part : disk.used total : disk.total then the extra metric disk.utilized would be sent as a gauge calculated as disk.used / disk.total * 100 . If the source of total is 0 , then the submitted value will always be sent as 0 too. Source code in def get_percent ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send a percentage based on 2 sources as a `gauge`. The required modifiers are `part` and `total`. 
For example, if you have this configuration: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.utilized type: percent part: disk.used total: disk.total ``` then the extra metric `disk.utilized` would be sent as a `gauge` calculated as `disk.used / disk.total * 100`. If the source of `total` is `0`, then the submitted value will always be sent as `0` too. \"\"\" available_sources = modifiers . pop ( 'sources' ) part = modifiers . pop ( 'part' , None ) if part is None : raise ValueError ( 'the `part` parameter is required' ) elif not isinstance ( part , str ): raise ValueError ( 'the `part` parameter must be a string' ) elif part not in available_sources : raise ValueError ( 'the `part` parameter ` {} ` is not an available source' . format ( part )) total = modifiers . pop ( 'total' , None ) if total is None : raise ValueError ( 'the `total` parameter is required' ) elif not isinstance ( total , str ): raise ValueError ( 'the `total` parameter must be a string' ) elif total not in available_sources : raise ValueError ( 'the `total` parameter ` {} ` is not an available source' . format ( total )) del available_sources gauge = transformers [ 'gauge' ]( transformers , name , ** modifiers ) gauge = create_extra_transformer ( gauge ) def percent ( sources , ** kwargs ): gauge ( sources , compute_percent ( sources [ part ], sources [ total ]), ** kwargs ) return percent","title":"percent()"},{"location":"base/http/","text":"HTTP \u00b6 Whenever you need to make HTTP requests, the base class provides a convenience member that has the same interface as the popular requests library and ensures consistent behavior across all integrations. The wrapper automatically parses and uses configuration from the instance , init_config , and Agent config. Also, this is only done once during initialization and cached to reduce the overhead of every call. For example, to make a GET request you would use: response = self . http . get ( url ) and the wrapper will pass the right things to requests . All methods accept optional keyword arguments like stream , etc. Any method-level option will override configuration. So for example if tls_verify was set to false and you do self.http.get(url, verify=True) , then SSL certificates will be verified on that particular request. You can use the keyword argument persist to override persist_connections . There is also support for non-standard or legacy configurations with the HTTP_CONFIG_REMAPPER class attribute. For example: class MyCheck ( AgentCheck ): HTTP_CONFIG_REMAPPER = { 'disable_ssl_validation' : { 'name' : 'tls_verify' , 'default' : False , 'invert' : True , }, ... } ... Support for Unix socket is provided via requests-unixsocket and allows making UDS requests on the unix:// scheme (not supported on Windows until Python adds support for AF_UNIX , see ticket ): url = 'unix:///var/run/docker.sock' response = self . http . get ( url ) Options \u00b6 Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. 
auth_token auth_type aws_host aws_region aws_service connect_timeout extra_headers headers kerberos_auth kerberos_cache kerberos_delegate kerberos_force_initiate kerberos_hostname kerberos_keytab kerberos_principal log_requests ntlm_domain password persist_connections proxy read_timeout skip_proxy tls_ca_cert tls_cert tls_use_host_header tls_ignore_warning tls_private_key tls_verify timeout use_legacy_auth_encoding username Future \u00b6 Support for configuring cookies! Since they can be set globally, per-domain, and even per-path, the configuration may be complex if not thought out adequately. We'll discuss options for what that might look like. Only our spark and cisco_aci checks currently set cookies, and that is based on code logic, not configuration.","title":"HTTP"},{"location":"base/http/#http","text":"Whenever you need to make HTTP requests, the base class provides a convenience member that has the same interface as the popular requests library and ensures consistent behavior across all integrations. The wrapper automatically parses and uses configuration from the instance , init_config , and Agent config. Also, this is only done once during initialization and cached to reduce the overhead of every call. For example, to make a GET request you would use: response = self . http . get ( url ) and the wrapper will pass the right things to requests . All methods accept optional keyword arguments like stream , etc. Any method-level option will override configuration. So for example if tls_verify was set to false and you do self.http.get(url, verify=True) , then SSL certificates will be verified on that particular request. You can use the keyword argument persist to override persist_connections . There is also support for non-standard or legacy configurations with the HTTP_CONFIG_REMAPPER class attribute. For example: class MyCheck ( AgentCheck ): HTTP_CONFIG_REMAPPER = { 'disable_ssl_validation' : { 'name' : 'tls_verify' , 'default' : False , 'invert' : True , }, ... } ... Support for Unix socket is provided via requests-unixsocket and allows making UDS requests on the unix:// scheme (not supported on Windows until Python adds support for AF_UNIX , see ticket ): url = 'unix:///var/run/docker.sock' response = self . http . get ( url )","title":"HTTP"},{"location":"base/http/#options","text":"Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. auth_token auth_type aws_host aws_region aws_service connect_timeout extra_headers headers kerberos_auth kerberos_cache kerberos_delegate kerberos_force_initiate kerberos_hostname kerberos_keytab kerberos_principal log_requests ntlm_domain password persist_connections proxy read_timeout skip_proxy tls_ca_cert tls_cert tls_use_host_header tls_ignore_warning tls_private_key tls_verify timeout use_legacy_auth_encoding username","title":"Options"},{"location":"base/http/#future","text":"Support for configuring cookies! Since they can be set globally, per-domain, and even per-path, the configuration may be complex if not thought out adequately. We'll discuss options for what that might look like. 
Only our spark and cisco_aci checks currently set cookies, and that is based on code logic, not configuration.","title":"Future"},{"location":"base/metadata/","text":"Metadata \u00b6 Often, you will want to collect mostly unstructured data that doesn't map well to tags, like fine-grained product version information. The base class provides a method that handles such cases. The collected data is captured by flares , displayed on the Agent's status page , and will eventually be queryable in-app . Interface \u00b6 The set_metadata method of the base class updates cached metadata values, which are then sent by the Agent at regular intervals. It requires 2 arguments: name - The name of the metadata. value - The value for the metadata. If name has no transformer defined then the raw value will be submitted and therefore it must be a str . The method also accepts arbitrary keyword arguments that are forwarded to any defined transformers. Transformers \u00b6 Custom transformers may be defined via a class level attribute METADATA_TRANSFORMERS . This is a mapping of metadata names to functions. When you call self . set_metadata ( name , value , ** options ) , if name is in this mapping then the corresponding function will be called with the value , and the return value(s) will be collected instead. Transformer functions must satisfy the following signature: def transform_ < NAME > ( value : Any , options : dict ) -> Union [ str , Dict [ str , str ]]: If the return type is str , then it will be sent as the value for name . If the return type is a mapping type, then each key will be considered a name and will be sent with its ( str ) value. For example, the following would collect an entity named square with a value of '25' : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): METADATA_TRANSFORMERS = { 'square' : lambda value , options : str ( int ( value ) ** 2 ) } def check ( self , instance ): self . set_metadata ( 'square' , '5' ) There are a few default transformers, which can be overridden by custom transformers. transform_config ( self , config , options ) \u00b6 Note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: METADATA_DEFAULT_CONFIG_INSTANCE METADATA_DEFAULT_CONFIG_INIT_CONFIG This transforms a dict of arbitrary user configuration. A section must be defined indicating what the configuration represents e.g. init_config . The metadata name submitted will become config.
. The value will be a JSON str with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: is_set - a boolean indicating whether or not the field exists value - the value of the field. this is only set if the field exists and the value is a primitive type ( None | bool | float | int | str ) The allowed fields are derived from the optional whitelist and blacklist . By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set metadata_whitelist and/or metadata_blacklist which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. Source code in def transform_config ( self , config , options ): \"\"\" !!! note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: - `METADATA_DEFAULT_CONFIG_INSTANCE` - `METADATA_DEFAULT_CONFIG_INIT_CONFIG` This transforms a `dict` of arbitrary user configuration. A `section` must be defined indicating what the configuration represents e.g. `init_config`. The metadata name submitted will become `config.
`. The value will be a JSON `str` with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: 1. `is_set` - a boolean indicating whether or not the field exists 2. `value` - the value of the field. this is only set if the field exists and the value is a primitive type (`None` | `bool` | `float` | `int` | `str`) The allowed fields are derived from the optional `whitelist` and `blacklist`. By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set `metadata_whitelist` and/or `metadata_blacklist` which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. \"\"\" section = options . get ( 'section' ) if section is None : raise ValueError ( 'The `section` option is required' ) # Although we define the default fields to send in code i.e. the default whitelist, there # may be cases where a subclass (for example of OpenMetricsBaseCheck) would want to ignore # just a few fields, hence for convenience we have the ability to also pass a blacklist. whitelist = config . get ( 'metadata_whitelist' , options . get ( 'whitelist' )) or () blacklist = config . get ( 'metadata_blacklist' , options . get ( 'blacklist' , DEFAULT_BLACKLIST )) or () blacklist = re . compile ( '|' . join ( blacklist ), re . IGNORECASE ) transformed_data = {} data = [] for field in whitelist : if blacklist . search ( field ): self . logger . debug ( 'Skipping metadata submission of blacklisted field ` %s ` in section ` %s `' , field , section ) continue field_data = {} if field in config : field_data [ 'is_set' ] = True value = config [ field ] if is_primitive ( value ): field_data [ 'value' ] = value else : self . logger . debug ( 'Skipping metadata submission of non-primitive type ` %s ` for field ` %s ` in section ` %s `' , type ( value ) . __name__ , field , section , ) else : field_data [ 'is_set' ] = False data . append ( field_data ) if data : # To avoid the backend having to parse a potentially unbounded number of unique keys, we # send `config.` rather than `config..` since # the number of sections is finite (currently only `instance` and `init_config`). transformed_data [ 'config. {} ' . format ( section )] = json . dumps ( data ) return transformed_data transform_version ( self , version , options ) \u00b6 Transforms a version like 1.2.3-rc.4+5 to its constituent parts. In all cases, the metadata names version.raw and version.scheme will be collected. If a scheme is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to semver . The supported schemes are: regex - A pattern must also be defined. The pattern must be a str or a pre-compiled re.Pattern . Any matching named subgroups will then be sent as version. . In this case, the check name will be used as the value of version.scheme unless final_scheme is also set, which will take precedence. parts - A part_map must also be defined. Each key in this mapping will be considered a name and will be sent with its ( str ) value. semver - This is essentially the same as regex with the pattern set to the standard regular expression for semantic versioning. Taking the example above, calling self . 
set_metadata ( 'version' , '1.2.3-rc.4+5' ) would produce: name value version.raw 1.2.3-rc.4+5 version.scheme semver version.major 1 version.minor 2 version.patch 3 version.release rc.4 version.build 5 Source code in def transform_version ( self , version , options ): \"\"\" Transforms a version like `1.2.3-rc.4+5` to its constituent parts. In all cases, the metadata names `version.raw` and `version.scheme` will be collected. If a `scheme` is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to `semver`. The supported schemes are: - `regex` - A `pattern` must also be defined. The pattern must be a `str` or a pre-compiled `re.Pattern`. Any matching named subgroups will then be sent as `version.`. In this case, the check name will be used as the value of `version.scheme` unless `final_scheme` is also set, which will take precedence. - `parts` - A `part_map` must also be defined. Each key in this mapping will be considered a `name` and will be sent with its (`str`) value. - `semver` - This is essentially the same as `regex` with the `pattern` set to the standard regular expression for semantic versioning. Taking the example above, calling `#!python self.set_metadata('version', '1.2.3-rc.4+5')` would produce: | name | value | | --- | --- | | `version.raw` | `1.2.3-rc.4+5` | | `version.scheme` | `semver` | | `version.major` | `1` | | `version.minor` | `2` | | `version.patch` | `3` | | `version.release` | `rc.4` | | `version.build` | `5` | \"\"\" scheme , version_parts = parse_version ( version , options ) if scheme == 'regex' or scheme == 'parts' : scheme = options . get ( 'final_scheme' , self . check_name ) data = { 'version. {} ' . format ( part_name ): part_value for part_name , part_value in iteritems ( version_parts )} data [ 'version.raw' ] = version data [ 'version.scheme' ] = scheme return data","title":"Metadata"},{"location":"base/metadata/#metadata","text":"Often, you will want to collect mostly unstructured data that doesn't map well to tags, like fine-grained product version information. The base class provides a method that handles such cases. The collected data is captured by flares , displayed on the Agent's status page , and will eventually be queryable in-app .","title":"Metadata"},{"location":"base/metadata/#interface","text":"The set_metadata method of the base class updates cached metadata values, which are then sent by the Agent at regular intervals. It requires 2 arguments: name - The name of the metadata. value - The value for the metadata. If name has no transformer defined then the raw value will be submitted and therefore it must be a str . The method also accepts arbitrary keyword arguments that are forwarded to any defined transformers.","title":"Interface"},{"location":"base/metadata/#transformers","text":"Custom transformers may be defined via a class level attribute METADATA_TRANSFORMERS . This is a mapping of metadata names to functions. When you call self . set_metadata ( name , value , ** options ) , if name is in this mapping then the corresponding function will be called with the value , and the return value(s) will be collected instead. Transformer functions must satisfy the following signature: def transform_ < NAME > ( value : Any , options : dict ) -> Union [ str , Dict [ str , str ]]: If the return type is str , then it will be sent as the value for name . If the return type is a mapping type, then each key will be considered a name and will be sent with its ( str ) value. 
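As a hedged illustration of the mapping case (the check name, metadata names, and values below are hypothetical), a single transformer call can fan out into several collected entries:

```python
from datadog_checks.base import AgentCheck


class BoundsCheck(AgentCheck):
    # Hypothetical transformer returning a mapping: each key is collected as
    # its own metadata name with the corresponding string value.
    METADATA_TRANSFORMERS = {
        'bounds': lambda value, options: {
            'bounds.min': str(min(value)),
            'bounds.max': str(max(value)),
        }
    }

    def check(self, instance):
        self.set_metadata('bounds', [3, 1, 4, 1, 5])
```

Here `bounds.min` would be collected with the value `'1'` and `bounds.max` with `'5'`.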
For example, the following would collect an entity named square with a value of '25' : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): METADATA_TRANSFORMERS = { 'square' : lambda value , options : str ( int ( value ) ** 2 ) } def check ( self , instance ): self . set_metadata ( 'square' , '5' ) There are a few default transformers, which can be overridden by custom transformers.","title":"Transformers"},{"location":"base/metadata/#datadog_checks.base.utils.metadata.core.MetadataManager.transform_config","text":"Note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: METADATA_DEFAULT_CONFIG_INSTANCE METADATA_DEFAULT_CONFIG_INIT_CONFIG This transforms a dict of arbitrary user configuration. A section must be defined indicating what the configuration represents e.g. init_config . The metadata name submitted will become config.
. The value will be a JSON str with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: is_set - a boolean indicating whether or not the field exists value - the value of the field. this is only set if the field exists and the value is a primitive type ( None | bool | float | int | str ) The allowed fields are derived from the optional whitelist and blacklist . By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set metadata_whitelist and/or metadata_blacklist which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. Source code in def transform_config ( self , config , options ): \"\"\" !!! note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: - `METADATA_DEFAULT_CONFIG_INSTANCE` - `METADATA_DEFAULT_CONFIG_INIT_CONFIG` This transforms a `dict` of arbitrary user configuration. A `section` must be defined indicating what the configuration represents e.g. `init_config`. The metadata name submitted will become `config.
`. The value will be a JSON `str` with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: 1. `is_set` - a boolean indicating whether or not the field exists 2. `value` - the value of the field. this is only set if the field exists and the value is a primitive type (`None` | `bool` | `float` | `int` | `str`) The allowed fields are derived from the optional `whitelist` and `blacklist`. By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set `metadata_whitelist` and/or `metadata_blacklist` which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. \"\"\" section = options . get ( 'section' ) if section is None : raise ValueError ( 'The `section` option is required' ) # Although we define the default fields to send in code i.e. the default whitelist, there # may be cases where a subclass (for example of OpenMetricsBaseCheck) would want to ignore # just a few fields, hence for convenience we have the ability to also pass a blacklist. whitelist = config . get ( 'metadata_whitelist' , options . get ( 'whitelist' )) or () blacklist = config . get ( 'metadata_blacklist' , options . get ( 'blacklist' , DEFAULT_BLACKLIST )) or () blacklist = re . compile ( '|' . join ( blacklist ), re . IGNORECASE ) transformed_data = {} data = [] for field in whitelist : if blacklist . search ( field ): self . logger . debug ( 'Skipping metadata submission of blacklisted field ` %s ` in section ` %s `' , field , section ) continue field_data = {} if field in config : field_data [ 'is_set' ] = True value = config [ field ] if is_primitive ( value ): field_data [ 'value' ] = value else : self . logger . debug ( 'Skipping metadata submission of non-primitive type ` %s ` for field ` %s ` in section ` %s `' , type ( value ) . __name__ , field , section , ) else : field_data [ 'is_set' ] = False data . append ( field_data ) if data : # To avoid the backend having to parse a potentially unbounded number of unique keys, we # send `config.` rather than `config..` since # the number of sections is finite (currently only `instance` and `init_config`). transformed_data [ 'config. {} ' . format ( section )] = json . dumps ( data ) return transformed_data","title":"transform_config()"},{"location":"base/metadata/#datadog_checks.base.utils.metadata.core.MetadataManager.transform_version","text":"Transforms a version like 1.2.3-rc.4+5 to its constituent parts. In all cases, the metadata names version.raw and version.scheme will be collected. If a scheme is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to semver . The supported schemes are: regex - A pattern must also be defined. The pattern must be a str or a pre-compiled re.Pattern . Any matching named subgroups will then be sent as version. . In this case, the check name will be used as the value of version.scheme unless final_scheme is also set, which will take precedence. parts - A part_map must also be defined. Each key in this mapping will be considered a name and will be sent with its ( str ) value. semver - This is essentially the same as regex with the pattern set to the standard regular expression for semantic versioning. Taking the example above, calling self . 
set_metadata ( 'version' , '1.2.3-rc.4+5' ) would produce: name value version.raw 1.2.3-rc.4+5 version.scheme semver version.major 1 version.minor 2 version.patch 3 version.release rc.4 version.build 5 Source code in def transform_version ( self , version , options ): \"\"\" Transforms a version like `1.2.3-rc.4+5` to its constituent parts. In all cases, the metadata names `version.raw` and `version.scheme` will be collected. If a `scheme` is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to `semver`. The supported schemes are: - `regex` - A `pattern` must also be defined. The pattern must be a `str` or a pre-compiled `re.Pattern`. Any matching named subgroups will then be sent as `version.`. In this case, the check name will be used as the value of `version.scheme` unless `final_scheme` is also set, which will take precedence. - `parts` - A `part_map` must also be defined. Each key in this mapping will be considered a `name` and will be sent with its (`str`) value. - `semver` - This is essentially the same as `regex` with the `pattern` set to the standard regular expression for semantic versioning. Taking the example above, calling `#!python self.set_metadata('version', '1.2.3-rc.4+5')` would produce: | name | value | | --- | --- | | `version.raw` | `1.2.3-rc.4+5` | | `version.scheme` | `semver` | | `version.major` | `1` | | `version.minor` | `2` | | `version.patch` | `3` | | `version.release` | `rc.4` | | `version.build` | `5` | \"\"\" scheme , version_parts = parse_version ( version , options ) if scheme == 'regex' or scheme == 'parts' : scheme = options . get ( 'final_scheme' , self . check_name ) data = { 'version. {} ' . format ( part_name ): part_value for part_name , part_value in iteritems ( version_parts )} data [ 'version.raw' ] = version data [ 'version.scheme' ] = scheme return data","title":"transform_version()"},{"location":"base/prometheus/","text":"Prometheus \u00b6 Prometheus is an open source monitoring system for timeseries metric data. Many Datadog integrations collect metrics based on Prometheus exported data sets. Prometheus-based integrations use the OpenMetrics exposition format to collect metrics. Interface \u00b6 All functionality is exposed by the OpenMetricsBaseCheck and OpenMetricsScraperMixin classes. datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck \u00b6 OpenMetricsBaseCheck is a class that helps scrape endpoints that emit Prometheus metrics only with YAML configurations. Minimal example configuration: instances: - prometheus_url: http://example.com/endpoint namespace: \"foobar\" metrics: - bar - foo Agent 6 signature: OpenMetricsBaseCheck(name, init_config, instances, default_instances=None, default_namespace=None) __init__ ( self , * args , ** kwargs ) special \u00b6 The base class for any Prometheus-based integration. Source code in def __init__ ( self , * args , ** kwargs ): \"\"\" The base class for any Prometheus-based integration. \"\"\" args = list ( args ) default_instances = kwargs . pop ( 'default_instances' , None ) or {} default_namespace = kwargs . pop ( 'default_namespace' , None ) legacy_kwargs_in_args = args [ 4 :] del args [ 4 :] if len ( legacy_kwargs_in_args ) > 0 : default_instances = legacy_kwargs_in_args [ 0 ] or {} if len ( legacy_kwargs_in_args ) > 1 : default_namespace = legacy_kwargs_in_args [ 1 ] super ( OpenMetricsBaseCheck , self ) . __init__ ( * args , ** kwargs ) self . config_map = {} self . _http_handlers = {} self . 
default_instances = default_instances self . default_namespace = default_namespace # pre-generate the scraper configurations if 'instances' in kwargs : instances = kwargs [ 'instances' ] elif len ( args ) == 4 : # instances from agent 5 signature instances = args [ 3 ] elif isinstance ( args [ 2 ], ( tuple , list )): # instances from agent 6 signature instances = args [ 2 ] else : instances = None if instances is not None : for instance in instances : self . get_scraper_config ( instance ) check ( self , instance ) \u00b6 Source code in def check ( self , instance ): # Get the configuration for this specific instance scraper_config = self . get_scraper_config ( instance ) # We should be specifying metrics for checks that are vanilla OpenMetricsBaseCheck-based if not scraper_config [ 'metrics_mapper' ]: raise CheckException ( \"You have to collect at least one metric from the endpoint: {} \" . format ( scraper_config [ 'prometheus_url' ]) ) self . process ( scraper_config ) get_scraper_config ( self , instance ) \u00b6 Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. Source code in def get_scraper_config ( self , instance ): \"\"\" Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. \"\"\" endpoint = instance . get ( 'prometheus_url' ) if endpoint is None : raise CheckException ( \"Unable to find prometheus URL in config file.\" ) # If we've already created the corresponding scraper configuration, return it if endpoint in self . config_map : return self . config_map [ endpoint ] # Otherwise, we create the scraper configuration config = self . create_scraper_configuration ( instance ) # Add this configuration to the config_map self . config_map [ endpoint ] = config return config datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin \u00b6 create_scraper_configuration ( self , instance = None ) \u00b6 Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the init_config . Otherwise, the default_instance value will be used. A default mixin configuration will be returned if there is no instance. Source code in def create_scraper_configuration ( self , instance = None ): \"\"\" Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the `init_config`. Otherwise, the `default_instance` value will be used. A default mixin configuration will be returned if there is no instance. \"\"\" if 'openmetrics_endpoint' in instance : raise CheckException ( 'The setting `openmetrics_endpoint` is only available for Agent version 7 or later' ) # We can choose to create a default mixin configuration for an empty instance if instance is None : instance = {} # Supports new configuration options config = copy . deepcopy ( instance ) # Set the endpoint endpoint = instance . get ( 'prometheus_url' ) if instance and endpoint is None : raise CheckException ( \"You have to define a prometheus_url for each prometheus instance\" ) config [ 'prometheus_url' ] = endpoint # `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the # child check class. namespace = instance . get ( 'namespace' ) # Check if we have a namespace if instance and namespace is None : if self . 
default_namespace is None : raise CheckException ( \"You have to define a namespace for each prometheus check\" ) namespace = self . default_namespace config [ 'namespace' ] = namespace # Retrieve potential default instance settings for the namespace default_instance = self . default_instances . get ( namespace , {}) # `metrics_mapper` is a dictionary where the keys are the metrics to capture # and the values are the corresponding metrics names to have in datadog. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. # Metrics are preprocessed if no mapping metrics_mapper = {} # We merge list and dictionaries from optional defaults & instance settings metrics = default_instance . get ( 'metrics' , []) + instance . get ( 'metrics' , []) for metric in metrics : if isinstance ( metric , string_types ): metrics_mapper [ metric ] = metric else : metrics_mapper . update ( metric ) config [ 'metrics_mapper' ] = metrics_mapper # `_wildcards_re` is a Pattern object used to match metric wildcards config [ '_wildcards_re' ] = None wildcards = set () for metric in config [ 'metrics_mapper' ]: if \"*\" in metric : wildcards . add ( translate ( metric )) if wildcards : config [ '_wildcards_re' ] = compile ( '|' . join ( wildcards )) # `prometheus_metrics_prefix` allows to specify a prefix that all # prometheus metrics should have. This can be used when the prometheus # endpoint we are scrapping allows to add a custom prefix to it's # metrics. config [ 'prometheus_metrics_prefix' ] = instance . get ( 'prometheus_metrics_prefix' , default_instance . get ( 'prometheus_metrics_prefix' , '' ) ) # `label_joins` holds the configuration for extracting 1:1 labels from # a target metric to all metric matching the label, example: # self.label_joins = { # 'kube_pod_info': { # 'labels_to_match': ['pod'], # 'labels_to_get': ['node', 'host_ip'] # } # } config [ 'label_joins' ] = default_instance . get ( 'label_joins' , {}) config [ 'label_joins' ] . update ( instance . get ( 'label_joins' , {})) # `_label_mapping` holds the additionals label info to add for a specific # label value, example: # self._label_mapping = { # 'pod': { # 'dd-agent-9s1l1': { # \"node\": \"yolo\", # \"host_ip\": \"yey\" # } # } # } config [ '_label_mapping' ] = {} # `_active_label_mapping` holds a dictionary of label values found during the run # to cleanup the label_mapping of unused values, example: # self._active_label_mapping = { # 'pod': { # 'dd-agent-9s1l1': True # } # } config [ '_active_label_mapping' ] = {} # `_watched_labels` holds the sets of labels to watch for enrichment config [ '_watched_labels' ] = {} config [ '_dry_run' ] = True # Some metrics are ignored because they are duplicates or introduce a # very high cardinality. Metrics included in this list will be silently # skipped without a 'Unable to handle metric' debug line in the logs config [ 'ignore_metrics' ] = instance . get ( 'ignore_metrics' , default_instance . get ( 'ignore_metrics' , [])) config [ '_ignored_metrics' ] = set () # `_ignored_re` is a Pattern object used to match ignored metric patterns config [ '_ignored_re' ] = None ignored_patterns = set () # Separate ignored metric names and ignored patterns in different sets for faster lookup later for metric in config [ 'ignore_metrics' ]: if '*' in metric : ignored_patterns . add ( translate ( metric )) else : config [ '_ignored_metrics' ] . add ( metric ) if ignored_patterns : config [ '_ignored_re' ] = compile ( '|' . 
join ( ignored_patterns )) # Ignore metrics based on label keys or specific label values config [ 'ignore_metrics_by_labels' ] = instance . get ( 'ignore_metrics_by_labels' , default_instance . get ( 'ignore_metrics_by_labels' , {}) ) # If you want to send the buckets as tagged values when dealing with histograms, # set send_histograms_buckets to True, set to False otherwise. config [ 'send_histograms_buckets' ] = is_affirmative ( instance . get ( 'send_histograms_buckets' , default_instance . get ( 'send_histograms_buckets' , True )) ) # If you want the bucket to be non cumulative and to come with upper/lower bound tags # set non_cumulative_buckets to True, enabled when distribution metrics are enabled. config [ 'non_cumulative_buckets' ] = is_affirmative ( instance . get ( 'non_cumulative_buckets' , default_instance . get ( 'non_cumulative_buckets' , False )) ) # Send histograms as datadog distribution metrics config [ 'send_distribution_buckets' ] = is_affirmative ( instance . get ( 'send_distribution_buckets' , default_instance . get ( 'send_distribution_buckets' , False )) ) # Non cumulative buckets are mandatory for distribution metrics if config [ 'send_distribution_buckets' ] is True : config [ 'non_cumulative_buckets' ] = True # If you want to send `counter` metrics as monotonic counts, set this value to True. # Set to False if you want to instead send those metrics as `gauge`. config [ 'send_monotonic_counter' ] = is_affirmative ( instance . get ( 'send_monotonic_counter' , default_instance . get ( 'send_monotonic_counter' , True )) ) # If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True. config [ 'send_monotonic_with_gauge' ] = is_affirmative ( instance . get ( 'send_monotonic_with_gauge' , default_instance . get ( 'send_monotonic_with_gauge' , False )) ) config [ 'send_distribution_counts_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_counts_as_monotonic' , default_instance . get ( 'send_distribution_counts_as_monotonic' , False ), ) ) config [ 'send_distribution_sums_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_sums_as_monotonic' , default_instance . get ( 'send_distribution_sums_as_monotonic' , False ), ) ) # If the `labels_mapper` dictionary is provided, the metrics labels names # in the `labels_mapper` will use the corresponding value as tag name # when sending the gauges. config [ 'labels_mapper' ] = default_instance . get ( 'labels_mapper' , {}) config [ 'labels_mapper' ] . update ( instance . get ( 'labels_mapper' , {})) # Rename bucket \"le\" label to \"upper_bound\" config [ 'labels_mapper' ][ 'le' ] = 'upper_bound' # `exclude_labels` is an array of labels names to exclude. Those labels # will just not be added as tags when submitting the metric. config [ 'exclude_labels' ] = default_instance . get ( 'exclude_labels' , []) + instance . get ( 'exclude_labels' , []) # `type_overrides` is a dictionary where the keys are prometheus metric names # and the values are a metric type (name as string) to use instead of the one # listed in the payload. It can be used to force a type on untyped metrics. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. config [ 'type_overrides' ] = default_instance . get ( 'type_overrides' , {}) config [ 'type_overrides' ] . update ( instance . 
get ( 'type_overrides' , {})) # `_type_override_patterns` is a dictionary where we store Pattern objects # that match metric names as keys, and their corresponding metric type overrrides as values. config [ '_type_override_patterns' ] = {} with_wildcards = set () for metric , type in iteritems ( config [ 'type_overrides' ]): if '*' in metric : config [ '_type_override_patterns' ][ compile ( translate ( metric ))] = type with_wildcards . add ( metric ) # cleanup metric names with wildcards from the 'type_overrides' dict for metric in with_wildcards : del config [ 'type_overrides' ][ metric ] # Some metrics are retrieved from differents hosts and often # a label can hold this information, this transfers it to the hostname config [ 'label_to_hostname' ] = instance . get ( 'label_to_hostname' , default_instance . get ( 'label_to_hostname' , None )) # In combination to label_as_hostname, allows to add a common suffix to the hostnames # submitted. This can be used for instance to discriminate hosts between clusters. config [ 'label_to_hostname_suffix' ] = instance . get ( 'label_to_hostname_suffix' , default_instance . get ( 'label_to_hostname_suffix' , None ) ) # Add a 'health' service check for the prometheus endpoint config [ 'health_service_check' ] = is_affirmative ( instance . get ( 'health_service_check' , default_instance . get ( 'health_service_check' , True )) ) # Can either be only the path to the certificate and thus you should specify the private key # or it can be the path to a file containing both the certificate & the private key config [ 'ssl_cert' ] = instance . get ( 'ssl_cert' , default_instance . get ( 'ssl_cert' , None )) # Needed if the certificate does not include the private key # # /!\\ The private key to your local certificate must be unencrypted. # Currently, Requests does not support using encrypted keys. config [ 'ssl_private_key' ] = instance . get ( 'ssl_private_key' , default_instance . get ( 'ssl_private_key' , None )) # The path to the trusted CA used for generating custom certificates config [ 'ssl_ca_cert' ] = instance . get ( 'ssl_ca_cert' , default_instance . get ( 'ssl_ca_cert' , None )) # Whether or not to validate SSL certificates config [ 'ssl_verify' ] = is_affirmative ( instance . get ( 'ssl_verify' , default_instance . get ( 'ssl_verify' , True ))) # Extra http headers to be sent when polling endpoint config [ 'extra_headers' ] = default_instance . get ( 'extra_headers' , {}) config [ 'extra_headers' ] . update ( instance . get ( 'extra_headers' , {})) # Timeout used during the network request config [ 'prometheus_timeout' ] = instance . get ( 'prometheus_timeout' , default_instance . get ( 'prometheus_timeout' , 10 ) ) # Authentication used when polling endpoint config [ 'username' ] = instance . get ( 'username' , default_instance . get ( 'username' , None )) config [ 'password' ] = instance . get ( 'password' , default_instance . get ( 'password' , None )) # Custom tags that will be sent with each metric config [ 'custom_tags' ] = instance . get ( 'tags' , []) # Some tags can be ignored to reduce the cardinality. # This can be useful for cost optimization in containerized environments # when the openmetrics check is configured to collect custom metrics. # Even when the Agent's Tagger is configured to add low-cardinality tags only, # some tags can still generate unwanted metric contexts (e.g pod annotations as tags). ignore_tags = instance . get ( 'ignore_tags' , default_instance . 
get ( 'ignore_tags' , [])) if ignore_tags : ignored_tags_re = compile ( '|' . join ( set ( ignore_tags ))) config [ 'custom_tags' ] = [ tag for tag in config [ 'custom_tags' ] if not ignored_tags_re . search ( tag )] # Additional tags to be sent with each metric config [ '_metric_tags' ] = [] # List of strings to filter the input text payload on. If any line contains # one of these strings, it will be filtered out before being parsed. # INTERNAL FEATURE, might be removed in future versions config [ '_text_filter_blacklist' ] = [] # Whether or not to use the service account bearer token for authentication # if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token # as a default path to get the token. config [ 'bearer_token_auth' ] = is_affirmative ( instance . get ( 'bearer_token_auth' , default_instance . get ( 'bearer_token_auth' , False )) ) # Can be used to get a service account bearer token from files # other than /var/run/secrets/kubernetes.io/serviceaccount/token # 'bearer_token_auth' should be enabled. config [ 'bearer_token_path' ] = instance . get ( 'bearer_token_path' , default_instance . get ( 'bearer_token_path' , None )) # The service account bearer token to be used for authentication config [ '_bearer_token' ] = self . _get_bearer_token ( config [ 'bearer_token_auth' ], config [ 'bearer_token_path' ]) config [ 'telemetry' ] = is_affirmative ( instance . get ( 'telemetry' , default_instance . get ( 'telemetry' , False ))) # The metric name services use to indicate build information config [ 'metadata_metric_name' ] = instance . get ( 'metadata_metric_name' , default_instance . get ( 'metadata_metric_name' ) ) # Map of metadata key names to label names config [ 'metadata_label_map' ] = instance . get ( 'metadata_label_map' , default_instance . get ( 'metadata_label_map' , {}) ) config [ '_default_metric_transformers' ] = {} if config [ 'metadata_metric_name' ] and config [ 'metadata_label_map' ]: config [ '_default_metric_transformers' ][ config [ 'metadata_metric_name' ]] = self . transform_metadata # Whether or not to enable flushing of the first value of monotonic counts config [ '_successfully_executed' ] = False return config parse_metric_family ( self , response , scraper_config ) \u00b6 Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object. The text format uses iter_lines() generator. Source code in def parse_metric_family ( self , response , scraper_config ): \"\"\" Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object. The text format uses iter_lines() generator. \"\"\" if response . encoding is None : response . encoding = 'utf-8' input_gen = response . iter_lines ( chunk_size = self . REQUESTS_CHUNK_SIZE , decode_unicode = True ) if scraper_config [ '_text_filter_blacklist' ]: input_gen = self . _text_filter_input ( input_gen , scraper_config ) for metric in text_fd_to_metric_families ( input_gen ): self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_INPUT_COUNT , len ( metric . samples ), scraper_config ) type_override = scraper_config [ 'type_overrides' ] . get ( metric . name ) if type_override : metric . type = type_override elif scraper_config [ '_type_override_patterns' ]: for pattern , new_type in iteritems ( scraper_config [ '_type_override_patterns' ]): if pattern . search ( metric . name ): metric . type = new_type break if metric . type not in self . METRIC_TYPES : continue metric . name = self . 
_remove_metric_prefix ( metric . name , scraper_config ) yield metric poll ( self , scraper_config , headers = None ) \u00b6 Returns a valid requests.Response , otherwise raise requests.HTTPError if the status code of the response isn't valid - see response.raise_for_status() The caller needs to close the requests.Response. Custom headers can be added to the default headers. Source code in def poll ( self , scraper_config , headers = None ): \"\"\" Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the response isn't valid - see `response.raise_for_status()` The caller needs to close the requests.Response. Custom headers can be added to the default headers. \"\"\" endpoint = scraper_config . get ( 'prometheus_url' ) # Should we send a service check for when we make a request health_service_check = scraper_config [ 'health_service_check' ] service_check_name = self . _metric_name_with_namespace ( 'prometheus.health' , scraper_config ) service_check_tags = [ 'endpoint: {} ' . format ( endpoint )] service_check_tags . extend ( scraper_config [ 'custom_tags' ]) try : response = self . send_request ( endpoint , scraper_config , headers ) except requests . exceptions . SSLError : self . log . error ( \"Invalid SSL settings for requesting %s endpoint\" , endpoint ) raise except IOError : if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise try : response . raise_for_status () if health_service_check : self . service_check ( service_check_name , AgentCheck . OK , tags = service_check_tags ) return response except requests . HTTPError : response . close () if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise process ( self , scraper_config , metric_transformers = None ) \u00b6 Polls the data from Prometheus and submits them as Datadog metrics. endpoint is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a tags attribute, it will be pushed automatically as additional custom tags and added to the metrics Source code in def process ( self , scraper_config , metric_transformers = None ): \"\"\" Polls the data from Prometheus and submits them as Datadog metrics. `endpoint` is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a `tags` attribute, it will be pushed automatically as additional custom tags and added to the metrics \"\"\" transformers = scraper_config [ '_default_metric_transformers' ] . copy () if metric_transformers : transformers . update ( metric_transformers ) for metric in self . scrape_metrics ( scraper_config ): self . 
process_metric ( metric , scraper_config , metric_transformers = transformers ) scraper_config [ '_successfully_executed' ] = True process_metric ( self , metric , scraper_config , metric_transformers = None ) \u00b6 Handle a Prometheus metric according to the following flow: - search scraper_config['metrics_mapper'] for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked metric_transformers is a dict of : Source code in def process_metric ( self , metric , scraper_config , metric_transformers = None ): \"\"\" Handle a Prometheus metric according to the following flow: - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked `metric_transformers` is a dict of `:` \"\"\" # If targeted metric, store labels self . _store_labels ( metric , scraper_config ) if scraper_config [ 'ignore_metrics' ]: if metric . name in scraper_config [ '_ignored_metrics' ]: self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric if scraper_config [ '_ignored_re' ] and scraper_config [ '_ignored_re' ] . search ( metric . name ): # Metric must be ignored scraper_config [ '_ignored_metrics' ] . add ( metric . name ) self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_PROCESS_COUNT , len ( metric . samples ), scraper_config ) if self . _filter_metric ( metric , scraper_config ): return # Ignore the metric # Filter metric to see if we can enrich with joined labels self . _join_labels ( metric , scraper_config ) if scraper_config [ '_dry_run' ]: return try : self . submit_openmetric ( scraper_config [ 'metrics_mapper' ][ metric . name ], metric , scraper_config ) except KeyError : if metric_transformers is not None and metric . name in metric_transformers : try : # Get the transformer function for this specific metric transformer = metric_transformers [ metric . name ] transformer ( metric , scraper_config ) except Exception as err : self . log . warning ( 'Error handling metric: %s - error: %s ' , metric . name , err ) return # check for wilcards in transformers for transformer_name , transformer in iteritems ( metric_transformers ): if transformer_name . endswith ( '*' ) and metric . name . startswith ( transformer_name [: - 1 ]): transformer ( metric , scraper_config , transformer_name ) # try matching wildcards if scraper_config [ '_wildcards_re' ] and scraper_config [ '_wildcards_re' ] . search ( metric . name ): self . submit_openmetric ( metric . name , metric , scraper_config ) return self . log . debug ( 'Skipping metric ` %s ` as it is not defined in the metrics mapper, ' 'has no transformer function, nor does it match any wildcards.' , metric . name , ) scrape_metrics ( self , scraper_config ) \u00b6 Poll the data from Prometheus and return the metrics as a generator. Source code in def scrape_metrics ( self , scraper_config ): \"\"\" Poll the data from Prometheus and return the metrics as a generator. \"\"\" response = self . poll ( scraper_config ) if scraper_config [ 'telemetry' ]: if 'content-length' in response . headers : content_len = int ( response . headers [ 'content-length' ]) else : content_len = len ( response . content ) self . 
_send_telemetry_gauge ( self . TELEMETRY_GAUGE_MESSAGE_SIZE , content_len , scraper_config ) try : # no dry run if no label joins if not scraper_config [ 'label_joins' ]: scraper_config [ '_dry_run' ] = False elif not scraper_config [ '_watched_labels' ]: watched = scraper_config [ '_watched_labels' ] watched [ 'sets' ] = {} watched [ 'keys' ] = {} watched [ 'singles' ] = set () for key , val in iteritems ( scraper_config [ 'label_joins' ]): labels = [] if 'labels_to_match' in val : labels = val [ 'labels_to_match' ] elif 'label_to_match' in val : self . log . warning ( \"`label_to_match` is being deprecated, please use `labels_to_match`\" ) if isinstance ( val [ 'label_to_match' ], list ): labels = val [ 'label_to_match' ] else : labels = [ val [ 'label_to_match' ]] if labels : s = frozenset ( labels ) watched [ 'sets' ][ key ] = s watched [ 'keys' ][ key ] = ',' . join ( s ) if len ( labels ) == 1 : watched [ 'singles' ] . add ( labels [ 0 ]) for metric in self . parse_metric_family ( response , scraper_config ): yield metric # Set dry run off scraper_config [ '_dry_run' ] = False # Garbage collect unused mapping and reset active labels for metric , mapping in list ( iteritems ( scraper_config [ '_label_mapping' ])): for key in list ( mapping ): if ( metric in scraper_config [ '_active_label_mapping' ] and key not in scraper_config [ '_active_label_mapping' ][ metric ] ): del scraper_config [ '_label_mapping' ][ metric ][ key ] scraper_config [ '_active_label_mapping' ] = {} finally : response . close () submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ) \u00b6 For each sample in the metric, report it as a gauge with all labels as tags except if a labels dict is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. send_histograms_buckets is used to specify if you want to send the buckets as tagged values when dealing with histograms. custom_tags is an array of tag:value that will be added to the metric when sending the gauge to Datadog. Source code in def submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ): \"\"\" For each sample in the metric, report it as a gauge with all labels as tags except if a labels `dict` is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. `send_histograms_buckets` is used to specify if you want to send the buckets as tagged values when dealing with histograms. `custom_tags` is an array of `tag:value` that will be added to the metric when sending the gauge to Datadog. \"\"\" if metric . type in [ \"gauge\" , \"counter\" , \"rate\" ]: metric_name_with_namespace = self . _metric_name_with_namespace ( metric_name , scraper_config ) for sample in metric . samples : if self . _ignore_metrics_by_label ( scraper_config , metric_name , sample ): continue val = sample [ self . SAMPLE_VALUE ] if not self . _is_value_valid ( val ): self . log . debug ( \"Metric value is not supported for metric %s \" , sample [ self . SAMPLE_NAME ]) continue custom_hostname = self . _get_hostname ( hostname , sample , scraper_config ) # Determine the tags to send tags = self . _metric_tags ( metric_name , val , sample , scraper_config , hostname = custom_hostname ) if metric . 
type == \"counter\" and scraper_config [ 'send_monotonic_counter' ]: self . monotonic_count ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"rate\" : self . rate ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) else : self . gauge ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) # Metric is a \"counter\" but legacy behavior has \"send_as_monotonic\" defaulted to False # Submit metric as monotonic_count with appended name if metric . type == \"counter\" and scraper_config [ 'send_monotonic_with_gauge' ]: self . monotonic_count ( metric_name_with_namespace + '.total' , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"histogram\" : self . _submit_gauges_from_histogram ( metric_name , metric , scraper_config ) elif metric . type == \"summary\" : self . _submit_gauges_from_summary ( metric_name , metric , scraper_config ) else : self . log . error ( \"Metric type %s unsupported for metric %s .\" , metric . type , metric_name ) Options \u00b6 Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. All HTTP options are also supported. prometheus_url namespace metrics prometheus_metrics_prefix health_service_check label_to_hostname label_joins labels_mapper type_overrides send_histograms_buckets send_distribution_buckets send_monotonic_counter send_monotonic_with_gauge send_distribution_counts_as_monotonic send_distribution_sums_as_monotonic exclude_labels bearer_token_auth bearer_token_path ignore_metrics Prometheus to Datadog metric types \u00b6 The Openmetrics Base Check supports various configurations for submitting Prometheus metrics to Datadog. We currently support Prometheus gauge , counter , histogram , and summary metric types. Gauge \u00b6 A gauge metric represents a single numerical value that can arbitrarily go up or down. Prometheus gauge metrics are submitted as Datadog gauge metrics. Counter \u00b6 A Prometheus counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. Config Option Value Datadog Metric Submitted send_monotonic_counter true (default) monotonic_count false gauge Histogram \u00b6 A Prometheus histogram samples observations and counts them in configurable buckets along with a sum of all observed values. Histogram metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. _bucket represent the cumulative counters for the observation buckets. Note that buckets are only submitted if send_histogram_buckets is enabled. Subtype Config Option Value Datadog Metric Submitted send_distribution_buckets true The entire histogram can be submitted as a single distribution metric . If the option is enabled, none of the subtype metrics will be submitted. 
_sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _bucket non_cumulative_buckets false (default) gauge true monotonic_count under .count metric name if send_distribution_counts_as_monotonic is enabled. Otherwise, gauge . Summary \u00b6 Prometheus summary metrics are similar to histograms but allow configurable quantiles. Summary metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. metrics with labels like {quantile=\"<\u03c6>\"} represent the streaming quantiles of observed events. Subtype Config Option Value Datadog Metric Submitted _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _quantile gauge","title":"Prometheus"},{"location":"base/prometheus/#prometheus","text":"Prometheus is an open source monitoring system for timeseries metric data. Many Datadog integrations collect metrics based on Prometheus exported data sets. Prometheus-based integrations use the OpenMetrics exposition format to collect metrics.","title":"Prometheus"},{"location":"base/prometheus/#interface","text":"All functionality is exposed by the OpenMetricsBaseCheck and OpenMetricsScraperMixin classes.","title":"Interface"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck","text":"OpenMetricsBaseCheck is a class that helps scrape endpoints that emit Prometheus metrics only with YAML configurations. Minimal example configuration: instances: - prometheus_url: http://example.com/endpoint namespace: \"foobar\" metrics: - bar - foo Agent 6 signature: OpenMetricsBaseCheck(name, init_config, instances, default_instances=None, default_namespace=None)","title":"OpenMetricsBaseCheck"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.__init__","text":"The base class for any Prometheus-based integration. Source code in def __init__ ( self , * args , ** kwargs ): \"\"\" The base class for any Prometheus-based integration. \"\"\" args = list ( args ) default_instances = kwargs . pop ( 'default_instances' , None ) or {} default_namespace = kwargs . pop ( 'default_namespace' , None ) legacy_kwargs_in_args = args [ 4 :] del args [ 4 :] if len ( legacy_kwargs_in_args ) > 0 : default_instances = legacy_kwargs_in_args [ 0 ] or {} if len ( legacy_kwargs_in_args ) > 1 : default_namespace = legacy_kwargs_in_args [ 1 ] super ( OpenMetricsBaseCheck , self ) . __init__ ( * args , ** kwargs ) self . config_map = {} self . _http_handlers = {} self . default_instances = default_instances self . default_namespace = default_namespace # pre-generate the scraper configurations if 'instances' in kwargs : instances = kwargs [ 'instances' ] elif len ( args ) == 4 : # instances from agent 5 signature instances = args [ 3 ] elif isinstance ( args [ 2 ], ( tuple , list )): # instances from agent 6 signature instances = args [ 2 ] else : instances = None if instances is not None : for instance in instances : self . 
get_scraper_config ( instance )","title":"__init__()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.check","text":"Source code in def check ( self , instance ): # Get the configuration for this specific instance scraper_config = self . get_scraper_config ( instance ) # We should be specifying metrics for checks that are vanilla OpenMetricsBaseCheck-based if not scraper_config [ 'metrics_mapper' ]: raise CheckException ( \"You have to collect at least one metric from the endpoint: {} \" . format ( scraper_config [ 'prometheus_url' ]) ) self . process ( scraper_config )","title":"check()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.get_scraper_config","text":"Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. Source code in def get_scraper_config ( self , instance ): \"\"\" Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. \"\"\" endpoint = instance . get ( 'prometheus_url' ) if endpoint is None : raise CheckException ( \"Unable to find prometheus URL in config file.\" ) # If we've already created the corresponding scraper configuration, return it if endpoint in self . config_map : return self . config_map [ endpoint ] # Otherwise, we create the scraper configuration config = self . create_scraper_configuration ( instance ) # Add this configuration to the config_map self . config_map [ endpoint ] = config return config","title":"get_scraper_config()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin","text":"","title":"OpenMetricsScraperMixin"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.create_scraper_configuration","text":"Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the init_config . Otherwise, the default_instance value will be used. A default mixin configuration will be returned if there is no instance. Source code in def create_scraper_configuration ( self , instance = None ): \"\"\" Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the `init_config`. Otherwise, the `default_instance` value will be used. A default mixin configuration will be returned if there is no instance. \"\"\" if 'openmetrics_endpoint' in instance : raise CheckException ( 'The setting `openmetrics_endpoint` is only available for Agent version 7 or later' ) # We can choose to create a default mixin configuration for an empty instance if instance is None : instance = {} # Supports new configuration options config = copy . deepcopy ( instance ) # Set the endpoint endpoint = instance . get ( 'prometheus_url' ) if instance and endpoint is None : raise CheckException ( \"You have to define a prometheus_url for each prometheus instance\" ) config [ 'prometheus_url' ] = endpoint # `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the # child check class. namespace = instance . get ( 'namespace' ) # Check if we have a namespace if instance and namespace is None : if self . 
default_namespace is None : raise CheckException ( \"You have to define a namespace for each prometheus check\" ) namespace = self . default_namespace config [ 'namespace' ] = namespace # Retrieve potential default instance settings for the namespace default_instance = self . default_instances . get ( namespace , {}) # `metrics_mapper` is a dictionary where the keys are the metrics to capture # and the values are the corresponding metrics names to have in datadog. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. # Metrics are preprocessed if no mapping metrics_mapper = {} # We merge list and dictionaries from optional defaults & instance settings metrics = default_instance . get ( 'metrics' , []) + instance . get ( 'metrics' , []) for metric in metrics : if isinstance ( metric , string_types ): metrics_mapper [ metric ] = metric else : metrics_mapper . update ( metric ) config [ 'metrics_mapper' ] = metrics_mapper # `_wildcards_re` is a Pattern object used to match metric wildcards config [ '_wildcards_re' ] = None wildcards = set () for metric in config [ 'metrics_mapper' ]: if \"*\" in metric : wildcards . add ( translate ( metric )) if wildcards : config [ '_wildcards_re' ] = compile ( '|' . join ( wildcards )) # `prometheus_metrics_prefix` allows to specify a prefix that all # prometheus metrics should have. This can be used when the prometheus # endpoint we are scrapping allows to add a custom prefix to it's # metrics. config [ 'prometheus_metrics_prefix' ] = instance . get ( 'prometheus_metrics_prefix' , default_instance . get ( 'prometheus_metrics_prefix' , '' ) ) # `label_joins` holds the configuration for extracting 1:1 labels from # a target metric to all metric matching the label, example: # self.label_joins = { # 'kube_pod_info': { # 'labels_to_match': ['pod'], # 'labels_to_get': ['node', 'host_ip'] # } # } config [ 'label_joins' ] = default_instance . get ( 'label_joins' , {}) config [ 'label_joins' ] . update ( instance . get ( 'label_joins' , {})) # `_label_mapping` holds the additionals label info to add for a specific # label value, example: # self._label_mapping = { # 'pod': { # 'dd-agent-9s1l1': { # \"node\": \"yolo\", # \"host_ip\": \"yey\" # } # } # } config [ '_label_mapping' ] = {} # `_active_label_mapping` holds a dictionary of label values found during the run # to cleanup the label_mapping of unused values, example: # self._active_label_mapping = { # 'pod': { # 'dd-agent-9s1l1': True # } # } config [ '_active_label_mapping' ] = {} # `_watched_labels` holds the sets of labels to watch for enrichment config [ '_watched_labels' ] = {} config [ '_dry_run' ] = True # Some metrics are ignored because they are duplicates or introduce a # very high cardinality. Metrics included in this list will be silently # skipped without a 'Unable to handle metric' debug line in the logs config [ 'ignore_metrics' ] = instance . get ( 'ignore_metrics' , default_instance . get ( 'ignore_metrics' , [])) config [ '_ignored_metrics' ] = set () # `_ignored_re` is a Pattern object used to match ignored metric patterns config [ '_ignored_re' ] = None ignored_patterns = set () # Separate ignored metric names and ignored patterns in different sets for faster lookup later for metric in config [ 'ignore_metrics' ]: if '*' in metric : ignored_patterns . add ( translate ( metric )) else : config [ '_ignored_metrics' ] . add ( metric ) if ignored_patterns : config [ '_ignored_re' ] = compile ( '|' . 
join ( ignored_patterns )) # Ignore metrics based on label keys or specific label values config [ 'ignore_metrics_by_labels' ] = instance . get ( 'ignore_metrics_by_labels' , default_instance . get ( 'ignore_metrics_by_labels' , {}) ) # If you want to send the buckets as tagged values when dealing with histograms, # set send_histograms_buckets to True, set to False otherwise. config [ 'send_histograms_buckets' ] = is_affirmative ( instance . get ( 'send_histograms_buckets' , default_instance . get ( 'send_histograms_buckets' , True )) ) # If you want the bucket to be non cumulative and to come with upper/lower bound tags # set non_cumulative_buckets to True, enabled when distribution metrics are enabled. config [ 'non_cumulative_buckets' ] = is_affirmative ( instance . get ( 'non_cumulative_buckets' , default_instance . get ( 'non_cumulative_buckets' , False )) ) # Send histograms as datadog distribution metrics config [ 'send_distribution_buckets' ] = is_affirmative ( instance . get ( 'send_distribution_buckets' , default_instance . get ( 'send_distribution_buckets' , False )) ) # Non cumulative buckets are mandatory for distribution metrics if config [ 'send_distribution_buckets' ] is True : config [ 'non_cumulative_buckets' ] = True # If you want to send `counter` metrics as monotonic counts, set this value to True. # Set to False if you want to instead send those metrics as `gauge`. config [ 'send_monotonic_counter' ] = is_affirmative ( instance . get ( 'send_monotonic_counter' , default_instance . get ( 'send_monotonic_counter' , True )) ) # If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True. config [ 'send_monotonic_with_gauge' ] = is_affirmative ( instance . get ( 'send_monotonic_with_gauge' , default_instance . get ( 'send_monotonic_with_gauge' , False )) ) config [ 'send_distribution_counts_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_counts_as_monotonic' , default_instance . get ( 'send_distribution_counts_as_monotonic' , False ), ) ) config [ 'send_distribution_sums_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_sums_as_monotonic' , default_instance . get ( 'send_distribution_sums_as_monotonic' , False ), ) ) # If the `labels_mapper` dictionary is provided, the metrics labels names # in the `labels_mapper` will use the corresponding value as tag name # when sending the gauges. config [ 'labels_mapper' ] = default_instance . get ( 'labels_mapper' , {}) config [ 'labels_mapper' ] . update ( instance . get ( 'labels_mapper' , {})) # Rename bucket \"le\" label to \"upper_bound\" config [ 'labels_mapper' ][ 'le' ] = 'upper_bound' # `exclude_labels` is an array of labels names to exclude. Those labels # will just not be added as tags when submitting the metric. config [ 'exclude_labels' ] = default_instance . get ( 'exclude_labels' , []) + instance . get ( 'exclude_labels' , []) # `type_overrides` is a dictionary where the keys are prometheus metric names # and the values are a metric type (name as string) to use instead of the one # listed in the payload. It can be used to force a type on untyped metrics. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. config [ 'type_overrides' ] = default_instance . get ( 'type_overrides' , {}) config [ 'type_overrides' ] . update ( instance . 
get ( 'type_overrides' , {})) # `_type_override_patterns` is a dictionary where we store Pattern objects # that match metric names as keys, and their corresponding metric type overrrides as values. config [ '_type_override_patterns' ] = {} with_wildcards = set () for metric , type in iteritems ( config [ 'type_overrides' ]): if '*' in metric : config [ '_type_override_patterns' ][ compile ( translate ( metric ))] = type with_wildcards . add ( metric ) # cleanup metric names with wildcards from the 'type_overrides' dict for metric in with_wildcards : del config [ 'type_overrides' ][ metric ] # Some metrics are retrieved from differents hosts and often # a label can hold this information, this transfers it to the hostname config [ 'label_to_hostname' ] = instance . get ( 'label_to_hostname' , default_instance . get ( 'label_to_hostname' , None )) # In combination to label_as_hostname, allows to add a common suffix to the hostnames # submitted. This can be used for instance to discriminate hosts between clusters. config [ 'label_to_hostname_suffix' ] = instance . get ( 'label_to_hostname_suffix' , default_instance . get ( 'label_to_hostname_suffix' , None ) ) # Add a 'health' service check for the prometheus endpoint config [ 'health_service_check' ] = is_affirmative ( instance . get ( 'health_service_check' , default_instance . get ( 'health_service_check' , True )) ) # Can either be only the path to the certificate and thus you should specify the private key # or it can be the path to a file containing both the certificate & the private key config [ 'ssl_cert' ] = instance . get ( 'ssl_cert' , default_instance . get ( 'ssl_cert' , None )) # Needed if the certificate does not include the private key # # /!\\ The private key to your local certificate must be unencrypted. # Currently, Requests does not support using encrypted keys. config [ 'ssl_private_key' ] = instance . get ( 'ssl_private_key' , default_instance . get ( 'ssl_private_key' , None )) # The path to the trusted CA used for generating custom certificates config [ 'ssl_ca_cert' ] = instance . get ( 'ssl_ca_cert' , default_instance . get ( 'ssl_ca_cert' , None )) # Whether or not to validate SSL certificates config [ 'ssl_verify' ] = is_affirmative ( instance . get ( 'ssl_verify' , default_instance . get ( 'ssl_verify' , True ))) # Extra http headers to be sent when polling endpoint config [ 'extra_headers' ] = default_instance . get ( 'extra_headers' , {}) config [ 'extra_headers' ] . update ( instance . get ( 'extra_headers' , {})) # Timeout used during the network request config [ 'prometheus_timeout' ] = instance . get ( 'prometheus_timeout' , default_instance . get ( 'prometheus_timeout' , 10 ) ) # Authentication used when polling endpoint config [ 'username' ] = instance . get ( 'username' , default_instance . get ( 'username' , None )) config [ 'password' ] = instance . get ( 'password' , default_instance . get ( 'password' , None )) # Custom tags that will be sent with each metric config [ 'custom_tags' ] = instance . get ( 'tags' , []) # Some tags can be ignored to reduce the cardinality. # This can be useful for cost optimization in containerized environments # when the openmetrics check is configured to collect custom metrics. # Even when the Agent's Tagger is configured to add low-cardinality tags only, # some tags can still generate unwanted metric contexts (e.g pod annotations as tags). ignore_tags = instance . get ( 'ignore_tags' , default_instance . 
get ( 'ignore_tags' , [])) if ignore_tags : ignored_tags_re = compile ( '|' . join ( set ( ignore_tags ))) config [ 'custom_tags' ] = [ tag for tag in config [ 'custom_tags' ] if not ignored_tags_re . search ( tag )] # Additional tags to be sent with each metric config [ '_metric_tags' ] = [] # List of strings to filter the input text payload on. If any line contains # one of these strings, it will be filtered out before being parsed. # INTERNAL FEATURE, might be removed in future versions config [ '_text_filter_blacklist' ] = [] # Whether or not to use the service account bearer token for authentication # if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token # as a default path to get the token. config [ 'bearer_token_auth' ] = is_affirmative ( instance . get ( 'bearer_token_auth' , default_instance . get ( 'bearer_token_auth' , False )) ) # Can be used to get a service account bearer token from files # other than /var/run/secrets/kubernetes.io/serviceaccount/token # 'bearer_token_auth' should be enabled. config [ 'bearer_token_path' ] = instance . get ( 'bearer_token_path' , default_instance . get ( 'bearer_token_path' , None )) # The service account bearer token to be used for authentication config [ '_bearer_token' ] = self . _get_bearer_token ( config [ 'bearer_token_auth' ], config [ 'bearer_token_path' ]) config [ 'telemetry' ] = is_affirmative ( instance . get ( 'telemetry' , default_instance . get ( 'telemetry' , False ))) # The metric name services use to indicate build information config [ 'metadata_metric_name' ] = instance . get ( 'metadata_metric_name' , default_instance . get ( 'metadata_metric_name' ) ) # Map of metadata key names to label names config [ 'metadata_label_map' ] = instance . get ( 'metadata_label_map' , default_instance . get ( 'metadata_label_map' , {}) ) config [ '_default_metric_transformers' ] = {} if config [ 'metadata_metric_name' ] and config [ 'metadata_label_map' ]: config [ '_default_metric_transformers' ][ config [ 'metadata_metric_name' ]] = self . transform_metadata # Whether or not to enable flushing of the first value of monotonic counts config [ '_successfully_executed' ] = False return config","title":"create_scraper_configuration()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.parse_metric_family","text":"Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object. The text format uses iter_lines() generator. Source code in def parse_metric_family ( self , response , scraper_config ): \"\"\" Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object. The text format uses iter_lines() generator. \"\"\" if response . encoding is None : response . encoding = 'utf-8' input_gen = response . iter_lines ( chunk_size = self . REQUESTS_CHUNK_SIZE , decode_unicode = True ) if scraper_config [ '_text_filter_blacklist' ]: input_gen = self . _text_filter_input ( input_gen , scraper_config ) for metric in text_fd_to_metric_families ( input_gen ): self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_INPUT_COUNT , len ( metric . samples ), scraper_config ) type_override = scraper_config [ 'type_overrides' ] . get ( metric . name ) if type_override : metric . type = type_override elif scraper_config [ '_type_override_patterns' ]: for pattern , new_type in iteritems ( scraper_config [ '_type_override_patterns' ]): if pattern . search ( metric . name ): metric . 
type = new_type break if metric . type not in self . METRIC_TYPES : continue metric . name = self . _remove_metric_prefix ( metric . name , scraper_config ) yield metric","title":"parse_metric_family()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.poll","text":"Returns a valid requests.Response , otherwise raise requests.HTTPError if the status code of the response isn't valid - see response.raise_for_status() The caller needs to close the requests.Response. Custom headers can be added to the default headers. Source code in def poll ( self , scraper_config , headers = None ): \"\"\" Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the response isn't valid - see `response.raise_for_status()` The caller needs to close the requests.Response. Custom headers can be added to the default headers. \"\"\" endpoint = scraper_config . get ( 'prometheus_url' ) # Should we send a service check for when we make a request health_service_check = scraper_config [ 'health_service_check' ] service_check_name = self . _metric_name_with_namespace ( 'prometheus.health' , scraper_config ) service_check_tags = [ 'endpoint: {} ' . format ( endpoint )] service_check_tags . extend ( scraper_config [ 'custom_tags' ]) try : response = self . send_request ( endpoint , scraper_config , headers ) except requests . exceptions . SSLError : self . log . error ( \"Invalid SSL settings for requesting %s endpoint\" , endpoint ) raise except IOError : if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise try : response . raise_for_status () if health_service_check : self . service_check ( service_check_name , AgentCheck . OK , tags = service_check_tags ) return response except requests . HTTPError : response . close () if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise","title":"poll()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.process","text":"Polls the data from Prometheus and submits them as Datadog metrics. endpoint is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a tags attribute, it will be pushed automatically as additional custom tags and added to the metrics Source code in def process ( self , scraper_config , metric_transformers = None ): \"\"\" Polls the data from Prometheus and submits them as Datadog metrics. `endpoint` is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a `tags` attribute, it will be pushed automatically as additional custom tags and added to the metrics \"\"\" transformers = scraper_config [ '_default_metric_transformers' ] . copy () if metric_transformers : transformers . update ( metric_transformers ) for metric in self . scrape_metrics ( scraper_config ): self . 
process_metric ( metric , scraper_config , metric_transformers = transformers ) scraper_config [ '_successfully_executed' ] = True","title":"process()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.process_metric","text":"Handle a Prometheus metric according to the following flow: - search scraper_config['metrics_mapper'] for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked metric_transformers is a dict of : Source code in def process_metric ( self , metric , scraper_config , metric_transformers = None ): \"\"\" Handle a Prometheus metric according to the following flow: - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked `metric_transformers` is a dict of `:` \"\"\" # If targeted metric, store labels self . _store_labels ( metric , scraper_config ) if scraper_config [ 'ignore_metrics' ]: if metric . name in scraper_config [ '_ignored_metrics' ]: self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric if scraper_config [ '_ignored_re' ] and scraper_config [ '_ignored_re' ] . search ( metric . name ): # Metric must be ignored scraper_config [ '_ignored_metrics' ] . add ( metric . name ) self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_PROCESS_COUNT , len ( metric . samples ), scraper_config ) if self . _filter_metric ( metric , scraper_config ): return # Ignore the metric # Filter metric to see if we can enrich with joined labels self . _join_labels ( metric , scraper_config ) if scraper_config [ '_dry_run' ]: return try : self . submit_openmetric ( scraper_config [ 'metrics_mapper' ][ metric . name ], metric , scraper_config ) except KeyError : if metric_transformers is not None and metric . name in metric_transformers : try : # Get the transformer function for this specific metric transformer = metric_transformers [ metric . name ] transformer ( metric , scraper_config ) except Exception as err : self . log . warning ( 'Error handling metric: %s - error: %s ' , metric . name , err ) return # check for wilcards in transformers for transformer_name , transformer in iteritems ( metric_transformers ): if transformer_name . endswith ( '*' ) and metric . name . startswith ( transformer_name [: - 1 ]): transformer ( metric , scraper_config , transformer_name ) # try matching wildcards if scraper_config [ '_wildcards_re' ] and scraper_config [ '_wildcards_re' ] . search ( metric . name ): self . submit_openmetric ( metric . name , metric , scraper_config ) return self . log . debug ( 'Skipping metric ` %s ` as it is not defined in the metrics mapper, ' 'has no transformer function, nor does it match any wildcards.' , metric . name , )","title":"process_metric()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.scrape_metrics","text":"Poll the data from Prometheus and return the metrics as a generator. Source code in def scrape_metrics ( self , scraper_config ): \"\"\" Poll the data from Prometheus and return the metrics as a generator. \"\"\" response = self . 
poll ( scraper_config ) if scraper_config [ 'telemetry' ]: if 'content-length' in response . headers : content_len = int ( response . headers [ 'content-length' ]) else : content_len = len ( response . content ) self . _send_telemetry_gauge ( self . TELEMETRY_GAUGE_MESSAGE_SIZE , content_len , scraper_config ) try : # no dry run if no label joins if not scraper_config [ 'label_joins' ]: scraper_config [ '_dry_run' ] = False elif not scraper_config [ '_watched_labels' ]: watched = scraper_config [ '_watched_labels' ] watched [ 'sets' ] = {} watched [ 'keys' ] = {} watched [ 'singles' ] = set () for key , val in iteritems ( scraper_config [ 'label_joins' ]): labels = [] if 'labels_to_match' in val : labels = val [ 'labels_to_match' ] elif 'label_to_match' in val : self . log . warning ( \"`label_to_match` is being deprecated, please use `labels_to_match`\" ) if isinstance ( val [ 'label_to_match' ], list ): labels = val [ 'label_to_match' ] else : labels = [ val [ 'label_to_match' ]] if labels : s = frozenset ( labels ) watched [ 'sets' ][ key ] = s watched [ 'keys' ][ key ] = ',' . join ( s ) if len ( labels ) == 1 : watched [ 'singles' ] . add ( labels [ 0 ]) for metric in self . parse_metric_family ( response , scraper_config ): yield metric # Set dry run off scraper_config [ '_dry_run' ] = False # Garbage collect unused mapping and reset active labels for metric , mapping in list ( iteritems ( scraper_config [ '_label_mapping' ])): for key in list ( mapping ): if ( metric in scraper_config [ '_active_label_mapping' ] and key not in scraper_config [ '_active_label_mapping' ][ metric ] ): del scraper_config [ '_label_mapping' ][ metric ][ key ] scraper_config [ '_active_label_mapping' ] = {} finally : response . close ()","title":"scrape_metrics()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.submit_openmetric","text":"For each sample in the metric, report it as a gauge with all labels as tags except if a labels dict is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. send_histograms_buckets is used to specify if you want to send the buckets as tagged values when dealing with histograms. custom_tags is an array of tag:value that will be added to the metric when sending the gauge to Datadog. Source code in def submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ): \"\"\" For each sample in the metric, report it as a gauge with all labels as tags except if a labels `dict` is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. `send_histograms_buckets` is used to specify if you want to send the buckets as tagged values when dealing with histograms. `custom_tags` is an array of `tag:value` that will be added to the metric when sending the gauge to Datadog. \"\"\" if metric . type in [ \"gauge\" , \"counter\" , \"rate\" ]: metric_name_with_namespace = self . _metric_name_with_namespace ( metric_name , scraper_config ) for sample in metric . samples : if self . _ignore_metrics_by_label ( scraper_config , metric_name , sample ): continue val = sample [ self . SAMPLE_VALUE ] if not self . _is_value_valid ( val ): self . log . debug ( \"Metric value is not supported for metric %s \" , sample [ self . 
SAMPLE_NAME ]) continue custom_hostname = self . _get_hostname ( hostname , sample , scraper_config ) # Determine the tags to send tags = self . _metric_tags ( metric_name , val , sample , scraper_config , hostname = custom_hostname ) if metric . type == \"counter\" and scraper_config [ 'send_monotonic_counter' ]: self . monotonic_count ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"rate\" : self . rate ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) else : self . gauge ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) # Metric is a \"counter\" but legacy behavior has \"send_as_monotonic\" defaulted to False # Submit metric as monotonic_count with appended name if metric . type == \"counter\" and scraper_config [ 'send_monotonic_with_gauge' ]: self . monotonic_count ( metric_name_with_namespace + '.total' , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"histogram\" : self . _submit_gauges_from_histogram ( metric_name , metric , scraper_config ) elif metric . type == \"summary\" : self . _submit_gauges_from_summary ( metric_name , metric , scraper_config ) else : self . log . error ( \"Metric type %s unsupported for metric %s .\" , metric . type , metric_name )","title":"submit_openmetric()"},{"location":"base/prometheus/#options","text":"Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. All HTTP options are also supported. prometheus_url namespace metrics prometheus_metrics_prefix health_service_check label_to_hostname label_joins labels_mapper type_overrides send_histograms_buckets send_distribution_buckets send_monotonic_counter send_monotonic_with_gauge send_distribution_counts_as_monotonic send_distribution_sums_as_monotonic exclude_labels bearer_token_auth bearer_token_path ignore_metrics","title":"Options"},{"location":"base/prometheus/#prometheus-to-datadog-metric-types","text":"The Openmetrics Base Check supports various configurations for submitting Prometheus metrics to Datadog. We currently support Prometheus gauge , counter , histogram , and summary metric types.","title":"Prometheus to Datadog metric types"},{"location":"base/prometheus/#gauge","text":"A gauge metric represents a single numerical value that can arbitrarily go up or down. Prometheus gauge metrics are submitted as Datadog gauge metrics.","title":"Gauge"},{"location":"base/prometheus/#counter","text":"A Prometheus counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. Config Option Value Datadog Metric Submitted send_monotonic_counter true (default) monotonic_count false gauge","title":"Counter"},{"location":"base/prometheus/#histogram","text":"A Prometheus histogram samples observations and counts them in configurable buckets along with a sum of all observed values. Histogram metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. 
_bucket represent the cumulative counters for the observation buckets. Note that buckets are only submitted if send_histogram_buckets is enabled. Subtype Config Option Value Datadog Metric Submitted send_distribution_buckets true The entire histogram can be submitted as a single distribution metric . If the option is enabled, none of the subtype metrics will be submitted. _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _bucket non_cumulative_buckets false (default) gauge true monotonic_count under .count metric name if send_distribution_counts_as_monotonic is enabled. Otherwise, gauge .","title":"Histogram"},{"location":"base/prometheus/#summary","text":"Prometheus summary metrics are similar to histograms but allow configurable quantiles. Summary metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. metrics with labels like {quantile=\"<\u03c6>\"} represent the streaming quantiles of observed events. Subtype Config Option Value Datadog Metric Submitted _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _quantile gauge","title":"Summary"},{"location":"base/tls/","text":"TLS/SSL \u00b6 TLS/SSL is widely used to provide communications over a secure network. Many of the software that Datadog supports has features to allow TLS/SSL. Therefore, the Datadog Agent may need to connect with TLS/SSL to get metrics. Getting started \u00b6 For Agent v7.24+, checks compatible with TLS/SSL should not manually create a raw ssl.SSLContext . Instead, check implementations should use AgentCheck.get_tls_context() to obtain a TLS/SSL context. get_tls_context() allows a few optional parameters which may be helpful when developing integrations. datadog_checks . base . checks . base . AgentCheck . get_tls_context ( self , refresh = False , overrides = None ) \u00b6 Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using overrides . This should only be applied to older integration that manually set config values. Since: Agent 7.24 Source code in def get_tls_context ( self , refresh = False , overrides = None ): # type: (bool, Dict[AnyStr, Any]) -> ssl.SSLContext \"\"\" Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using `overrides`. This should only be applied to older integration that manually set config values. Since: Agent 7.24 \"\"\" if not hasattr ( self , '_tls_context_wrapper' ): self . _tls_context_wrapper = TlsContextWrapper ( self . instance or {}, self . TLS_CONFIG_REMAPPER , overrides = overrides ) if refresh : self . _tls_context_wrapper . refresh_tls_context () return self . _tls_context_wrapper . tls_context","title":"TLS/SSL"},{"location":"base/tls/#tlsssl","text":"TLS/SSL is widely used to provide communications over a secure network. Many of the software that Datadog supports has features to allow TLS/SSL. 
Therefore, the Datadog Agent may need to connect with TLS/SSL to get metrics.","title":"TLS/SSL"},{"location":"base/tls/#getting-started","text":"For Agent v7.24+, checks compatible with TLS/SSL should not manually create a raw ssl.SSLContext . Instead, check implementations should use AgentCheck.get_tls_context() to obtain a TLS/SSL context. get_tls_context() allows a few optional parameters which may be helpful when developing integrations.","title":"Getting started"},{"location":"base/tls/#datadog_checks.base.checks.base.AgentCheck.get_tls_context","text":"Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using overrides . This should only be applied to older integration that manually set config values. Since: Agent 7.24 Source code in def get_tls_context ( self , refresh = False , overrides = None ): # type: (bool, Dict[AnyStr, Any]) -> ssl.SSLContext \"\"\" Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using `overrides`. This should only be applied to older integration that manually set config values. Since: Agent 7.24 \"\"\" if not hasattr ( self , '_tls_context_wrapper' ): self . _tls_context_wrapper = TlsContextWrapper ( self . instance or {}, self . TLS_CONFIG_REMAPPER , overrides = overrides ) if refresh : self . _tls_context_wrapper . refresh_tls_context () return self . _tls_context_wrapper . tls_context","title":"get_tls_context()"},{"location":"ddev/about/","text":"What's in the box? \u00b6 The Dev package , often referred to as its CLI entrypoint ddev , is fundamentally split into 2 parts. Test framework \u00b6 The test framework provides everything necessary to test integrations, such as: Dependencies like pytest , mock , requests , etc. Utilities for consistently handling complex logic or common operations An orchestrator for arbitrary E2E environments CLI \u00b6 The CLI provides the interface through which tests are invoked, E2E environments are managed, and general repository maintenance (such as dependency management) occurs. Separation \u00b6 As the dependencies of the test framework are a subset of what is required for the CLI, the CLI tooling may import from the test framework, but not vice versa. The diagram below shows the import hierarchy between each component. Clicking a node will open that component's location in the source code. 
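As a minimal sketch of the get_tls_context() API described above (assuming a hypothetical check class MyTLSCheck, hypothetical host/port instance options, and a hypothetical service check name — none of these come from the documentation itself), a check might consume the cached ssl.SSLContext like this instead of constructing one manually:

    # Illustrative sketch only — not an official integration.
    import socket

    from datadog_checks.base import AgentCheck


    class MyTLSCheck(AgentCheck):  # hypothetical check name
        def check(self, instance):
            host = instance.get('host', 'localhost')  # hypothetical instance option
            port = int(instance.get('port', 443))     # hypothetical instance option

            # Cached ssl.SSLContext built from user configuration (Agent 7.24+)
            context = self.get_tls_context()

            with socket.create_connection((host, port)) as sock:
                with context.wrap_socket(sock, server_hostname=host) as tls_sock:
                    # Report connectivity, tagging with the negotiated TLS version
                    self.service_check(
                        'my_tls_check.can_connect',  # hypothetical service check name
                        AgentCheck.OK,
                        tags=['tls_version:{}'.format(tls_sock.version())],
                    )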
graph BT A([Plugins]) click A \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/plugin\" \"Test framework plugins location\" B([Test framework]) click B \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev\" \"Test framework location\" C([CLI]) click C \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/tooling\" \"CLI tooling location\" A-->B C-->B var config = { securityLevel: \"loose\", startOnLoad: false, theme: \"dark\", flowchart: { htmlLabels: false }, sequence: { useMaxWidth: false }, class: { textHeight: 16, dividerMargin: 16 } }; mermaid.initialize(config);","title":"What's in the box?"},{"location":"ddev/about/#whats-in-the-box","text":"The Dev package , often referred to as its CLI entrypoint ddev , is fundamentally split into 2 parts.","title":"What's in the box?"},{"location":"ddev/about/#test-framework","text":"The test framework provides everything necessary to test integrations, such as: Dependencies like pytest , mock , requests , etc. Utilities for consistently handling complex logic or common operations An orchestrator for arbitrary E2E environments","title":"Test framework"},{"location":"ddev/about/#cli","text":"The CLI provides the interface through which tests are invoked, E2E environments are managed, and general repository maintenance (such as dependency management) occurs.","title":"CLI"},{"location":"ddev/about/#separation","text":"As the dependencies of the test framework are a subset of what is required for the CLI, the CLI tooling may import from the test framework, but not vice versa. The diagram below shows the import hierarchy between each component. Clicking a node will open that component's location in the source code. graph BT A([Plugins]) click A \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/plugin\" \"Test framework plugins location\" B([Test framework]) click B \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev\" \"Test framework location\" C([CLI]) click C \"https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/tooling\" \"CLI tooling location\" A-->B C-->B var config = { securityLevel: \"loose\", startOnLoad: false, theme: \"dark\", flowchart: { htmlLabels: false }, sequence: { useMaxWidth: false }, class: { textHeight: 16, dividerMargin: 16 } }; mermaid.initialize(config);","title":"Separation"},{"location":"ddev/cli/","text":"ddev \u00b6 Usage: ddev [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --core , -c boolean Work on integrations-core . False --extras , -e boolean Work on integrations-extras . False --agent , -a boolean Work on datadog-agent . False --marketplace , -m boolean Work on marketplace . False --here , -x boolean Work on the current location. False --color / --no-color boolean Whether or not to display colored output (default true). required --quiet , -q boolean Silence output False --debug , -d boolean Include debug output False --version boolean Show the version and exit. False --help boolean Show this message and exit. False ddev agent \u00b6 A collection of tasks related to the Datadog Agent Usage: ddev agent [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. 
False ddev agent changelog \u00b6 Generates a markdown file containing the list of checks that changed for a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the whole changelog since Agent version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent changelog [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False ddev agent integrations \u00b6 Generates a markdown file containing the list of integrations shipped in a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the list for every Agent since version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent integrations [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False ddev agent integrations-changelog \u00b6 Update integration CHANGELOG.md by adding the Agent version. Agent version is only added to the integration versions released with a specific Agent release. Usage: ddev agent integrations-changelog [OPTIONS] [CHECKS]... Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --help boolean Show this message and exit. False ddev agent requirements \u00b6 Write the requirements-agent-release.txt file at the root of the repo listing all the Agent-based integrations pinned at the version they currently have in HEAD. Usage: ddev agent requirements [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev ci \u00b6 CI related utils. Anything here should be considered experimental. Usage: ddev ci [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev ci setup \u00b6 Run CI setup scripts Usage: ddev ci setup [OPTIONS] [CHECKS]... Options: Name Type Description Default --changed boolean Only target changed checks False --help boolean Show this message and exit. False ddev clean \u00b6 Remove build and test artifacts for the given CHECK. If CHECK is not specified, the current working directory is used. Usage: ddev clean [OPTIONS] [CHECK] Options: Name Type Description Default --compiled-only , -c boolean Remove compiled files only (*.pyc, *.pyd, *.pyo, *.whl, pycache ). False --all , -a boolean Disable the detection of a project's dedicated virtual env and/or editable installation. By default, these will not be considered. 
False --force , -f boolean If set and the command is run from the root directory, allow removing build and test artifacts (*.egg-info, .benchmarks, .cache, .coverage, .eggs, .pytest_cache, .tox, build, dist). False --verbose , -v boolean Shows removed paths. False --help boolean Show this message and exit. False ddev config \u00b6 Manage the config file Usage: ddev config [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev config edit \u00b6 Edit the config file with your default EDITOR. Usage: ddev config edit [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config explore \u00b6 Open the config location in your file manager. Usage: ddev config explore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config find \u00b6 Show the location of the config file. Usage: ddev config find [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config restore \u00b6 Restore the config file to default settings. Usage: ddev config restore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config set \u00b6 Assigns values to config file entries. If the value is omitted, you will be prompted, with the input hidden if it is sensitive. $ ddev config set github.user foo New setting: [github] user = \"foo\" You can also assign values on a per-org basis. $ ddev config set orgs..api_key New setting: [orgs.] api_key = \"***********\" Usage: ddev config set [OPTIONS] KEY [VALUE] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config show \u00b6 Show the contents of the config file. Usage: ddev config show [OPTIONS] Options: Name Type Description Default --all , -a boolean No not scrub secret fields False --help boolean Show this message and exit. False ddev config update \u00b6 Update the config file with any new fields. Usage: ddev config update [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev create \u00b6 Create scaffolding for a new integration. NAME: The display name of the integration that will appear in documentation. Usage: ddev create [OPTIONS] NAME Options: Name Type Description Default --type , -t choice ( check | jmx | logs | snmp_tile | tile ) The type of integration to create check --location , -l text The directory where files will be written required --non-interactive , -ni boolean Disable prompting for fields False --quiet , -q boolean Show less output False --dry-run , -n boolean Only show what would be created False --help boolean Show this message and exit. False ddev dep \u00b6 Manage dependencies Usage: ddev dep [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev dep freeze \u00b6 Combine all dependencies for the Agent's static environment. Usage: ddev dep freeze [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev dep pin \u00b6 Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to none will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments. 
Usage: ddev dep pin [OPTIONS] PACKAGE VERSION Options: Name Type Description Default --marker , -m text Environment marker to use required --help boolean Show this message and exit. False ddev docs \u00b6 Manage documentation Usage: ddev docs [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev docs build \u00b6 Build documentation. Usage: ddev docs build [OPTIONS] Options: Name Type Description Default --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --help boolean Show this message and exit. False ddev docs deploy \u00b6 Deploy built documentation. Usage: ddev docs deploy [OPTIONS] [BRANCH] Options: Name Type Description Default --yes , -y boolean N/A False --help boolean Show this message and exit. False ddev docs serve \u00b6 Serve and view documentation in a web browser. Usage: ddev docs serve [OPTIONS] Options: Name Type Description Default --no-open , -n boolean Do not open the documentation in a web browser False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --dirty boolean Speed up reload time by only rebuilding edited pages (based on modified time). For development only. False --help boolean Show this message and exit. False ddev env \u00b6 Manage environments Usage: ddev env [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev env check \u00b6 Run an Agent check. Usage: ddev env check [OPTIONS] CHECK [ENV] Options: Name Type Description Default --rate , -r boolean Compute rates by running the check twice with a pause between each run False --times , -t integer Number of times to run the check required --pause integer Number of milliseconds to pause between multiple check runs required --delay , -d integer Delay in milliseconds between running the check and grabbing what was collected required --log-level , -l text Set the log level (default off ) required --json boolean Format the aggregator and check runner output as JSON False --table boolean Format the aggregator and check runner output as tabular False --breakpoint , -b integer Line number to start a PDB session (0: first line, -1: last line) required --config text Path to a JSON check configuration to use required --jmx-list text JMX metrics listing method required --help boolean Show this message and exit. False ddev env edit \u00b6 Start an environment. Usage: ddev env edit [OPTIONS] CHECK ENV Options: Name Type Description Default --editor , -e text Editor to use required --help boolean Show this message and exit. False ddev env ls \u00b6 List active or available environments. Usage: ddev env ls [OPTIONS] [CHECKS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev env prune \u00b6 Remove all configuration for environments. Usage: ddev env prune [OPTIONS] Options: Name Type Description Default --force , -f boolean N/A False --help boolean Show this message and exit. False ddev env reload \u00b6 Restart an Agent to detect environment changes. Usage: ddev env reload [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False ddev env shell \u00b6 Run a shell inside the Agent docker container. 
Usage: ddev env shell [OPTIONS] CHECK [ENV] Options: Name Type Description Default -c , --exec-command text Optionally execute command inside container, executes after any installs required -v , --install-vim boolean Optionally install editing/viewing tools vim and less False -i , --install-tools text Optionally install custom tools required --help boolean Show this message and exit. False ddev env start \u00b6 Start an environment. Usage: ddev env start [OPTIONS] CHECK ENV Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped False --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --org-name , -o text The org to use for data submission. required --profile-memory , -pm boolean Whether to collect metrics about memory usage False --dogstatsd boolean Enable dogstatsd port on agent False --help boolean Show this message and exit. False ddev env stop \u00b6 Stop environments, use \"all\" as check argument to stop everything. Usage: ddev env stop [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False ddev env test \u00b6 Test an environment. Usage: ddev env test [OPTIONS] [CHECKS]... Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped required --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --new-env , -ne boolean Execute setup and tear down actions False --profile-memory , -pm boolean Whether to collect metrics about memory usage False --junit , -j boolean Generate junit reports False --filter , -k text Only run tests matching given substring expression required --changed boolean Only test changed checks False --help boolean Show this message and exit. False ddev meta \u00b6 Anything here should be considered experimental. This meta namespace can be used for an arbitrary number of niche or beta features without bloating the root namespace. Usage: ddev meta [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta catalog \u00b6 Create a catalog with information about integrations Usage: ddev meta catalog [OPTIONS] CHECKS... Options: Name Type Description Default -f , --file text Output to file (it will be overwritten), you can pass \"tmp\" to generate a temporary file required --markdown , -m boolean Output to markdown instead of CSV False --help boolean Show this message and exit. False ddev meta changes \u00b6 Show changes since a specific date. 
Usage: ddev meta changes [OPTIONS] SINCE Options: Name Type Description Default --out , -o boolean Output to file False --eager boolean Skip validation of commit subjects False --help boolean Show this message and exit. False ddev meta create-example-commits \u00b6 Create branch commits from example repo Usage: ddev meta create-example-commits [OPTIONS] SOURCE_DIR Options: Name Type Description Default --prefix , -p text Optional text to prefix each commit `` --help boolean Show this message and exit. False ddev meta dash \u00b6 Dashboard utilities Usage: ddev meta dash [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta dash export \u00b6 Export a Dashboard as JSON Usage: ddev meta dash export [OPTIONS] URL INTEGRATION Options: Name Type Description Default --author , -a text The owner of this integration's dashboard. Default is 'Datadog' Datadog --help boolean Show this message and exit. False ddev meta jmx \u00b6 JMX utilities Usage: ddev meta jmx [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta jmx query-endpoint \u00b6 Query endpoint for JMX info Usage: ddev meta jmx query-endpoint [OPTIONS] HOST PORT [DOMAIN] Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom \u00b6 Prometheus utilities Usage: ddev meta prom [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom info \u00b6 Show metric info from a Prometheus endpoint. Example: $ ddev meta prom info :8080/_status/vars Usage: ddev meta prom info [OPTIONS] ENDPOINT Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom parse \u00b6 Interactively parse metric info from a Prometheus endpoint and write it to metadata.csv. Usage: ddev meta prom parse [OPTIONS] ENDPOINT CHECK Options: Name Type Description Default --here , -x boolean Output to the current location False --help boolean Show this message and exit. False ddev meta scripts \u00b6 Miscellaneous scripts that may be useful Usage: ddev meta scripts [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts email2ghuser \u00b6 Given an email, attempt to find a Github username associated with the email. $ ddev meta scripts email2ghuser example@datadoghq.com Usage: ddev meta scripts email2ghuser [OPTIONS] EMAIL Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts metrics2md \u00b6 Convert a check's metadata.csv file to a Markdown table, which will be copied to your clipboard. By default it will be compact and only contain the most useful fields. If you wish to use arbitrary metric data, you may set the check to cb to target the current contents of your clipboard. Usage: ddev meta scripts metrics2md [OPTIONS] CHECK [FIELDS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts remove-labels \u00b6 Remove all labels from an issue or pull request. This is useful when there are too many labels and its state cannot be modified (known GitHub issue). $ ddev meta scripts remove-labels 5626 Usage: ddev meta scripts remove-labels [OPTIONS] ISSUE_NUMBER Options: Name Type Description Default --help boolean Show this message and exit. 
False ddev meta scripts upgrade-python \u00b6 Upgrade the Python version of all test environments. $ ddev meta scripts upgrade-python 3.8 Usage: ddev meta scripts upgrade-python [OPTIONS] NEW_VERSION [OLD_VERSION] Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta snmp \u00b6 SNMP utilities Usage: ddev meta snmp [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta snmp generate-profile-from-mibs \u00b6 Generate an SNMP profile from MIBs. Accepts a directory path containing mib files to be used as source to generate the profile, along with a filter if a device or family of devices support only a subset of oids from a mib. filters is the path to a yaml file containing a collection of MIBs, with their list of MIB node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define a column OID within the table, or from a different table and even different MIB, which value can be used to index entries. This is the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } ::= { entPhysicalContainsTable 1 } or its json dump, where INDEX is replaced by indices \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Sometimes indexes are columns from another table, and we might want to use another column as it could have more human readable information - we might prefer to see the interface name vs its numerical table index. This can be achieved using metric_tag_aliases Return a list of SNMP metrics and copy its yaml dump to the clipboard Metric tags need to be added manually Usage: ddev meta snmp generate-profile-from-mibs [OPTIONS] [MIB_FILES]... Options: Name Type Description Default -f , --filters text Path to OIDs filter required -a , --aliases text Path to metric tag aliases required --debug , -d boolean Include debug output False --interactive , -i boolean Prompt to confirm before saving to a file False --help boolean Show this message and exit. 
False ddev meta snmp translate-profile \u00b6 Do OID translation in a SNMP profile. This isn't a plain replacement, as it doesn't preserve comments and indent, but it should automate most of the work. You'll need to install pysnmp and pysnmp-mibs manually beforehand. Usage: ddev meta snmp translate-profile [OPTIONS] PROFILE_PATH Options: Name Type Description Default --mib_source_url text Source url to fetch missing MIBS https://raw.githubusercontent.com/projx/snmp-mibs/master/@mib@ --help boolean Show this message and exit. False ddev meta snmp validate-mib-filenames \u00b6 Validate MIB file names. Frameworks used to load mib files expect MIB file names to match MIB name. Usage: ddev meta snmp validate-mib-filenames [OPTIONS] [MIB_FILES]... Options: Name Type Description Default --interactive , -i boolean Prompt to confirm before renaming all invalid MIB files False --help boolean Show this message and exit. False ddev release \u00b6 Manage the release of checks Usage: ddev release [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release build \u00b6 Build a wheel for a check as it is on the repo HEAD Usage: ddev release build [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --help boolean Show this message and exit. False ddev release changelog \u00b6 Perform the operations needed to update the changelog. This method is supposed to be used by other tasks and not directly. Usage: ddev release changelog [OPTIONS] CHECK VERSION [OLD_VERSION] Options: Name Type Description Default --initial boolean N/A False --organization , -r text N/A DataDog --quiet , -q boolean N/A False --dry-run , -n boolean N/A False --output-file , -o text N/A CHANGELOG.md --tag-prefix , -tp text N/A v --no-semver , -ns boolean N/A False --help boolean Show this message and exit. False ddev release make \u00b6 Perform a set of operations needed to release checks: update the version in __about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes You can release everything at once by setting the check to all . If you run into issues signing: Ensure you did gpg --import .gpg.pub Usage: ddev release make [OPTIONS] CHECKS... Options: Name Type Description Default --version text N/A required --new boolean Ensure versions are at 1.0.0 False --skip-sign boolean Skip the signing of release metadata False --sign-only boolean Only sign release metadata False --exclude text Comma-separated list of checks to skip required --allow-master boolean Allow ddev to commit directly to master. Forbidden for core. False --help boolean Show this message and exit. False ddev release show \u00b6 To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Usage: ddev release show [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release show changes \u00b6 Show all the pending PRs for a given check. Usage: ddev release show changes [OPTIONS] CHECK Options: Name Type Description Default --organization , -r text The Github organization the repository belongs to DataDog --tag-pattern text The regex pattern for the format of the tag. 
Required if the tag doesn't follow semver required --tag-prefix text Specify the prefix of the tag to use if the tag doesn't follow semver required --dry-run , -n boolean Run the command in dry-run mode False --since text The git ref to use instead of auto-detecting the tag to view changes since required --help boolean Show this message and exit. False ddev release show ready \u00b6 Show all the checks that can be released. Usage: ddev release show ready [OPTIONS] Options: Name Type Description Default --quiet , -q boolean N/A False --help boolean Show this message and exit. False ddev release stats \u00b6 A collection of tasks to generate reports about releases Usage: ddev release stats [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release stats merged-prs \u00b6 Prints the PRs merged between the first RC and the current RC/final build Usage: ddev release stats merged-prs [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --exclude-releases , -e boolean Flag to exclude the release PRs from the list False --export-csv text CSV file where the list will be exported required --help boolean Show this message and exit. False ddev release stats report \u00b6 Prints some release stats we want to track Usage: ddev release stats report [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --help boolean Show this message and exit. False ddev release tag \u00b6 Tag the HEAD of the git repo with the current release number for a specific check. The tag is pushed to origin by default. You can tag everything at once by setting the check to all . Notice: specifying a different version than the one in __about__.py is a maintenance task that should be run under very specific circumstances (e.g. re-align an old release performed on the wrong commit). Usage: ddev release tag [OPTIONS] CHECK [VERSION] Options: Name Type Description Default --push / --no-push boolean N/A True --dry-run , -n boolean N/A False --help boolean Show this message and exit. False ddev release trello \u00b6 Subcommands for interacting with Trello Release boards. To use Trello: 1. Go to https://trello.com/app-key and copy your API key. 2. Run ddev config set trello.key and paste your API key. 3. Go to https://trello.com/1/authorize?key=key&name=name&scope=read,write&expiration=never&response_type=token , where key is your API key and name is the name to give your token, e.g. ReleaseTestingYourName. Authorize access and copy your token. 4. Run ddev config set trello.token and paste your token. Usage: ddev release trello [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release trello status \u00b6 Print tabular status of Agent Release based on Trello columns. See trello subcommand for details on how to setup access: ddev release trello -h . 
Usage: ddev release trello status [OPTIONS] Options: Name Type Description Default --verbose , -v boolean Return the detailed results instead of the aggregates False --json , -j boolean Return as raw JSON instead False --clipboard , -c boolean Copy output to clipboard False --help boolean Show this message and exit. False ddev release trello testable \u00b6 Create a Trello card for changes since a previous release (referenced by BASE_REF ) that need to be tested for the next release (referenced by TARGET_REF ). BASE_REF and TARGET_REF can be any valid git references. It practice, you should use either: A tag: 7.16.1 , 7.17.0-rc.4 , ... A release branch: 6.16.x , 7.17.x , ... The master branch. NOTE: using a minor version shorthand (e.g. 7.16 ) is not supported, as it is ambiguous. Example: assuming we are working on the release of 7.17.0, we can... Create cards for changes between a previous Agent release and master (useful when preparing an initial RC): $ ddev release trello testable 7.16.1 origin/master Create cards for changes between a previous RC and master (useful when preparing a new RC, and a separate release branch was not created yet): $ ddev release trello testable 7.17.0-rc.2 origin/master Create cards for changes between a previous RC and a release branch (useful to only review changes in a release branch that has diverged from master ): $ ddev release trello testable 7.17.0-rc.4 7.17.x Create cards for changes between two arbitrary tags, e.g. between RCs: $ ddev release trello testable 7.17.0-rc.4 7.17.0-rc.5 TIP: run with ddev -x release trello testable to force the use of the current directory. To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello testable [OPTIONS] BASE_REF TARGET_REF Options: Name Type Description Default --milestone text The PR milestone to filter by required --dry-run , -n boolean Only show the changes False --update-rc-builds-cards boolean Update cards in RC builds column with target_ref version False --move-cards boolean Do not create a card for a change, but move the existing card from HAVE BUGS - FIXME or FIXED - Ready to Rebuild to INBOX team False --help boolean Show this message and exit. False ddev release trello update-rc-links \u00b6 Update links to RCs in the QA board Trello cards Usage: ddev release trello update-rc-links [OPTIONS] TARGET_REF Options: Name Type Description Default --help boolean Show this message and exit. False ddev release upload \u00b6 Release a specific check to PyPI as it is on the repo HEAD. Usage: ddev release upload [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --dry-run , -n boolean N/A False --help boolean Show this message and exit. False ddev run \u00b6 Run commands in the proper repo. Usage: ddev run [OPTIONS] [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev test \u00b6 Run tests for Agent-based checks. If no checks are specified, this will only test checks that were changed compared to the master branch. You can also select specific comma-separated environments to test like so: $ ddev test mysql:mysql57,maria10130 Usage: ddev test [OPTIONS] [CHECKS]... 
Options: Name Type Description Default --format-style , -fs boolean Run only the code style formatter False --style , -s boolean Run only style checks False --bench , -b boolean Run only benchmarks False --latest-metrics boolean Only verify support of new metrics False --e2e boolean Run only end-to-end tests False --ddtrace boolean Run tests using dd-trace-py False --cov , -c boolean Measure code coverage False --cov-missing , -cm boolean Show line numbers of statements that were not executed False --junit , -j boolean Generate junit reports False --marker , -m text Only run tests matching given marker expression required --filter , -k text Only run tests matching given substring expression required --pdb boolean Drop to PDB on first failure, then end test session False --debug , -d boolean Set the log level to debug False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --list , -l boolean List available test environments False --passenv text Additional environment variables to pass down required --changed boolean Only test changed checks False --cov-keep boolean Keep coverage reports False --skip-env boolean Skip environment creation and assume it is already running False --pytest-args , -pa text Additional arguments to pytest required --force-base-unpinned boolean Force using datadog-checks-base as specified by check dep False --force-base-min boolean Force using lowest viable release version of datadog-checks-base False --force-env-rebuild boolean Force creating a new env False --help boolean Show this message and exit. False ddev validate \u00b6 Verify certain aspects of the repo Usage: ddev validate [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate agent-reqs \u00b6 Verify that the checks versions are in sync with the requirements-agent-release.txt file. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate agent-reqs [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate all \u00b6 Run all CI validations for a repo. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate all [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate ci \u00b6 Validate CI infrastructure configuration. Usage: ddev validate ci [OPTIONS] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False ddev validate codeowners \u00b6 Validate that every integration has an entry in the CODEOWNERS file. Usage: ddev validate codeowners [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate config \u00b6 Validate default configuration files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. 
Usage: ddev validate config [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate example configuration files based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate dashboards \u00b6 Validate all Dashboard definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate dashboards [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate dep \u00b6 This command will: Verify the uniqueness of dependency versions across all checks, or optionally a single check Verify all the dependencies are pinned. Verify the embedded Python environment defined in the base check and requirements listed in every integration are compatible. Verify each check specifies a CHECKS_BASE_REQ variable for datadog-checks-base requirement Optionally verify that the datadog-checks-base requirement is lower-bounded Optionally verify that the datadog-checks-base requirement satisfies specific version Usage: ddev validate dep [OPTIONS] [CHECK] Options: Name Type Description Default --require-base-check-version boolean Require specific version for datadog-checks-base requirement False --min-base-check-version text Specify minimum version for datadog-checks-base requirement, e.g. 11.0.0 required --help boolean Show this message and exit. False ddev validate eula \u00b6 Validate all EULA definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate eula [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate http \u00b6 Validate all integrations for usage of http wrapper. Usage: ddev validate http [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate imports \u00b6 Validate proper imports in checks. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate imports [OPTIONS] [CHECK] Options: Name Type Description Default --autofix boolean Apply suggested fix False --help boolean Show this message and exit. False ddev validate jmx-metrics \u00b6 Validate all default JMX metrics definitions. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate jmx-metrics [OPTIONS] [CHECK] Options: Name Type Description Default --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate legacy-signature \u00b6 Validate that no integration uses the legacy signature. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate legacy-signature [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate licenses \u00b6 Validate third-party license list. 
Usage: ddev validate licenses [OPTIONS] Options: Name Type Description Default --sync , -s boolean Generate the LICENSE-3rdparty.csv file False --help boolean Show this message and exit. False ddev validate manifest \u00b6 Validate manifest.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate manifest [OPTIONS] [CHECK] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False ddev validate metadata \u00b6 Validates metadata.csv files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate metadata [OPTIONS] [CHECK] Options: Name Type Description Default --check-duplicates boolean Output warnings if there are duplicate short names and descriptions False --show-warnings , -w boolean Show warnings in addition to failures False --help boolean Show this message and exit. False ddev validate models \u00b6 Validate configuration data models. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate models [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate data models based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate package \u00b6 Validate all setup.py files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate package [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate readmes \u00b6 Validates README files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate readmes [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate recommended-monitors \u00b6 Validate all recommended monitors definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate recommended-monitors [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate saved-views \u00b6 Validates saved view files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate saved-views [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate service-checks \u00b6 Validate all service_checks.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. 
Usage: ddev validate service-checks [OPTIONS] [CHECK] Options: Name Type Description Default --sync boolean Generate example configuration files based on specifications False --help boolean Show this message and exit. False","title":"CLI"},{"location":"ddev/cli/#ddev","text":"Usage: ddev [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --core , -c boolean Work on integrations-core . False --extras , -e boolean Work on integrations-extras . False --agent , -a boolean Work on datadog-agent . False --marketplace , -m boolean Work on marketplace . False --here , -x boolean Work on the current location. False --color / --no-color boolean Whether or not to display colored output (default true). required --quiet , -q boolean Silence output False --debug , -d boolean Include debug output False --version boolean Show the version and exit. False --help boolean Show this message and exit. False","title":"ddev"},{"location":"ddev/cli/#ddev-agent","text":"A collection of tasks related to the Datadog Agent Usage: ddev agent [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"agent"},{"location":"ddev/cli/#ddev-agent-changelog","text":"Generates a markdown file containing the list of checks that changed for a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the whole changelog since Agent version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent changelog [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False","title":"changelog"},{"location":"ddev/cli/#ddev-agent-integrations","text":"Generates a markdown file containing the list of integrations shipped in a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the list for every Agent since version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent integrations [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False","title":"integrations"},{"location":"ddev/cli/#ddev-agent-integrations-changelog","text":"Update integration CHANGELOG.md by adding the Agent version. Agent version is only added to the integration versions released with a specific Agent release. Usage: ddev agent integrations-changelog [OPTIONS] [CHECKS]... 
Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --help boolean Show this message and exit. False","title":"integrations-changelog"},{"location":"ddev/cli/#ddev-agent-requirements","text":"Write the requirements-agent-release.txt file at the root of the repo listing all the Agent-based integrations pinned at the version they currently have in HEAD. Usage: ddev agent requirements [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"requirements"},{"location":"ddev/cli/#ddev-ci","text":"CI related utils. Anything here should be considered experimental. Usage: ddev ci [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"ci"},{"location":"ddev/cli/#ddev-ci-setup","text":"Run CI setup scripts Usage: ddev ci setup [OPTIONS] [CHECKS]... Options: Name Type Description Default --changed boolean Only target changed checks False --help boolean Show this message and exit. False","title":"setup"},{"location":"ddev/cli/#ddev-clean","text":"Remove build and test artifacts for the given CHECK. If CHECK is not specified, the current working directory is used. Usage: ddev clean [OPTIONS] [CHECK] Options: Name Type Description Default --compiled-only , -c boolean Remove compiled files only (*.pyc, *.pyd, *.pyo, *.whl, pycache ). False --all , -a boolean Disable the detection of a project's dedicated virtual env and/or editable installation. By default, these will not be considered. False --force , -f boolean If set and the command is run from the root directory, allow removing build and test artifacts (*.egg-info, .benchmarks, .cache, .coverage, .eggs, .pytest_cache, .tox, build, dist). False --verbose , -v boolean Shows removed paths. False --help boolean Show this message and exit. False","title":"clean"},{"location":"ddev/cli/#ddev-config","text":"Manage the config file Usage: ddev config [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"config"},{"location":"ddev/cli/#ddev-config-edit","text":"Edit the config file with your default EDITOR. Usage: ddev config edit [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"edit"},{"location":"ddev/cli/#ddev-config-explore","text":"Open the config location in your file manager. Usage: ddev config explore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"explore"},{"location":"ddev/cli/#ddev-config-find","text":"Show the location of the config file. Usage: ddev config find [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"find"},{"location":"ddev/cli/#ddev-config-restore","text":"Restore the config file to default settings. Usage: ddev config restore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"restore"},{"location":"ddev/cli/#ddev-config-set","text":"Assigns values to config file entries. If the value is omitted, you will be prompted, with the input hidden if it is sensitive. $ ddev config set github.user foo New setting: [github] user = \"foo\" You can also assign values on a per-org basis. $ ddev config set orgs..api_key New setting: [orgs.] 
api_key = \"***********\" Usage: ddev config set [OPTIONS] KEY [VALUE] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"set"},{"location":"ddev/cli/#ddev-config-show","text":"Show the contents of the config file. Usage: ddev config show [OPTIONS] Options: Name Type Description Default --all , -a boolean No not scrub secret fields False --help boolean Show this message and exit. False","title":"show"},{"location":"ddev/cli/#ddev-config-update","text":"Update the config file with any new fields. Usage: ddev config update [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"update"},{"location":"ddev/cli/#ddev-create","text":"Create scaffolding for a new integration. NAME: The display name of the integration that will appear in documentation. Usage: ddev create [OPTIONS] NAME Options: Name Type Description Default --type , -t choice ( check | jmx | logs | snmp_tile | tile ) The type of integration to create check --location , -l text The directory where files will be written required --non-interactive , -ni boolean Disable prompting for fields False --quiet , -q boolean Show less output False --dry-run , -n boolean Only show what would be created False --help boolean Show this message and exit. False","title":"create"},{"location":"ddev/cli/#ddev-dep","text":"Manage dependencies Usage: ddev dep [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dep"},{"location":"ddev/cli/#ddev-dep-freeze","text":"Combine all dependencies for the Agent's static environment. Usage: ddev dep freeze [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"freeze"},{"location":"ddev/cli/#ddev-dep-pin","text":"Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to none will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments. Usage: ddev dep pin [OPTIONS] PACKAGE VERSION Options: Name Type Description Default --marker , -m text Environment marker to use required --help boolean Show this message and exit. False","title":"pin"},{"location":"ddev/cli/#ddev-docs","text":"Manage documentation Usage: ddev docs [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"docs"},{"location":"ddev/cli/#ddev-docs-build","text":"Build documentation. Usage: ddev docs build [OPTIONS] Options: Name Type Description Default --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --help boolean Show this message and exit. False","title":"build"},{"location":"ddev/cli/#ddev-docs-deploy","text":"Deploy built documentation. Usage: ddev docs deploy [OPTIONS] [BRANCH] Options: Name Type Description Default --yes , -y boolean N/A False --help boolean Show this message and exit. False","title":"deploy"},{"location":"ddev/cli/#ddev-docs-serve","text":"Serve and view documentation in a web browser. 
Usage: ddev docs serve [OPTIONS] Options: Name Type Description Default --no-open , -n boolean Do not open the documentation in a web browser False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --dirty boolean Speed up reload time by only rebuilding edited pages (based on modified time). For development only. False --help boolean Show this message and exit. False","title":"serve"},{"location":"ddev/cli/#ddev-env","text":"Manage environments Usage: ddev env [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"env"},{"location":"ddev/cli/#ddev-env-check","text":"Run an Agent check. Usage: ddev env check [OPTIONS] CHECK [ENV] Options: Name Type Description Default --rate , -r boolean Compute rates by running the check twice with a pause between each run False --times , -t integer Number of times to run the check required --pause integer Number of milliseconds to pause between multiple check runs required --delay , -d integer Delay in milliseconds between running the check and grabbing what was collected required --log-level , -l text Set the log level (default off ) required --json boolean Format the aggregator and check runner output as JSON False --table boolean Format the aggregator and check runner output as tabular False --breakpoint , -b integer Line number to start a PDB session (0: first line, -1: last line) required --config text Path to a JSON check configuration to use required --jmx-list text JMX metrics listing method required --help boolean Show this message and exit. False","title":"check"},{"location":"ddev/cli/#ddev-env-edit","text":"Start an environment. Usage: ddev env edit [OPTIONS] CHECK ENV Options: Name Type Description Default --editor , -e text Editor to use required --help boolean Show this message and exit. False","title":"edit"},{"location":"ddev/cli/#ddev-env-ls","text":"List active or available environments. Usage: ddev env ls [OPTIONS] [CHECKS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"ls"},{"location":"ddev/cli/#ddev-env-prune","text":"Remove all configuration for environments. Usage: ddev env prune [OPTIONS] Options: Name Type Description Default --force , -f boolean N/A False --help boolean Show this message and exit. False","title":"prune"},{"location":"ddev/cli/#ddev-env-reload","text":"Restart an Agent to detect environment changes. Usage: ddev env reload [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"reload"},{"location":"ddev/cli/#ddev-env-shell","text":"Run a shell inside the Agent docker container. Usage: ddev env shell [OPTIONS] CHECK [ENV] Options: Name Type Description Default -c , --exec-command text Optionally execute command inside container, executes after any installs required -v , --install-vim boolean Optionally install editing/viewing tools vim and less False -i , --install-tools text Optionally install custom tools required --help boolean Show this message and exit. False","title":"shell"},{"location":"ddev/cli/#ddev-env-start","text":"Start an environment. Usage: ddev env start [OPTIONS] CHECK ENV Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. 
required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped False --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --org-name , -o text The org to use for data submission. required --profile-memory , -pm boolean Whether to collect metrics about memory usage False --dogstatsd boolean Enable dogstatsd port on agent False --help boolean Show this message and exit. False","title":"start"},{"location":"ddev/cli/#ddev-env-stop","text":"Stop environments, use \"all\" as check argument to stop everything. Usage: ddev env stop [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"stop"},{"location":"ddev/cli/#ddev-env-test","text":"Test an environment. Usage: ddev env test [OPTIONS] [CHECKS]... Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped required --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --new-env , -ne boolean Execute setup and tear down actions False --profile-memory , -pm boolean Whether to collect metrics about memory usage False --junit , -j boolean Generate junit reports False --filter , -k text Only run tests matching given substring expression required --changed boolean Only test changed checks False --help boolean Show this message and exit. False","title":"test"},{"location":"ddev/cli/#ddev-meta","text":"Anything here should be considered experimental. This meta namespace can be used for an arbitrary number of niche or beta features without bloating the root namespace. Usage: ddev meta [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"meta"},{"location":"ddev/cli/#ddev-meta-catalog","text":"Create a catalog with information about integrations Usage: ddev meta catalog [OPTIONS] CHECKS... Options: Name Type Description Default -f , --file text Output to file (it will be overwritten), you can pass \"tmp\" to generate a temporary file required --markdown , -m boolean Output to markdown instead of CSV False --help boolean Show this message and exit. False","title":"catalog"},{"location":"ddev/cli/#ddev-meta-changes","text":"Show changes since a specific date. Usage: ddev meta changes [OPTIONS] SINCE Options: Name Type Description Default --out , -o boolean Output to file False --eager boolean Skip validation of commit subjects False --help boolean Show this message and exit. 
False","title":"changes"},{"location":"ddev/cli/#ddev-meta-create-example-commits","text":"Create branch commits from example repo Usage: ddev meta create-example-commits [OPTIONS] SOURCE_DIR Options: Name Type Description Default --prefix , -p text Optional text to prefix each commit `` --help boolean Show this message and exit. False","title":"create-example-commits"},{"location":"ddev/cli/#ddev-meta-dash","text":"Dashboard utilities Usage: ddev meta dash [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dash"},{"location":"ddev/cli/#ddev-meta-dash-export","text":"Export a Dashboard as JSON Usage: ddev meta dash export [OPTIONS] URL INTEGRATION Options: Name Type Description Default --author , -a text The owner of this integration's dashboard. Default is 'Datadog' Datadog --help boolean Show this message and exit. False","title":"export"},{"location":"ddev/cli/#ddev-meta-jmx","text":"JMX utilities Usage: ddev meta jmx [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"jmx"},{"location":"ddev/cli/#ddev-meta-jmx-query-endpoint","text":"Query endpoint for JMX info Usage: ddev meta jmx query-endpoint [OPTIONS] HOST PORT [DOMAIN] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"query-endpoint"},{"location":"ddev/cli/#ddev-meta-prom","text":"Prometheus utilities Usage: ddev meta prom [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"prom"},{"location":"ddev/cli/#ddev-meta-prom-info","text":"Show metric info from a Prometheus endpoint. Example: $ ddev meta prom info :8080/_status/vars Usage: ddev meta prom info [OPTIONS] ENDPOINT Options: Name Type Description Default --help boolean Show this message and exit. False","title":"info"},{"location":"ddev/cli/#ddev-meta-prom-parse","text":"Interactively parse metric info from a Prometheus endpoint and write it to metadata.csv. Usage: ddev meta prom parse [OPTIONS] ENDPOINT CHECK Options: Name Type Description Default --here , -x boolean Output to the current location False --help boolean Show this message and exit. False","title":"parse"},{"location":"ddev/cli/#ddev-meta-scripts","text":"Miscellaneous scripts that may be useful Usage: ddev meta scripts [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"scripts"},{"location":"ddev/cli/#ddev-meta-scripts-email2ghuser","text":"Given an email, attempt to find a Github username associated with the email. $ ddev meta scripts email2ghuser example@datadoghq.com Usage: ddev meta scripts email2ghuser [OPTIONS] EMAIL Options: Name Type Description Default --help boolean Show this message and exit. False","title":"email2ghuser"},{"location":"ddev/cli/#ddev-meta-scripts-metrics2md","text":"Convert a check's metadata.csv file to a Markdown table, which will be copied to your clipboard. By default it will be compact and only contain the most useful fields. If you wish to use arbitrary metric data, you may set the check to cb to target the current contents of your clipboard. Usage: ddev meta scripts metrics2md [OPTIONS] CHECK [FIELDS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"metrics2md"},{"location":"ddev/cli/#ddev-meta-scripts-remove-labels","text":"Remove all labels from an issue or pull request. 
This is useful when there are too many labels and its state cannot be modified (known GitHub issue). $ ddev meta scripts remove-labels 5626 Usage: ddev meta scripts remove-labels [OPTIONS] ISSUE_NUMBER Options: Name Type Description Default --help boolean Show this message and exit. False","title":"remove-labels"},{"location":"ddev/cli/#ddev-meta-scripts-upgrade-python","text":"Upgrade the Python version of all test environments. $ ddev meta scripts upgrade-python 3.8 Usage: ddev meta scripts upgrade-python [OPTIONS] NEW_VERSION [OLD_VERSION] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"upgrade-python"},{"location":"ddev/cli/#ddev-meta-snmp","text":"SNMP utilities Usage: ddev meta snmp [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"snmp"},{"location":"ddev/cli/#ddev-meta-snmp-generate-profile-from-mibs","text":"Generate an SNMP profile from MIBs. Accepts a directory path containing mib files to be used as source to generate the profile, along with a filter if a device or family of devices support only a subset of oids from a mib. filters is the path to a yaml file containing a collection of MIBs, with their list of MIB node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define a column OID within the table, or from a different table and even different MIB, which value can be used to index entries. This is the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } ::= { entPhysicalContainsTable 1 } or its json dump, where INDEX is replaced by indices \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Sometimes indexes are columns from another table, and we might want to use another column as it could have more human readable information - we might prefer to see the interface name vs its numerical table index. 
This can be achieved using metric_tag_aliases Return a list of SNMP metrics and copy its yaml dump to the clipboard Metric tags need to be added manually Usage: ddev meta snmp generate-profile-from-mibs [OPTIONS] [MIB_FILES]... Options: Name Type Description Default -f , --filters text Path to OIDs filter required -a , --aliases text Path to metric tag aliases required --debug , -d boolean Include debug output False --interactive , -i boolean Prompt to confirm before saving to a file False --help boolean Show this message and exit. False","title":"generate-profile-from-mibs"},{"location":"ddev/cli/#ddev-meta-snmp-translate-profile","text":"Do OID translation in a SNMP profile. This isn't a plain replacement, as it doesn't preserve comments and indent, but it should automate most of the work. You'll need to install pysnmp and pysnmp-mibs manually beforehand. Usage: ddev meta snmp translate-profile [OPTIONS] PROFILE_PATH Options: Name Type Description Default --mib_source_url text Source url to fetch missing MIBS https://raw.githubusercontent.com/projx/snmp-mibs/master/@mib@ --help boolean Show this message and exit. False","title":"translate-profile"},{"location":"ddev/cli/#ddev-meta-snmp-validate-mib-filenames","text":"Validate MIB file names. Frameworks used to load mib files expect MIB file names to match MIB name. Usage: ddev meta snmp validate-mib-filenames [OPTIONS] [MIB_FILES]... Options: Name Type Description Default --interactive , -i boolean Prompt to confirm before renaming all invalid MIB files False --help boolean Show this message and exit. False","title":"validate-mib-filenames"},{"location":"ddev/cli/#ddev-release","text":"Manage the release of checks Usage: ddev release [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"release"},{"location":"ddev/cli/#ddev-release-build","text":"Build a wheel for a check as it is on the repo HEAD Usage: ddev release build [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --help boolean Show this message and exit. False","title":"build"},{"location":"ddev/cli/#ddev-release-changelog","text":"Perform the operations needed to update the changelog. This method is supposed to be used by other tasks and not directly. Usage: ddev release changelog [OPTIONS] CHECK VERSION [OLD_VERSION] Options: Name Type Description Default --initial boolean N/A False --organization , -r text N/A DataDog --quiet , -q boolean N/A False --dry-run , -n boolean N/A False --output-file , -o text N/A CHANGELOG.md --tag-prefix , -tp text N/A v --no-semver , -ns boolean N/A False --help boolean Show this message and exit. False","title":"changelog"},{"location":"ddev/cli/#ddev-release-make","text":"Perform a set of operations needed to release checks: update the version in __about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes You can release everything at once by setting the check to all . If you run into issues signing: Ensure you did gpg --import .gpg.pub Usage: ddev release make [OPTIONS] CHECKS... Options: Name Type Description Default --version text N/A required --new boolean Ensure versions are at 1.0.0 False --skip-sign boolean Skip the signing of release metadata False --sign-only boolean Only sign release metadata False --exclude text Comma-separated list of checks to skip required --allow-master boolean Allow ddev to commit directly to master. Forbidden for core. 
False --help boolean Show this message and exit. False","title":"make"},{"location":"ddev/cli/#ddev-release-show","text":"To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Usage: ddev release show [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"show"},{"location":"ddev/cli/#ddev-release-show-changes","text":"Show all the pending PRs for a given check. Usage: ddev release show changes [OPTIONS] CHECK Options: Name Type Description Default --organization , -r text The Github organization the repository belongs to DataDog --tag-pattern text The regex pattern for the format of the tag. Required if the tag doesn't follow semver required --tag-prefix text Specify the prefix of the tag to use if the tag doesn't follow semver required --dry-run , -n boolean Run the command in dry-run mode False --since text The git ref to use instead of auto-detecting the tag to view changes since required --help boolean Show this message and exit. False","title":"changes"},{"location":"ddev/cli/#ddev-release-show-ready","text":"Show all the checks that can be released. Usage: ddev release show ready [OPTIONS] Options: Name Type Description Default --quiet , -q boolean N/A False --help boolean Show this message and exit. False","title":"ready"},{"location":"ddev/cli/#ddev-release-stats","text":"A collection of tasks to generate reports about releases Usage: ddev release stats [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"stats"},{"location":"ddev/cli/#ddev-release-stats-merged-prs","text":"Prints the PRs merged between the first RC and the current RC/final build Usage: ddev release stats merged-prs [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --exclude-releases , -e boolean Flag to exclude the release PRs from the list False --export-csv text CSV file where the list will be exported required --help boolean Show this message and exit. False","title":"merged-prs"},{"location":"ddev/cli/#ddev-release-stats-report","text":"Prints some release stats we want to track Usage: ddev release stats report [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --help boolean Show this message and exit. False","title":"report"},{"location":"ddev/cli/#ddev-release-tag","text":"Tag the HEAD of the git repo with the current release number for a specific check. The tag is pushed to origin by default. You can tag everything at once by setting the check to all . Notice: specifying a different version than the one in __about__.py is a maintenance task that should be run under very specific circumstances (e.g. re-align an old release performed on the wrong commit). Usage: ddev release tag [OPTIONS] CHECK [VERSION] Options: Name Type Description Default --push / --no-push boolean N/A True --dry-run , -n boolean N/A False --help boolean Show this message and exit. 
False","title":"tag"},{"location":"ddev/cli/#ddev-release-trello","text":"Subcommands for interacting with Trello Release boards. To use Trello: 1. Go to https://trello.com/app-key and copy your API key. 2. Run ddev config set trello.key and paste your API key. 3. Go to https://trello.com/1/authorize?key=key&name=name&scope=read,write&expiration=never&response_type=token , where key is your API key and name is the name to give your token, e.g. ReleaseTestingYourName. Authorize access and copy your token. 4. Run ddev config set trello.token and paste your token. Usage: ddev release trello [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"trello"},{"location":"ddev/cli/#ddev-release-trello-status","text":"Print tabular status of Agent Release based on Trello columns. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello status [OPTIONS] Options: Name Type Description Default --verbose , -v boolean Return the detailed results instead of the aggregates False --json , -j boolean Return as raw JSON instead False --clipboard , -c boolean Copy output to clipboard False --help boolean Show this message and exit. False","title":"status"},{"location":"ddev/cli/#ddev-release-trello-testable","text":"Create a Trello card for changes since a previous release (referenced by BASE_REF ) that need to be tested for the next release (referenced by TARGET_REF ). BASE_REF and TARGET_REF can be any valid git references. It practice, you should use either: A tag: 7.16.1 , 7.17.0-rc.4 , ... A release branch: 6.16.x , 7.17.x , ... The master branch. NOTE: using a minor version shorthand (e.g. 7.16 ) is not supported, as it is ambiguous. Example: assuming we are working on the release of 7.17.0, we can... Create cards for changes between a previous Agent release and master (useful when preparing an initial RC): $ ddev release trello testable 7.16.1 origin/master Create cards for changes between a previous RC and master (useful when preparing a new RC, and a separate release branch was not created yet): $ ddev release trello testable 7.17.0-rc.2 origin/master Create cards for changes between a previous RC and a release branch (useful to only review changes in a release branch that has diverged from master ): $ ddev release trello testable 7.17.0-rc.4 7.17.x Create cards for changes between two arbitrary tags, e.g. between RCs: $ ddev release trello testable 7.17.0-rc.4 7.17.0-rc.5 TIP: run with ddev -x release trello testable to force the use of the current directory. To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello testable [OPTIONS] BASE_REF TARGET_REF Options: Name Type Description Default --milestone text The PR milestone to filter by required --dry-run , -n boolean Only show the changes False --update-rc-builds-cards boolean Update cards in RC builds column with target_ref version False --move-cards boolean Do not create a card for a change, but move the existing card from HAVE BUGS - FIXME or FIXED - Ready to Rebuild to INBOX team False --help boolean Show this message and exit. 
False","title":"testable"},{"location":"ddev/cli/#ddev-release-trello-update-rc-links","text":"Update links to RCs in the QA board Trello cards Usage: ddev release trello update-rc-links [OPTIONS] TARGET_REF Options: Name Type Description Default --help boolean Show this message and exit. False","title":"update-rc-links"},{"location":"ddev/cli/#ddev-release-upload","text":"Release a specific check to PyPI as it is on the repo HEAD. Usage: ddev release upload [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --dry-run , -n boolean N/A False --help boolean Show this message and exit. False","title":"upload"},{"location":"ddev/cli/#ddev-run","text":"Run commands in the proper repo. Usage: ddev run [OPTIONS] [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"run"},{"location":"ddev/cli/#ddev-test","text":"Run tests for Agent-based checks. If no checks are specified, this will only test checks that were changed compared to the master branch. You can also select specific comma-separated environments to test like so: $ ddev test mysql:mysql57,maria10130 Usage: ddev test [OPTIONS] [CHECKS]... Options: Name Type Description Default --format-style , -fs boolean Run only the code style formatter False --style , -s boolean Run only style checks False --bench , -b boolean Run only benchmarks False --latest-metrics boolean Only verify support of new metrics False --e2e boolean Run only end-to-end tests False --ddtrace boolean Run tests using dd-trace-py False --cov , -c boolean Measure code coverage False --cov-missing , -cm boolean Show line numbers of statements that were not executed False --junit , -j boolean Generate junit reports False --marker , -m text Only run tests matching given marker expression required --filter , -k text Only run tests matching given substring expression required --pdb boolean Drop to PDB on first failure, then end test session False --debug , -d boolean Set the log level to debug False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --list , -l boolean List available test environments False --passenv text Additional environment variables to pass down required --changed boolean Only test changed checks False --cov-keep boolean Keep coverage reports False --skip-env boolean Skip environment creation and assume it is already running False --pytest-args , -pa text Additional arguments to pytest required --force-base-unpinned boolean Force using datadog-checks-base as specified by check dep False --force-base-min boolean Force using lowest viable release version of datadog-checks-base False --force-env-rebuild boolean Force creating a new env False --help boolean Show this message and exit. False","title":"test"},{"location":"ddev/cli/#ddev-validate","text":"Verify certain aspects of the repo Usage: ddev validate [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"validate"},{"location":"ddev/cli/#ddev-validate-agent-reqs","text":"Verify that the checks versions are in sync with the requirements-agent-release.txt file. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate agent-reqs [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. 
False","title":"agent-reqs"},{"location":"ddev/cli/#ddev-validate-all","text":"Run all CI validations for a repo. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate all [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"all"},{"location":"ddev/cli/#ddev-validate-ci","text":"Validate CI infrastructure configuration. Usage: ddev validate ci [OPTIONS] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False","title":"ci"},{"location":"ddev/cli/#ddev-validate-codeowners","text":"Validate that every integration has an entry in the CODEOWNERS file. Usage: ddev validate codeowners [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"codeowners"},{"location":"ddev/cli/#ddev-validate-config","text":"Validate default configuration files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate config [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate example configuration files based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False","title":"config"},{"location":"ddev/cli/#ddev-validate-dashboards","text":"Validate all Dashboard definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate dashboards [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dashboards"},{"location":"ddev/cli/#ddev-validate-dep","text":"This command will: Verify the uniqueness of dependency versions across all checks, or optionally a single check Verify all the dependencies are pinned. Verify the embedded Python environment defined in the base check and requirements listed in every integration are compatible. Verify each check specifies a CHECKS_BASE_REQ variable for datadog-checks-base requirement Optionally verify that the datadog-checks-base requirement is lower-bounded Optionally verify that the datadog-checks-base requirement satisfies specific version Usage: ddev validate dep [OPTIONS] [CHECK] Options: Name Type Description Default --require-base-check-version boolean Require specific version for datadog-checks-base requirement False --min-base-check-version text Specify minimum version for datadog-checks-base requirement, e.g. 11.0.0 required --help boolean Show this message and exit. False","title":"dep"},{"location":"ddev/cli/#ddev-validate-eula","text":"Validate all EULA definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate eula [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"eula"},{"location":"ddev/cli/#ddev-validate-http","text":"Validate all integrations for usage of http wrapper. 
Usage: ddev validate http [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"http"},{"location":"ddev/cli/#ddev-validate-imports","text":"Validate proper imports in checks. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate imports [OPTIONS] [CHECK] Options: Name Type Description Default --autofix boolean Apply suggested fix False --help boolean Show this message and exit. False","title":"imports"},{"location":"ddev/cli/#ddev-validate-jmx-metrics","text":"Validate all default JMX metrics definitions. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate jmx-metrics [OPTIONS] [CHECK] Options: Name Type Description Default --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False","title":"jmx-metrics"},{"location":"ddev/cli/#ddev-validate-legacy-signature","text":"Validate that no integration uses the legacy signature. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate legacy-signature [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"legacy-signature"},{"location":"ddev/cli/#ddev-validate-licenses","text":"Validate third-party license list. Usage: ddev validate licenses [OPTIONS] Options: Name Type Description Default --sync , -s boolean Generate the LICENSE-3rdparty.csv file False --help boolean Show this message and exit. False","title":"licenses"},{"location":"ddev/cli/#ddev-validate-manifest","text":"Validate manifest.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate manifest [OPTIONS] [CHECK] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False","title":"manifest"},{"location":"ddev/cli/#ddev-validate-metadata","text":"Validates metadata.csv files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate metadata [OPTIONS] [CHECK] Options: Name Type Description Default --check-duplicates boolean Output warnings if there are duplicate short names and descriptions False --show-warnings , -w boolean Show warnings in addition to failures False --help boolean Show this message and exit. False","title":"metadata"},{"location":"ddev/cli/#ddev-validate-models","text":"Validate configuration data models. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate models [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate data models based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. 
False","title":"models"},{"location":"ddev/cli/#ddev-validate-package","text":"Validate all setup.py files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate package [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"package"},{"location":"ddev/cli/#ddev-validate-readmes","text":"Validates README files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate readmes [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"readmes"},{"location":"ddev/cli/#ddev-validate-recommended-monitors","text":"Validate all recommended monitors definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate recommended-monitors [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"recommended-monitors"},{"location":"ddev/cli/#ddev-validate-saved-views","text":"Validates saved view files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate saved-views [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"saved-views"},{"location":"ddev/cli/#ddev-validate-service-checks","text":"Validate all service_checks.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate service-checks [OPTIONS] [CHECK] Options: Name Type Description Default --sync boolean Generate example configuration files based on specifications False --help boolean Show this message and exit. False","title":"service-checks"},{"location":"ddev/configuration/","text":"Configuration \u00b6 All configuration can be managed entirely by the ddev config command group. To locate the TOML config file, run: ddev config find Repository \u00b6 All CLI commands are aware of the current repository context, defined by the option repo . This option should be a reference to a key in repos which is set to the path of a supported repository. For example, this configuration: repo = \"core\" [repos] core = \"/path/to/integrations-core\" extras = \"/path/to/integrations-extras\" agent = \"/path/to/datadog-agent\" would make it so running e.g. ddev test nginx will look for an integration named nginx in /path/to/integrations-core no matter what directory you are in. If the selected path does not exist, then the current directory will be used. By default, repo is set to core . Agent \u00b6 For running environments with a live Agent , you can select a specific build version to use with the option agent . This option should be a reference to a key in agents which is a mapping of environment types to Agent versions. 
For example, this configuration: agent = \"master\" [agents.master] docker = \"datadog/agent-dev:master\" local = \"latest\" [agents.\"7.18.1\"] docker = \"datadog/agent:7.18.1\" local = \"7.18.1\" would make it so environments that define the type as docker will use the Docker image that was built with the latest commit to the datadog-agent repo. Organization \u00b6 You can switch to using a particular organization with the option org . This option should be a reference to a key in orgs which is a mapping containing data specific to the organization. For example, this configuration: org = \"staging\" [orgs.staging] api_key = \"\" app_key = \"\" site = \"datadoghq.eu\" would use the access keys for the organization named staging and would submit data to the EU region. The supported fields are: api_key app_key site dd_url log_url GitHub \u00b6 To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Run ddev config show to see if your GitHub user and token is set. If not: Run ddev config set github.user Create a personal access token with public_repo and read:org permissions Run ddev config set github.token then paste the token Enable single sign-on for the token Trello \u00b6 To participate as an Agent release manager , you need to set trello.key / trello.token in your config file. Run ddev config show to see if your Trello key and token is set. If not: Go to https://trello.com/app-key and copy your API key Run ddev config set trello.key then paste your API key Go to https://trello.com/1/authorize?key=&name=&scope=read,write&expiration=never&response_type=token , where is your API key and is the name to give your token, e.g. ReleaseTestingYourName . Authorize access and copy your token. Run ddev config set trello.token and paste your token Card Assignment \u00b6 When automatically assigning QA cards , the Trello users which are members of the Agent Release Sprint Trello board will be fetched and cards will be assigned at random to them. Make sure people in your team are all members of the Agent Release Sprint board.","title":"Configuration"},{"location":"ddev/configuration/#configuration","text":"All configuration can be managed entirely by the ddev config command group. To locate the TOML config file, run: ddev config find","title":"Configuration"},{"location":"ddev/configuration/#repository","text":"All CLI commands are aware of the current repository context, defined by the option repo . This option should be a reference to a key in repos which is set to the path of a supported repository. For example, this configuration: repo = \"core\" [repos] core = \"/path/to/integrations-core\" extras = \"/path/to/integrations-extras\" agent = \"/path/to/datadog-agent\" would make it so running e.g. ddev test nginx will look for an integration named nginx in /path/to/integrations-core no matter what directory you are in. If the selected path does not exist, then the current directory will be used. By default, repo is set to core .","title":"Repository"},{"location":"ddev/configuration/#agent","text":"For running environments with a live Agent , you can select a specific build version to use with the option agent . This option should be a reference to a key in agents which is a mapping of environment types to Agent versions. 
For example, this configuration: agent = \"master\" [agents.master] docker = \"datadog/agent-dev:master\" local = \"latest\" [agents.\"7.18.1\"] docker = \"datadog/agent:7.18.1\" local = \"7.18.1\" would make it so environments that define the type as docker will use the Docker image that was built with the latest commit to the datadog-agent repo.","title":"Agent"},{"location":"ddev/configuration/#organization","text":"You can switch to using a particular organization with the option org . This option should be a reference to a key in orgs which is a mapping containing data specific to the organization. For example, this configuration: org = \"staging\" [orgs.staging] api_key = \"\" app_key = \"\" site = \"datadoghq.eu\" would use the access keys for the organization named staging and would submit data to the EU region. The supported fields are: api_key app_key site dd_url log_url","title":"Organization"},{"location":"ddev/configuration/#github","text":"To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Run ddev config show to see if your GitHub user and token is set. If not: Run ddev config set github.user Create a personal access token with public_repo and read:org permissions Run ddev config set github.token then paste the token Enable single sign-on for the token","title":"GitHub"},{"location":"ddev/configuration/#trello","text":"To participate as an Agent release manager , you need to set trello.key / trello.token in your config file. Run ddev config show to see if your Trello key and token is set. If not: Go to https://trello.com/app-key and copy your API key Run ddev config set trello.key then paste your API key Go to https://trello.com/1/authorize?key=&name=&scope=read,write&expiration=never&response_type=token , where is your API key and is the name to give your token, e.g. ReleaseTestingYourName . Authorize access and copy your token. Run ddev config set trello.token and paste your token","title":"Trello"},{"location":"ddev/configuration/#card-assignment","text":"When automatically assigning QA cards , the Trello users which are members of the Agent Release Sprint Trello board will be fetched and cards will be assigned at random to them. Make sure people in your team are all members of the Agent Release Sprint board.","title":"Card Assignment"},{"location":"ddev/plugins/","text":"Plugins \u00b6 tox \u00b6 Our tox plugin dynamically adds environments based on the presence of options defined in the [testenv] section of each integration's tox.ini file. Style \u00b6 Setting dd_check_style to true will enable 2 environments for enforcing our style conventions : style - This will check the formatting and will error if any issues are found. You may use the -s/--style flag of ddev test to execute only this environment. format_style - This will format the code for you, resolving the most common issues caught by style environment. You can run the formatter by using the -fs/--format-style flag of ddev test . pytest \u00b6 Our pytest plugin makes a few fixtures available globally for use during tests. Also, it's responsible for managing the control flow of E2E environments. Fixtures \u00b6 Agent stubs \u00b6 The stubs provided by each fixture will automatically have their state reset before each test. aggregator datadog_agent Check execution \u00b6 Most tests will execute checks via the run method of the AgentCheck interface (if the check is stateful ). 
A consequence of this is that, unlike the check method, exceptions are not propagated to the caller meaning not only can an exception not be asserted, but also errors are silently ignored. The dd_run_check fixture takes a check instance and executes it while also propagating any exceptions like normal. def test_metrics ( aggregator , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 8080 }]) dd_run_check ( check ) ... You can use the extract_message option to condense any exception message to just the original message rather than the full traceback. def test_config ( dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 'foo' }]) with pytest . raises ( Exception , match = '^Option `port` must be an integer$' ): dd_run_check ( check , extract_message = True ) E2E \u00b6 Agent check runner \u00b6 The dd_agent_check fixture will run the integration with a given configuration on a live Agent and return a populated aggregator . It accepts a single dict configuration representing either: a single instance a full configuration with top level keys instances , init_config , etc. Internally, this is a wrapper around ddev env check and you can pass through any supported options or flags. This fixture can only be used from tests marked as e2e . For example: @pytest . mark . e2e def test_e2e_metrics ( dd_agent_check , instance ): aggregator = dd_agent_check ( instance , rate = True ) ... State \u00b6 Occasionally, you will need to persist some data only known at the time of environment creation (like a generated token) through the test and environment tear down phases. To do so, use the following fixtures: dd_save_state - When executing the necessary steps to spin up an environment you may use this to save any object that can be serialized to JSON. For example: dd_save_state ( 'my_data' , { 'foo' : 'bar' }) dd_get_state - This may be used to retrieve the data: my_data = dd_get_state ( 'my_data' , default = {}) Environment manager \u00b6 The fixture dd_environment_runner manages communication between environments and the ddev env command group. You will never use it directly as it runs automatically. It acts upon a fixture named dd_environment that every integration's test suite will define if E2E testing on a live Agent is desired. This fixture is responsible for starting and stopping environments and must adhere to the following requirements: It yield s a single dict representing the default configuration the Agent will use. It must be either: a single instance a full configuration with top level keys instances , init_config , etc. Additionally, you can pass a second dict containing metadata . The setup logic must occur before the yield and the tear down logic must occur after it. Also, both steps must only execute based on the value of environment variables. Setup - only if DDEV_E2E_UP is not set to false Tear down - only if DDEV_E2E_DOWN is not set to false Note The provided Docker and Terraform environment runner utilities will do this automatically for you. Metadata \u00b6 env_type - This is the type of interface that will be used to interact with the Agent. Currently, we support docker (default) and local . env_vars - A dict of environment variables and their values that will be present when starting the Agent. docker_volumes - A list of str representing Docker volume mounts if env_type is docker e.g. /local/path:/agent/container/path:ro . docker_platform - The container architecture to use if env_type is docker . Currently, we support linux (default) and windows . 
logs_config - A list of configs that will be used by the Logs Agent. You will never need to use this directly, but rather via higher level abstractions .","title":"Plugins"},{"location":"ddev/plugins/#plugins","text":"","title":"Plugins"},{"location":"ddev/plugins/#tox","text":"Our tox plugin dynamically adds environments based on the presence of options defined in the [testenv] section of each integration's tox.ini file.","title":"tox"},{"location":"ddev/plugins/#style","text":"Setting dd_check_style to true will enable 2 environments for enforcing our style conventions : style - This will check the formatting and will error if any issues are found. You may use the -s/--style flag of ddev test to execute only this environment. format_style - This will format the code for you, resolving the most common issues caught by style environment. You can run the formatter by using the -fs/--format-style flag of ddev test .","title":"Style"},{"location":"ddev/plugins/#pytest","text":"Our pytest plugin makes a few fixtures available globally for use during tests. Also, it's responsible for managing the control flow of E2E environments.","title":"pytest"},{"location":"ddev/plugins/#fixtures","text":"","title":"Fixtures"},{"location":"ddev/plugins/#agent-stubs","text":"The stubs provided by each fixture will automatically have their state reset before each test. aggregator datadog_agent","title":"Agent stubs"},{"location":"ddev/plugins/#check-execution","text":"Most tests will execute checks via the run method of the AgentCheck interface (if the check is stateful ). A consequence of this is that, unlike the check method, exceptions are not propagated to the caller meaning not only can an exception not be asserted, but also errors are silently ignored. The dd_run_check fixture takes a check instance and executes it while also propagating any exceptions like normal. def test_metrics ( aggregator , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 8080 }]) dd_run_check ( check ) ... You can use the extract_message option to condense any exception message to just the original message rather than the full traceback. def test_config ( dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 'foo' }]) with pytest . raises ( Exception , match = '^Option `port` must be an integer$' ): dd_run_check ( check , extract_message = True )","title":"Check execution"},{"location":"ddev/plugins/#e2e","text":"","title":"E2E"},{"location":"ddev/plugins/#agent-check-runner","text":"The dd_agent_check fixture will run the integration with a given configuration on a live Agent and return a populated aggregator . It accepts a single dict configuration representing either: a single instance a full configuration with top level keys instances , init_config , etc. Internally, this is a wrapper around ddev env check and you can pass through any supported options or flags. This fixture can only be used from tests marked as e2e . For example: @pytest . mark . e2e def test_e2e_metrics ( dd_agent_check , instance ): aggregator = dd_agent_check ( instance , rate = True ) ...","title":"Agent check runner"},{"location":"ddev/plugins/#state","text":"Occasionally, you will need to persist some data only known at the time of environment creation (like a generated token) through the test and environment tear down phases. To do so, use the following fixtures: dd_save_state - When executing the necessary steps to spin up an environment you may use this to save any object that can be serialized to JSON. 
For example: dd_save_state ( 'my_data' , { 'foo' : 'bar' }) dd_get_state - This may be used to retrieve the data: my_data = dd_get_state ( 'my_data' , default = {})","title":"State"},{"location":"ddev/plugins/#environment-manager","text":"The fixture dd_environment_runner manages communication between environments and the ddev env command group. You will never use it directly as it runs automatically. It acts upon a fixture named dd_environment that every integration's test suite will define if E2E testing on a live Agent is desired. This fixture is responsible for starting and stopping environments and must adhere to the following requirements: It yield s a single dict representing the default configuration the Agent will use. It must be either: a single instance a full configuration with top level keys instances , init_config , etc. Additionally, you can pass a second dict containing metadata . The setup logic must occur before the yield and the tear down logic must occur after it. Also, both steps must only execute based on the value of environment variables. Setup - only if DDEV_E2E_UP is not set to false Tear down - only if DDEV_E2E_DOWN is not set to false Note The provided Docker and Terraform environment runner utilities will do this automatically for you.","title":"Environment manager"},{"location":"ddev/plugins/#metadata","text":"env_type - This is the type of interface that will be used to interact with the Agent. Currently, we support docker (default) and local . env_vars - A dict of environment variables and their values that will be present when starting the Agent. docker_volumes - A list of str representing Docker volume mounts if env_type is docker e.g. /local/path:/agent/container/path:ro . docker_platform - The container architecture to use if env_type is docker . Currently, we support linux (default) and windows . logs_config - A list of configs that will be used by the Logs Agent. You will never need to use this directly, but rather via higher level abstractions .","title":"Metadata"},{"location":"ddev/test/","text":"Test framework \u00b6 Environments \u00b6 Most integrations monitor services like databases or web servers, rather than system properties like CPU usage. For such cases, you'll want to spin up an environment and gracefully tear it down when tests finish. We define all environment actions in a fixture called dd_environment that looks semantically like this: @pytest . fixture ( scope = 'session' ) def dd_environment (): try : set_up_env () yield some_default_config finally : tear_down_env () This is not only used for regular tests, but is also the basis of our E2E testing . The start command executes everything before the yield and the stop command executes everything after it. We provide a few utilities for common environment types. Docker \u00b6 The docker_run utility makes it easy to create services using docker-compose . from datadog_checks.dev import docker_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with docker_run ( os . path . join ( HERE , 'docker' , 'compose.yaml' )): yield ... Read the reference for more information. Terraform \u00b6 The terraform_run utility makes it easy to create services from a directory of Terraform files. from datadog_checks.dev.terraform import terraform_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with terraform_run ( os . path . join ( HERE , 'terraform' )): yield ... 
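As a rough sketch of passing the optional arguments through, assuming a hypothetical integration (the wait time, health endpoint, environment variable, and yielded instance below are placeholders, while sleep, endpoints, and env_vars are the documented terraform_run parameters):

import os

import pytest

from datadog_checks.dev.terraform import terraform_run

HERE = os.path.dirname(os.path.abspath(__file__))


@pytest.fixture(scope='session')
def dd_environment():
    # Verify the (hypothetical) endpoint responds, wait a little longer after all
    # conditions pass, and expose a variable via os.environ while the environment is up.
    with terraform_run(
        os.path.join(HERE, 'terraform'),
        sleep=10,
        endpoints=['http://localhost:8080/health'],
        env_vars={'EXAMPLE_TOKEN': 'generated-at-setup'},
    ):
        yield {'server': 'localhost', 'port': 8080}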
Currently, we only use this for services that would be too complex to setup with Docker (like OpenStack) or things that cannot be provided by Docker (like vSphere). We provide some ready-to-use cloud templates that are available for referencing by default. We prefer using GCP when possible. Terraform E2E tests are not run in our public CI as that would needlessly slow down builds. Read the reference for more information. Mocker \u00b6 The mocker fixture is provided by the pytest-mock plugin. This fixture automatically restores anything that was mocked at the end of each test and is more ergonomic to use than stacking decorators or nesting context managers. Here's an example from their docs: def test_foo ( mocker ): # all valid calls mocker . patch ( 'os.remove' ) mocker . patch . object ( os , 'listdir' , autospec = True ) mocked_isfile = mocker . patch ( 'os.path.isfile' ) It also has many other nice features, like using pytest introspection when comparing calls. Benchmarks \u00b6 The benchmark fixture is provided by the pytest-benchmark plugin. It enables the profiling of functions with the low-overhead cProfile module. It is quite useful for seeing the approximate time a given check takes to run, as well as gaining insight into any potential performance bottlenecks. You would use it like this: def test_large_payload ( benchmark , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [ instance ]) # Run once to get any initialization out of the way. dd_run_check ( check ) benchmark ( dd_run_check , check ) To add benchmarks, define environments in tox.ini with bench somewhere in their names: [tox] ... envlist = ... bench ... [testenv:bench] By default, the test command skips all benchmark environments. To run only benchmark environments use the --bench / -b flag. The results are sorted by tottime , which is the total time spent in the given function (and excluding time made in calls to sub-functions). Logs \u00b6 We provide an easy way to utilize log collection with E2E Docker environments . Pass mount_logs=True to docker_run . This will use the logs example in the integration's config spec . For example, the following defines 2 example log files: - template : logs example : - type : file path : /var/log/apache2/access.log source : apache service : apache - type : file path : /var/log/apache2/error.log source : apache service : apache Alternatives If mount_logs is a sequence of int , only the selected indices (starting at 1) will be used. So, using the Apache example above, to only monitor the error log you would set it to [2] . In lieu of a config spec, for whatever reason, you may set mount_logs to a dict containing the standard logs key. All requested log files are available to reference as environment variables for any Docker calls as DD_LOG_ where the indices start at 1. volumes : - ${DD_LOG_1}:/usr/local/apache2/logs/access_log - ${DD_LOG_2}:/usr/local/apache2/logs/error_log When starting the environment, pass -e DD_LOGS_ENABLED=true to activate the Logs Agent. To send logs to a custom url, pass -e DD_LOGS_CONFIG_LOGS_DD_URL=[CUSTOM_URL]:[CUSTOM_PORT] when starting the environment Reference \u00b6 datadog_checks.dev.docker \u00b6 compose_file_active ( compose_file ) \u00b6 Returns a bool indicating whether or not a compose file has any active services. Source code in datadog_checks/dev/docker.py def compose_file_active ( compose_file ): \"\"\" Returns a `bool` indicating whether or not a compose file has any active services. 
\"\"\" command = [ 'docker-compose' , '-f' , compose_file , 'ps' ] lines = run_command ( command , capture = 'out' , check = True ) . stdout . splitlines () for i , line in enumerate ( lines , 1 ): if set ( line . strip ()) == { '-' }: return len ( lines [ i :]) >= 1 return False docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 ) \u00b6 A convenient context manager for safely setting up and tearing down Docker environments. compose_file ( str ) - A path to a Docker compose file. A custom tear down is not required when using this. build ( bool ) - Whether or not to build images for when compose_file is provided service_name ( str ) - Optional name for when compose_file is provided up ( callable ) - A custom setup callable down ( callable ) - A custom tear down callable. This is required when using a custom setup. on_error ( callable ) - A callable called in case of an unhandled exception sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. log_patterns ( List[str|re.Pattern] ) - Regular expression patterns to find in Docker logs before yielding. This is only available when compose_file is provided. Shorthand for adding CheckDockerLogs(compose_file, log_patterns) to the conditions argument. mount_logs ( bool ) - Whether or not to mount log files in Agent containers based on example logs configuration conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution attempts ( int ) - Number of attempts to run up successfully attempts_wait ( int ) - Time to wait between attempts Source code in datadog_checks/dev/docker.py @contextmanager def docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 , ): \"\"\" A convenient context manager for safely setting up and tearing down Docker environments. - **compose_file** (_str_) - A path to a Docker compose file. A custom tear down is not required when using this. - **build** (_bool_) - Whether or not to build images for when `compose_file` is provided - **service_name** (_str_) - Optional name for when ``compose_file`` is provided - **up** (_callable_) - A custom setup callable - **down** (_callable_) - A custom tear down callable. This is required when using a custom setup. - **on_error** (_callable_) - A callable called in case of an unhandled exception - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **log_patterns** (_List[str|re.Pattern]_) - Regular expression patterns to find in Docker logs before yielding. 
This is only available when `compose_file` is provided. Shorthand for adding `CheckDockerLogs(compose_file, log_patterns)` to the `conditions` argument. - **mount_logs** (_bool_) - Whether or not to mount log files in Agent containers based on example logs configuration - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution - **attempts** (_int_) - Number of attempts to run `up` successfully - **attempts_wait** (_int_) - Time to wait between attempts \"\"\" if compose_file and up : raise TypeError ( 'You must select either a compose file or a custom setup callable, not both.' ) if compose_file is not None : if not isinstance ( compose_file , string_types ): raise TypeError ( 'The path to the compose file is not a string: {} ' . format ( repr ( compose_file ))) set_up = ComposeFileUp ( compose_file , build = build , service_name = service_name ) if down is not None : tear_down = down else : tear_down = ComposeFileDown ( compose_file ) if on_error is None : on_error = ComposeFileLogs ( compose_file ) else : set_up = up tear_down = down if attempts is not None : saved_set_up = set_up @retry ( wait = wait_fixed ( attempts_wait ), stop = stop_after_attempt ( attempts )) def set_up_with_retry (): return saved_set_up () set_up = set_up_with_retry docker_conditions = [] if log_patterns is not None : if compose_file is None : raise ValueError ( 'The `log_patterns` convenience is unavailable when using ' 'a custom setup. Please use a custom condition instead.' ) docker_conditions . append ( CheckDockerLogs ( compose_file , log_patterns )) if conditions is not None : docker_conditions . extend ( conditions ) wrappers = list ( wrappers ) if wrappers is not None else [] if mount_logs : if isinstance ( mount_logs , dict ): wrappers . append ( shared_logs ( mount_logs [ 'logs' ])) # Easy mode, read example config else : # An extra level deep because of the context manager check_root = find_check_root ( depth = 2 ) example_log_configs = _read_example_logs_config ( check_root ) if mount_logs is True : wrappers . append ( shared_logs ( example_log_configs )) elif isinstance ( mount_logs , ( list , set )): wrappers . append ( shared_logs ( example_log_configs , mount_whitelist = mount_logs )) else : raise TypeError ( 'mount_logs: expected True, a list or a set, but got {} ' . format ( type ( mount_logs ) . __name__ ) ) with environment_run ( up = set_up , down = tear_down , on_error = on_error , sleep = sleep , endpoints = endpoints , conditions = docker_conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result get_container_ip ( container_id_or_name ) \u00b6 Get a Docker container's IP address from its ID or name. Source code in datadog_checks/dev/docker.py def get_container_ip ( container_id_or_name ): \"\"\" Get a Docker container's IP address from its ID or name. \"\"\" command = [ 'docker' , 'inspect' , '-f' , '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' , container_id_or_name , ] return run_command ( command , capture = 'out' , check = True ) . stdout . strip () get_docker_hostname () \u00b6 Determine the hostname Docker uses based on the environment, defaulting to localhost . 
Source code in datadog_checks/dev/docker.py def get_docker_hostname (): \"\"\" Determine the hostname Docker uses based on the environment, defaulting to `localhost`. \"\"\" return urlparse ( os . getenv ( 'DOCKER_HOST' , '' )) . hostname or 'localhost' datadog_checks.dev.terraform \u00b6 terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ) \u00b6 A convenient context manager for safely setting up and tearing down Terraform environments. directory ( str ) - A path containing Terraform files sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution Source code in datadog_checks/dev/terraform.py @contextmanager def terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ): \"\"\" A convenient context manager for safely setting up and tearing down Terraform environments. - **directory** (_str_) - A path containing Terraform files - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution \"\"\" if not which ( 'terraform' ): pytest . skip ( 'Terraform not available' ) set_up = TerraformUp ( directory ) tear_down = TerraformDown ( directory ) with environment_run ( up = set_up , down = tear_down , sleep = sleep , endpoints = endpoints , conditions = conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"Test framework"},{"location":"ddev/test/#test-framework","text":"","title":"Test framework"},{"location":"ddev/test/#environments","text":"Most integrations monitor services like databases or web servers, rather than system properties like CPU usage. For such cases, you'll want to spin up an environment and gracefully tear it down when tests finish. We define all environment actions in a fixture called dd_environment that looks semantically like this: @pytest . fixture ( scope = 'session' ) def dd_environment (): try : set_up_env () yield some_default_config finally : tear_down_env () This is not only used for regular tests, but is also the basis of our E2E testing . The start command executes everything before the yield and the stop command executes everything after it. We provide a few utilities for common environment types.","title":"Environments"},{"location":"ddev/test/#docker","text":"The docker_run utility makes it easy to create services using docker-compose . from datadog_checks.dev import docker_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with docker_run ( os . path . join ( HERE , 'docker' , 'compose.yaml' )): yield ... 
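A slightly fuller sketch is shown below; the compose file path, log pattern, endpoint, and instance values are hypothetical, but log_patterns, endpoints, and env_vars are documented docker_run arguments:

import os

import pytest

from datadog_checks.dev import docker_run

HERE = os.path.dirname(os.path.abspath(__file__))

INSTANCE = {'server': 'localhost', 'port': 8080}


@pytest.fixture(scope='session')
def dd_environment():
    # Block until the service logs readiness and the endpoint is reachable,
    # keeping the extra environment variable set for the lifetime of the environment.
    with docker_run(
        os.path.join(HERE, 'docker', 'compose.yaml'),
        log_patterns=['server is ready'],
        endpoints=['http://localhost:8080/health'],
        env_vars={'EXAMPLE_VERSION': '1.0.0'},
    ):
        yield INSTANCE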
Read the reference for more information.","title":"Docker"},{"location":"ddev/test/#terraform","text":"The terraform_run utility makes it easy to create services from a directory of Terraform files. from datadog_checks.dev.terraform import terraform_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with terraform_run ( os . path . join ( HERE , 'terraform' )): yield ... Currently, we only use this for services that would be too complex to setup with Docker (like OpenStack) or things that cannot be provided by Docker (like vSphere). We provide some ready-to-use cloud templates that are available for referencing by default. We prefer using GCP when possible. Terraform E2E tests are not run in our public CI as that would needlessly slow down builds. Read the reference for more information.","title":"Terraform"},{"location":"ddev/test/#mocker","text":"The mocker fixture is provided by the pytest-mock plugin. This fixture automatically restores anything that was mocked at the end of each test and is more ergonomic to use than stacking decorators or nesting context managers. Here's an example from their docs: def test_foo ( mocker ): # all valid calls mocker . patch ( 'os.remove' ) mocker . patch . object ( os , 'listdir' , autospec = True ) mocked_isfile = mocker . patch ( 'os.path.isfile' ) It also has many other nice features, like using pytest introspection when comparing calls.","title":"Mocker"},{"location":"ddev/test/#benchmarks","text":"The benchmark fixture is provided by the pytest-benchmark plugin. It enables the profiling of functions with the low-overhead cProfile module. It is quite useful for seeing the approximate time a given check takes to run, as well as gaining insight into any potential performance bottlenecks. You would use it like this: def test_large_payload ( benchmark , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [ instance ]) # Run once to get any initialization out of the way. dd_run_check ( check ) benchmark ( dd_run_check , check ) To add benchmarks, define environments in tox.ini with bench somewhere in their names: [tox] ... envlist = ... bench ... [testenv:bench] By default, the test command skips all benchmark environments. To run only benchmark environments use the --bench / -b flag. The results are sorted by tottime , which is the total time spent in the given function (and excluding time made in calls to sub-functions).","title":"Benchmarks"},{"location":"ddev/test/#logs","text":"We provide an easy way to utilize log collection with E2E Docker environments . Pass mount_logs=True to docker_run . This will use the logs example in the integration's config spec . For example, the following defines 2 example log files: - template : logs example : - type : file path : /var/log/apache2/access.log source : apache service : apache - type : file path : /var/log/apache2/error.log source : apache service : apache Alternatives If mount_logs is a sequence of int , only the selected indices (starting at 1) will be used. So, using the Apache example above, to only monitor the error log you would set it to [2] . In lieu of a config spec, for whatever reason, you may set mount_logs to a dict containing the standard logs key. All requested log files are available to reference as environment variables for any Docker calls as DD_LOG_ where the indices start at 1. 
volumes : - ${DD_LOG_1}:/usr/local/apache2/logs/access_log - ${DD_LOG_2}:/usr/local/apache2/logs/error_log When starting the environment, pass -e DD_LOGS_ENABLED=true to activate the Logs Agent. To send logs to a custom url, pass -e DD_LOGS_CONFIG_LOGS_DD_URL=[CUSTOM_URL]:[CUSTOM_PORT] when starting the environment","title":"Logs"},{"location":"ddev/test/#reference","text":"","title":"Reference"},{"location":"ddev/test/#datadog_checks.dev.docker","text":"","title":"docker"},{"location":"ddev/test/#datadog_checks.dev.docker.compose_file_active","text":"Returns a bool indicating whether or not a compose file has any active services. Source code in datadog_checks/dev/docker.py def compose_file_active ( compose_file ): \"\"\" Returns a `bool` indicating whether or not a compose file has any active services. \"\"\" command = [ 'docker-compose' , '-f' , compose_file , 'ps' ] lines = run_command ( command , capture = 'out' , check = True ) . stdout . splitlines () for i , line in enumerate ( lines , 1 ): if set ( line . strip ()) == { '-' }: return len ( lines [ i :]) >= 1 return False","title":"compose_file_active()"},{"location":"ddev/test/#datadog_checks.dev.docker.docker_run","text":"A convenient context manager for safely setting up and tearing down Docker environments. compose_file ( str ) - A path to a Docker compose file. A custom tear down is not required when using this. build ( bool ) - Whether or not to build images for when compose_file is provided service_name ( str ) - Optional name for when compose_file is provided up ( callable ) - A custom setup callable down ( callable ) - A custom tear down callable. This is required when using a custom setup. on_error ( callable ) - A callable called in case of an unhandled exception sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. log_patterns ( List[str|re.Pattern] ) - Regular expression patterns to find in Docker logs before yielding. This is only available when compose_file is provided. Shorthand for adding CheckDockerLogs(compose_file, log_patterns) to the conditions argument. mount_logs ( bool ) - Whether or not to mount log files in Agent containers based on example logs configuration conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution attempts ( int ) - Number of attempts to run up successfully attempts_wait ( int ) - Time to wait between attempts Source code in datadog_checks/dev/docker.py @contextmanager def docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 , ): \"\"\" A convenient context manager for safely setting up and tearing down Docker environments. - **compose_file** (_str_) - A path to a Docker compose file. A custom tear down is not required when using this. 
- **build** (_bool_) - Whether or not to build images for when `compose_file` is provided - **service_name** (_str_) - Optional name for when ``compose_file`` is provided - **up** (_callable_) - A custom setup callable - **down** (_callable_) - A custom tear down callable. This is required when using a custom setup. - **on_error** (_callable_) - A callable called in case of an unhandled exception - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **log_patterns** (_List[str|re.Pattern]_) - Regular expression patterns to find in Docker logs before yielding. This is only available when `compose_file` is provided. Shorthand for adding `CheckDockerLogs(compose_file, log_patterns)` to the `conditions` argument. - **mount_logs** (_bool_) - Whether or not to mount log files in Agent containers based on example logs configuration - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution - **attempts** (_int_) - Number of attempts to run `up` successfully - **attempts_wait** (_int_) - Time to wait between attempts \"\"\" if compose_file and up : raise TypeError ( 'You must select either a compose file or a custom setup callable, not both.' ) if compose_file is not None : if not isinstance ( compose_file , string_types ): raise TypeError ( 'The path to the compose file is not a string: {} ' . format ( repr ( compose_file ))) set_up = ComposeFileUp ( compose_file , build = build , service_name = service_name ) if down is not None : tear_down = down else : tear_down = ComposeFileDown ( compose_file ) if on_error is None : on_error = ComposeFileLogs ( compose_file ) else : set_up = up tear_down = down if attempts is not None : saved_set_up = set_up @retry ( wait = wait_fixed ( attempts_wait ), stop = stop_after_attempt ( attempts )) def set_up_with_retry (): return saved_set_up () set_up = set_up_with_retry docker_conditions = [] if log_patterns is not None : if compose_file is None : raise ValueError ( 'The `log_patterns` convenience is unavailable when using ' 'a custom setup. Please use a custom condition instead.' ) docker_conditions . append ( CheckDockerLogs ( compose_file , log_patterns )) if conditions is not None : docker_conditions . extend ( conditions ) wrappers = list ( wrappers ) if wrappers is not None else [] if mount_logs : if isinstance ( mount_logs , dict ): wrappers . append ( shared_logs ( mount_logs [ 'logs' ])) # Easy mode, read example config else : # An extra level deep because of the context manager check_root = find_check_root ( depth = 2 ) example_log_configs = _read_example_logs_config ( check_root ) if mount_logs is True : wrappers . append ( shared_logs ( example_log_configs )) elif isinstance ( mount_logs , ( list , set )): wrappers . append ( shared_logs ( example_log_configs , mount_whitelist = mount_logs )) else : raise TypeError ( 'mount_logs: expected True, a list or a set, but got {} ' . format ( type ( mount_logs ) . 
__name__ ) ) with environment_run ( up = set_up , down = tear_down , on_error = on_error , sleep = sleep , endpoints = endpoints , conditions = docker_conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"docker_run()"},{"location":"ddev/test/#datadog_checks.dev.docker.get_container_ip","text":"Get a Docker container's IP address from its ID or name. Source code in datadog_checks/dev/docker.py def get_container_ip ( container_id_or_name ): \"\"\" Get a Docker container's IP address from its ID or name. \"\"\" command = [ 'docker' , 'inspect' , '-f' , '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' , container_id_or_name , ] return run_command ( command , capture = 'out' , check = True ) . stdout . strip ()","title":"get_container_ip()"},{"location":"ddev/test/#datadog_checks.dev.docker.get_docker_hostname","text":"Determine the hostname Docker uses based on the environment, defaulting to localhost . Source code in datadog_checks/dev/docker.py def get_docker_hostname (): \"\"\" Determine the hostname Docker uses based on the environment, defaulting to `localhost`. \"\"\" return urlparse ( os . getenv ( 'DOCKER_HOST' , '' )) . hostname or 'localhost'","title":"get_docker_hostname()"},{"location":"ddev/test/#datadog_checks.dev.terraform","text":"","title":"terraform"},{"location":"ddev/test/#datadog_checks.dev.terraform.terraform_run","text":"A convenient context manager for safely setting up and tearing down Terraform environments. directory ( str ) - A path containing Terraform files sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution Source code in datadog_checks/dev/terraform.py @contextmanager def terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ): \"\"\" A convenient context manager for safely setting up and tearing down Terraform environments. - **directory** (_str_) - A path containing Terraform files - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution \"\"\" if not which ( 'terraform' ): pytest . 
skip ( 'Terraform not available' ) set_up = TerraformUp ( directory ) tear_down = TerraformDown ( directory ) with environment_run ( up = set_up , down = tear_down , sleep = sleep , endpoints = endpoints , conditions = conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"terraform_run()"},{"location":"faq/acknowledgements/","text":"Acknowledgements \u00b6 This is not meant to be an exhaustive list of all the things we use, but rather a token of appreciation for the services and open source software we publicly benefit from. Base \u00b6 The Python programming language , the default language of Agent Integrations, enables us and contributors to think about problems abstractly and express intent as clearly and concisely as possible. Dependencies \u00b6 We would be unable to move as fast as we do without the massive ecosystem of established software others have built. If you've contributed to one of the following projects, thank you! Your code is deployed on many systems and devices across the world. We stand on the shoulders of giants. Dependencies Core adodbapi aerospike aws-requests-auth beautifulsoup4 binary boto boto3 botocore cachetools clickhouse-cityhash clickhouse-driver contextlib2 cryptography cx-oracle ddtrace dnspython enum34 flup flup-py3 futures gearman immutables in-toto ipaddress jaydebeapi jpype1 kafka-python kazoo kubernetes ldap3 lxml lz4 mmh3 openstacksdk orjson paramiko ply prometheus-client protobuf psutil psycopg2-binary pyasn1 pycryptodomex pydantic pyhdb pyjwt pymongo pymqi pymysql pyodbc pyro4 pysmi pysnmp pysnmp-mibs pysocks python-binary-memcached python-dateutil python3-gearman pyvmomi pywin32 pyyaml redis requests requests-kerberos requests-unixsocket requests_ntlm requests_toolbelt rethinkdb scandir securesystemslib selectors34 semver serpent service_identity simplejson six snowflake-connector-python supervisor tuf typing uptime vertica-python win-inet-pton Other Rick Hosting \u00b6 A huge thanks to everyone involved in maintaining PyPI . We rely on it for providing all dependencies for not only tests, but also all Datadog Agent deployments. Documentation \u00b6 MkDocs provides us with powerful and extensible static site generation capabilities, leading to an equally impressive community around it. The Material for MkDocs theme allows us to create beautiful documentation with cross-browser and mobile support. PyMdown Extensions gives us the ability to use advanced HTML, CSS, and JavaScript functionality with simple, easy to use Markdown. CI/CD \u00b6 Azure Pipelines is used for testing all Agent Integrations. A special shout-out to Microsoft for being extremely generous with our allowance of parallel runners; only they were able to meet the requirements of our unique monorepo. 
GitHub Actions is used for all repository automation, like documentation deployment and pull request labeling.","title":"Acknowledgements"},{"location":"faq/acknowledgements/#acknowledgements","text":"This is not meant to be an exhaustive list of all the things we use, but rather a token of appreciation for the services and open source software we publicly benefit from.","title":"Acknowledgements"},{"location":"faq/acknowledgements/#base","text":"The Python programming language , the default language of Agent Integrations, enables us and contributors to think about problems abstractly and express intent as clearly and concisely as possible.","title":"Base"},{"location":"faq/acknowledgements/#dependencies","text":"We would be unable to move as fast as we do without the massive ecosystem of established software others have built. If you've contributed to one of the following projects, thank you! Your code is deployed on many systems and devices across the world. We stand on the shoulders of giants. Dependencies Core adodbapi aerospike aws-requests-auth beautifulsoup4 binary boto boto3 botocore cachetools clickhouse-cityhash clickhouse-driver contextlib2 cryptography cx-oracle ddtrace dnspython enum34 flup flup-py3 futures gearman immutables in-toto ipaddress jaydebeapi jpype1 kafka-python kazoo kubernetes ldap3 lxml lz4 mmh3 openstacksdk orjson paramiko ply prometheus-client protobuf psutil psycopg2-binary pyasn1 pycryptodomex pydantic pyhdb pyjwt pymongo pymqi pymysql pyodbc pyro4 pysmi pysnmp pysnmp-mibs pysocks python-binary-memcached python-dateutil python3-gearman pyvmomi pywin32 pyyaml redis requests requests-kerberos requests-unixsocket requests_ntlm requests_toolbelt rethinkdb scandir securesystemslib selectors34 semver serpent service_identity simplejson six snowflake-connector-python supervisor tuf typing uptime vertica-python win-inet-pton Other Rick","title":"Dependencies"},{"location":"faq/acknowledgements/#hosting","text":"A huge thanks to everyone involved in maintaining PyPI . We rely on it for providing all dependencies for not only tests, but also all Datadog Agent deployments.","title":"Hosting"},{"location":"faq/acknowledgements/#documentation","text":"MkDocs provides us with powerful and extensible static site generation capabilities, leading to an equally impressive community around it. The Material for MkDocs theme allows us to create beautiful documentation with cross-browser and mobile support. PyMdown Extensions gives us the ability to use advanced HTML, CSS, and JavaScript functionality with simple, easy to use Markdown.","title":"Documentation"},{"location":"faq/acknowledgements/#cicd","text":"Azure Pipelines is used for testing all Agent Integrations. A special shout-out to Microsoft for being extremely generous with our allowance of parallel runners; only they were able to meet the requirements of our unique monorepo. GitHub Actions is used for all repository automation, like documentation deployment and pull request labeling.","title":"CI/CD"},{"location":"faq/faq/","text":"FAQ \u00b6 Integration vs Check \u00b6 A Check is any integration whose execution is triggered directly in code by the Datadog Agent . Therefore, all Agent-based integrations written in Python or Go are considered Checks. Why test tests \u00b6 We track the coverage of tests in all cases as a drop in test coverage for test code means a test function or part of it is not called. For an example see this test bug fixed thanks to test coverage. 
See pyca/pynacl!290 and !4280 for more details.","title":"FAQ"},{"location":"faq/faq/#faq","text":"","title":"FAQ"},{"location":"faq/faq/#integration-vs-check","text":"A Check is any integration whose execution is triggered directly in code by the Datadog Agent . Therefore, all Agent-based integrations written in Python or Go are considered Checks.","title":"Integration vs Check"},{"location":"faq/faq/#why-test-tests","text":"We track the coverage of tests in all cases as a drop in test coverage for test code means a test function or part of it is not called. For an example see this test bug fixed thanks to test coverage. See pyca/pynacl!290 and !4280 for more details.","title":"Why test tests"},{"location":"guidelines/conventions/","text":"Conventions \u00b6 File naming \u00b6 Often, libraries that interact with a product will name their packages after the product. So if you name a file .py , and inside try to import the library of the same name, you will get import errors that will be difficult to diagnose. Never name a Python file the same as the integration's name. Attribute naming \u00b6 The base classes may freely add new attributes for new features. Therefore to avoid collisions it is recommended that attribute names be prefixed with underscores, especially for names that are generic. For an example, see below . Stateful checks \u00b6 Since Agent v6, every instance of AgentCheck corresponds to a single YAML instance of an integration defined in the instances array of user configuration. As such, the instance argument the check method accepts is redundant and wasteful since you are parsing the same configuration at every run. Parse configuration once and save the results. Do this class AwesomeCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AwesomeCheck , self ) . __init__ ( name , init_config , instances ) self . _server = self . instance . get ( 'server' , '' ) self . _port = int ( self . instance . get ( 'port' , 8080 )) self . _tags = list ( self . instance . get ( 'tags' , [])) self . _tags . append ( 'server: {} ' . format ( self . _server )) self . _tags . append ( 'port: {} ' . format ( self . _port )) def check ( self , _ ): ... Do NOT do this class AwesomeCheck ( AgentCheck ): def check ( self , instance ): server = instance . get ( 'server' , '' ) port = int ( instance . get ( 'port' , 8080 )) tags = list ( instance . get ( 'tags' , [])) tags . append ( 'server: {} ' . format ( server )) tags . append ( 'port: {} ' . format ( port )) ...","title":"Conventions"},{"location":"guidelines/conventions/#conventions","text":"","title":"Conventions"},{"location":"guidelines/conventions/#file-naming","text":"Often, libraries that interact with a product will name their packages after the product. So if you name a file .py , and inside try to import the library of the same name, you will get import errors that will be difficult to diagnose. Never name a Python file the same as the integration's name.","title":"File naming"},{"location":"guidelines/conventions/#attribute-naming","text":"The base classes may freely add new attributes for new features. Therefore to avoid collisions it is recommended that attribute names be prefixed with underscores, especially for names that are generic. 
For an example, see below .","title":"Attribute naming"},{"location":"guidelines/conventions/#stateful-checks","text":"Since Agent v6, every instance of AgentCheck corresponds to a single YAML instance of an integration defined in the instances array of user configuration. As such, the instance argument the check method accepts is redundant and wasteful since you are parsing the same configuration at every run. Parse configuration once and save the results. Do this class AwesomeCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AwesomeCheck , self ) . __init__ ( name , init_config , instances ) self . _server = self . instance . get ( 'server' , '' ) self . _port = int ( self . instance . get ( 'port' , 8080 )) self . _tags = list ( self . instance . get ( 'tags' , [])) self . _tags . append ( 'server: {} ' . format ( self . _server )) self . _tags . append ( 'port: {} ' . format ( self . _port )) def check ( self , _ ): ... Do NOT do this class AwesomeCheck ( AgentCheck ): def check ( self , instance ): server = instance . get ( 'server' , '' ) port = int ( instance . get ( 'port' , 8080 )) tags = list ( instance . get ( 'tags' , [])) tags . append ( 'server: {} ' . format ( server )) tags . append ( 'port: {} ' . format ( port )) ...","title":"Stateful checks"},{"location":"guidelines/dashboards/","text":"Dashboards \u00b6 Datadog dashboards enable you to efficiently monitor your infrastructure and integrations by displaying and tracking key metrics. Integration Preset Dashboards \u00b6 If you would like to create a default dashboard for an integration, follow the guidelines in the Best Practices section. Exporting a dashboard payload \u00b6 When you've created a dashboard in the Datadog UI, you can export the dashboard payload to be included in its integration's assets directory. Ensure that you have set an api_key and app_key for the org that contains the new dashboard in the ddev configuration . Run the following command to export the dashboard : ddev meta dash export Tip If the dashboard is for a contributor-maintained integration in the integration-extras repo, run the command with the --extras or -e flag. The command will add the dashboard definition to the manifest.json file of the integration. The dashboard JSON payload will be available in /assets/dashboards/.json . Commit the changes and create a pull request. Verify the Preset Dashboard \u00b6 Once your PR is merged and synced on production, you can find your dashboard in the Dashboard List page. Tip Make sure the integration tile is Installed in order to see the preset dashboard in the list. Ensure logos render correctly on the Dashboard List page and within the preset dashboard. Best Practices \u00b6 General \u00b6 When creating a new dashboard, select the default dashboard type (internally called multisize layout). Dashboard titles should contain the integration name. Some examples of a good dashboard title are Syclla and Cilium Overview . Warning Avoid using - (hyphen) in the dashboard title as the dashboard URL is generated from the title. Research the metrics supported by the integration and consider grouping them in relevant categories. Important metrics that are key to the performance and overview of the integration should be at the top. Always include an \"about\" group for the integration containing a brief description and helpful links. Also include an \"overview\" group containing a few of the most important metrics, and place it at the top of the dashboard. 
Edit the \"about\" group and select the \"banner\" display option, then link to a banner image like this: /static/images/integration_dashboard/your-image.png . We store integration banner images in github, add a new one by creating a PR in the web-ui repo . The \"about\" section should contain content, not data; the \"overview\" section should contain data. Avoid making the \"about\" section full-width. Use Group widgets to title and group sections, rather than note widgets as you might on a screenboard. Use partial width groups to display groups side-by-side. Most dashboards should display every widget within a group. Timeseries widgets should be at least 4 columns wide in order not to appear squashed on smaller displays Stream widgets should be at least 6 columns wide (half the dashboard width) for readability. Avoid placing full-width stream widgets in the middle of a dashboard as they'll \"trap\" scroll events. It's also useful to put stream widgets in a group by themselves so they can be collapsed. Add an event stream only if the service monitored by the dashboard is reporting events. Use sources:service_name . Which widgets best represent your data? Try using a mix of widget types and sizes. Explore visualizations and formatting options until you're confident your dashboard is as clear as it can be. Sometimes a whole dashboard of timeseries is ok, but other times variety can improve things. The most commonly used metric widgets are timeseries , query values , and tables . For more information on the available widget types, see the list of supported dashboard widgets . Add a logo to the dashboard header. The integration logo will automatically appear in the header if the icon exists here and the integration_id matches the icon name. That means it will only appear when the dashboard you're working on is made into the official integration board. Try to make the left and right halves of your dashboard symmetrical in high density mode. Users with large monitors will see your dashboard in high density mode by default, so it's important to make sure the group relationships make sense, and the dashboard looks good. You can adjust group heights to achieve this, and move groups between the left and right halves. a. (perfectly symmetrical) b. (close enough) Template variables allow you to dynamically filter one or more widgets in a dashboard. Template variables must be universal and accessible by any user or account using the monitored service. Make sure all relevant graphs are listening to the relevant template variable filters. Tip Adding *=scope as a template variable is useful since users can access all their own tags. Copy \u00b6 Prefer concise graph titles that start with the most important information. Avoid common phrases such as \"number of\", and don't include the integration title e.g. \"Memcached Load\". Concise title (good) Verbose title (bad) Events per node Number of Kubernetes events per node Pending tasks: [$node_name] Total number of pending tasks in [$node_name] Read/write operations Number of read/write operations Connections to server - rate Rate of connections to server Load Memcached Load Avoid repeating the group title or integration name in every widget in a group, especially if the widgets are query values with a custom unit of the same name. Note the word \"shards\" in each widget title in the group named \"shards\". Always alias formulas Group titles should be title case. Widget titles should be sentence case. 
If you're showing a legend, make sure the aliases are easy to understand. Graph titles should summarize the queried metric. Do not indicate the unit in the graph title because unit types are displayed automatically from metadata. An exception to this is if the calculation of the query represents a different type of unit. QA \u00b6 Always check a dashboard at 1280px wide and 2560px wide to see how it looks on a smaller laptop and a larger monitor. The most common screen widths for dashboards are 1920, 1680, 1440, 2560, and 1280px, making up more than half of all dashboard page views combined. Tip Use TV Mode to ensure that the whole dashboard fits your screen. Visual Style \u00b6 Format notes to make them fit their use case. Try the presets \"caption\", \"annotation\", or \"header\", or pick your own combination of styles. Avoid using the smallest font size for notes that are long or include complex formatting, like bulleted lists or code blocks. Use colors to highlight important relationships and to improve readability, not for style. If several groups are related, apply the same group header color to all of them. If you've applied a green header color to a group, try making its notes green as well. If two groups are related, but one is more important, try using the \"vivid\" color on the important group and the \"light\" color on the less important group. Don't be afraid to leave groups with white headers, and be careful not to overuse color e.g. don't make every group on a dashboard vivid blue. Also avoid using gray headers. Use legends when they make sense. Legends make it easy to read a graph without having to hover over each series or maximize the widget. Make sure you use aliases so the legend is easy to read. Automatic mode for legends is a great option that hides legends when space is tight and shows them when there's room. If you want users to compare two graphs side-by-side, make sure their x-axes align. If one graph is showing a legend and the other isn't, the x-axes won't align - make sure they either both show a legend or both do not. For timeseries, base the display type on the type of metric Types of metric Display type Volume (e.g. Number of connections) area Counts (e.g. Number of errors) bars Multiple groups or default lines Examples \u00b6 Elasticsearch \u00b6 Attention-grabbing \"about\" section with a banner image, concise copy, useful links, and a good typography hierarchy A brief, annotated \"overview\" section with the most important statistics, right at the top Simple graph titles. Group titles in title-case Close to symmetrical in high density mode Well formatted, concise notes Color coordination between related groups, notes within groups, and graphs within groups","title":"Dashboards"},{"location":"guidelines/dashboards/#dashboards","text":"Datadog dashboards enable you to efficiently monitor your infrastructure and integrations by displaying and tracking key metrics.","title":"Dashboards"},{"location":"guidelines/dashboards/#integration-preset-dashboards","text":"If you would like to create a default dashboard for an integration, follow the guidelines in the Best Practices section.","title":"Integration Preset Dashboards"},{"location":"guidelines/dashboards/#exporting-a-dashboard-payload","text":"When you've created a dashboard in the Datadog UI, you can export the dashboard payload to be included in its integration's assets directory. Ensure that you have set an api_key and app_key for the org that contains the new dashboard in the ddev configuration . 
Run the following command to export the dashboard : ddev meta dash export Tip If the dashboard is for a contributor-maintained integration in the integration-extras repo, run the command with the --extras or -e flag. The command will add the dashboard definition to the manifest.json file of the integration. The dashboard JSON payload will be available in /assets/dashboards/.json . Commit the changes and create a pull request.","title":"Exporting a dashboard payload"},{"location":"guidelines/dashboards/#verify-the-preset-dashboard","text":"Once your PR is merged and synced on production, you can find your dashboard in the Dashboard List page. Tip Make sure the integration tile is Installed in order to see the preset dashboard in the list. Ensure logos render correctly on the Dashboard List page and within the preset dashboard.","title":"Verify the Preset Dashboard"},{"location":"guidelines/dashboards/#best-practices","text":"","title":"Best Practices"},{"location":"guidelines/dashboards/#general","text":"When creating a new dashboard, select the default dashboard type (internally called multisize layout). Dashboard titles should contain the integration name. Some examples of a good dashboard title are Syclla and Cilium Overview . Warning Avoid using - (hyphen) in the dashboard title as the dashboard URL is generated from the title. Research the metrics supported by the integration and consider grouping them in relevant categories. Important metrics that are key to the performance and overview of the integration should be at the top. Always include an \"about\" group for the integration containing a brief description and helpful links. Also include an \"overview\" group containing a few of the most important metrics, and place it at the top of the dashboard. Edit the \"about\" group and select the \"banner\" display option, then link to a banner image like this: /static/images/integration_dashboard/your-image.png . We store integration banner images in github, add a new one by creating a PR in the web-ui repo . The \"about\" section should contain content, not data; the \"overview\" section should contain data. Avoid making the \"about\" section full-width. Use Group widgets to title and group sections, rather than note widgets as you might on a screenboard. Use partial width groups to display groups side-by-side. Most dashboards should display every widget within a group. Timeseries widgets should be at least 4 columns wide in order not to appear squashed on smaller displays Stream widgets should be at least 6 columns wide (half the dashboard width) for readability. Avoid placing full-width stream widgets in the middle of a dashboard as they'll \"trap\" scroll events. It's also useful to put stream widgets in a group by themselves so they can be collapsed. Add an event stream only if the service monitored by the dashboard is reporting events. Use sources:service_name . Which widgets best represent your data? Try using a mix of widget types and sizes. Explore visualizations and formatting options until you're confident your dashboard is as clear as it can be. Sometimes a whole dashboard of timeseries is ok, but other times variety can improve things. The most commonly used metric widgets are timeseries , query values , and tables . For more information on the available widget types, see the list of supported dashboard widgets . Add a logo to the dashboard header. The integration logo will automatically appear in the header if the icon exists here and the integration_id matches the icon name. 
That means it will only appear when the dashboard you're working on is made into the official integration board. Try to make the left and right halves of your dashboard symmetrical in high density mode. Users with large monitors will see your dashboard in high density mode by default, so it's important to make sure the group relationships make sense, and the dashboard looks good. You can adjust group heights to achieve this, and move groups between the left and right halves. a. (perfectly symmetrical) b. (close enough) Template variables allow you to dynamically filter one or more widgets in a dashboard. Template variables must be universal and accessible by any user or account using the monitored service. Make sure all relevant graphs are listening to the relevant template variable filters. Tip Adding *=scope as a template variable is useful since users can access all their own tags.","title":"General"},{"location":"guidelines/dashboards/#copy","text":"Prefer concise graph titles that start with the most important information. Avoid common phrases such as \"number of\", and don't include the integration title e.g. \"Memcached Load\". Concise title (good) Verbose title (bad) Events per node Number of Kubernetes events per node Pending tasks: [$node_name] Total number of pending tasks in [$node_name] Read/write operations Number of read/write operations Connections to server - rate Rate of connections to server Load Memcached Load Avoid repeating the group title or integration name in every widget in a group, especially if the widgets are query values with a custom unit of the same name. Note the word \"shards\" in each widget title in the group named \"shards\". Always alias formulas Group titles should be title case. Widget titles should be sentence case. If you're showing a legend, make sure the aliases are easy to understand. Graph titles should summarize the queried metric. Do not indicate the unit in the graph title because unit types are displayed automatically from metadata. An exception to this is if the calculation of the query represents a different type of unit.","title":"Copy"},{"location":"guidelines/dashboards/#qa","text":"Always check a dashboard at 1280px wide and 2560px wide to see how it looks on a smaller laptop and a larger monitor. The most common screen widths for dashboards are 1920, 1680, 1440, 2560, and 1280px, making up more than half of all dashboard page views combined. Tip Use TV Mode to ensure that the whole dashboard fits your screen.","title":"QA"},{"location":"guidelines/dashboards/#visual-style","text":"Format notes to make them fit their use case. Try the presets \"caption\", \"annotation\", or \"header\", or pick your own combination of styles. Avoid using the smallest font size for notes that are long or include complex formatting, like bulleted lists or code blocks. Use colors to highlight important relationships and to improve readability, not for style. If several groups are related, apply the same group header color to all of them. If you've applied a green header color to a group, try making its notes green as well. If two groups are related, but one is more important, try using the \"vivid\" color on the important group and the \"light\" color on the less important group. Don't be afraid to leave groups with white headers, and be careful not to overuse color e.g. don't make every group on a dashboard vivid blue. Also avoid using gray headers. Use legends when they make sense. 
Legends make it easy to read a graph without having to hover over each series or maximize the widget. Make sure you use aliases so the legend is easy to read. Automatic mode for legends is a great option that hides legends when space is tight and shows them when there's room. If you want users to compare two graphs side-by-side, make sure their x-axes align. If one graph is showing a legend and the other isn't, the x-axes won't align - make sure they either both show a legend or both do not. For timeseries, base the display type on the type of metric Types of metric Display type Volume (e.g. Number of connections) area Counts (e.g. Number of errors) bars Multiple groups or default lines","title":"Visual Style"},{"location":"guidelines/dashboards/#examples","text":"","title":"Examples"},{"location":"guidelines/dashboards/#elasticsearch","text":"Attention-grabbing \"about\" section with a banner image, concise copy, useful links, and a good typography hierarchy A brief, annotated \"overview\" section with the most important statistics, right at the top Simple graph titles. Group titles in title-case Close to symmetrical in high density mode Well formatted, concise notes Color coordination between related groups, notes within groups, and graphs within groups","title":"Elasticsearch"},{"location":"guidelines/pr/","text":"Pull requests \u00b6 Title \u00b6 The release command uses the title of pull requests as-is to generate changelog entries. Therefore, be as explicit and concise as possible when describing code changes. For example, do not say Fix typo , but rather something like Fix typo in debug log messages . As each integration has its own release cycle and changelog, and every pull request is automatically labeled appropriately by our CI, there is no need include the integration's name in the title. For the base package and dev package , you may want to prefix the title with the component being modified e.g. [openmetrics] or [cli] . Changelog label \u00b6 Our labeler will automatically detect if changes would not impact shipped code and apply changelog/no-changelog . In all other cases, you must manually apply changelog/ . For changelog types, we adhere to those defined by Keep a Changelog : Added for new features or any non-trivial refactors. Changed for changes in existing functionality. Deprecated for soon-to-be removed features. Removed for now removed features. Fixed for any bug fixes. Security in case of vulnerabilities. Caveat If you are fixing something that is not yet released, apply changelog/no-changelog . Separation of concerns \u00b6 Every pull request should do one thing only, for many reasons: Easy Git management - For example, if you are editing documentation and notice an error in the shipped example configuration, you should fix the error in a separate pull request. Doing so will enable a clean cherry-pick or revert of the bug fix should the need arise. Easier release management - Let's consider how the release command would handle the case of making a code change to multiple integrations. If one of the changes only fixes a typo in a code comment, that integration will still be released as indicated by the label. If both changes should indeed be released but they do different things, only one integration's changelog entry would make sense. 
Merges \u00b6 We only allow GitHub's squash and merge , for 2 reasons: To keep a clean Git history Our release tooling relies on commits being suffixed with the PR number in order to list changes between versions","title":"Pull requests"},{"location":"guidelines/pr/#pull-requests","text":"","title":"Pull requests"},{"location":"guidelines/pr/#title","text":"The release command uses the title of pull requests as-is to generate changelog entries. Therefore, be as explicit and concise as possible when describing code changes. For example, do not say Fix typo , but rather something like Fix typo in debug log messages . As each integration has its own release cycle and changelog, and every pull request is automatically labeled appropriately by our CI, there is no need include the integration's name in the title. For the base package and dev package , you may want to prefix the title with the component being modified e.g. [openmetrics] or [cli] .","title":"Title"},{"location":"guidelines/pr/#changelog-label","text":"Our labeler will automatically detect if changes would not impact shipped code and apply changelog/no-changelog . In all other cases, you must manually apply changelog/ . For changelog types, we adhere to those defined by Keep a Changelog : Added for new features or any non-trivial refactors. Changed for changes in existing functionality. Deprecated for soon-to-be removed features. Removed for now removed features. Fixed for any bug fixes. Security in case of vulnerabilities. Caveat If you are fixing something that is not yet released, apply changelog/no-changelog .","title":"Changelog label"},{"location":"guidelines/pr/#separation-of-concerns","text":"Every pull request should do one thing only, for many reasons: Easy Git management - For example, if you are editing documentation and notice an error in the shipped example configuration, you should fix the error in a separate pull request. Doing so will enable a clean cherry-pick or revert of the bug fix should the need arise. Easier release management - Let's consider how the release command would handle the case of making a code change to multiple integrations. If one of the changes only fixes a typo in a code comment, that integration will still be released as indicated by the label. If both changes should indeed be released but they do different things, only one integration's changelog entry would make sense.","title":"Separation of concerns"},{"location":"guidelines/pr/#merges","text":"We only allow GitHub's squash and merge , for 2 reasons: To keep a clean Git history Our release tooling relies on commits being suffixed with the PR number in order to list changes between versions","title":"Merges"},{"location":"guidelines/style/","text":"Style \u00b6 These are all the checkers used by our style enforcement . black \u00b6 An opinionated formatter, like JavaScript's prettier and Golang's gofmt . isort \u00b6 A tool to sort imports lexicographically, by section, and by type. We use the 5 standard sections: __future__ , stdlib, third party, first party, and local. datadog_checks is configured as a first party namespace. flake8 \u00b6 An easy-to-use wrapper around pycodestyle and pyflakes . We select everything it provides and only ignore a few things to give precedence to other tools. bugbear \u00b6 A flake8 plugin for finding likely bugs and design problems in programs. We enable: B001 : Do not use bare except: , it also catches unexpected events like memory errors, interrupts, system exit, and so on. Prefer except Exception: . 
B003 : Assigning to os.environ doesn't clear the environment. Subprocesses are going to see outdated variables, in disagreement with the current process. Use os.environ.clear() or the env= argument to Popen. B006 : Do not use mutable data structures for argument defaults. All calls reuse one instance of that data structure, persisting changes between them. B007 : Loop control variable not used within the loop body. If this is intended, start the name with an underscore. B301 : Python 3 does not include .iter* methods on dictionaries. The default behavior is to return iterables. Simply remove the iter prefix from the method. For Python 2 compatibility, also prefer the Python 3 equivalent if you expect that the size of the dict to be small and bounded. The performance regression on Python 2 will be negligible and the code is going to be the clearest. Alternatively, use six.iter* . B305 : .next() is not a thing on Python 3. Use the next() builtin. For Python 2 compatibility, use six.next() . B306 : BaseException.message has been deprecated as of Python 2.6 and is removed in Python 3. Use str(e) to access the user-readable message. Use e.args to access arguments passed to the exception. B902 : Invalid first argument used for method. Use self for instance methods, and cls for class methods. logging-format \u00b6 A flake8 plugin for ensuring a consistent logging format. We enable: G001 : Logging statements should not use string.format() for their first argument G002 : Logging statements should not use % formatting for their first argument G003 : Logging statements should not use + concatenation for their first argument G004 : Logging statements should not use f\"...\" for their first argument (only in Python 3.6+) G010 : Logging statements should not use warn (use warning instead) G100 : Logging statements should not use extra arguments unless whitelisted G201 : Logging statements should not use error(..., exc_info=True) (use exception(...) instead) G202 : Logging statements should not use redundant exc_info=True in exception Mypy \u00b6 A comment-based type checker allowing a mix of dynamic and static typing. This is optional for now. In order to enable mypy for a specific integration, open its tox.ini file and add the 2 lines in the correct section: [testenv] dd_check_types = true dd_mypy_args = --py2 datadog_checks/ tests/ ... The dd_mypy_args defines the mypy command line option for this specific integration. --py2 is here to make sure the integration is Python2.7 compatible. Here are some useful flags you can add: --check-untyped-defs : Type-checks the interior of functions without type annotations. --disallow-untyped-defs : Disallows defining functions without type annotations or with incomplete type annotations. The datadog_checks/ tests/ arguments represent the list of files that mypy should type check. Feel free to edit them as desired, including removing tests/ (if you'd prefer to not type-check the test suite), or targeting specific files (when doing partial type checking). For a complete example, see the datadog_checks_base tox configuration . Note that there is a default configuration in the mypy.ini file. Example \u00b6 Extracted from rethinkdb : from typing import Any , Iterator # Contains the different types used import rethinkdb from .document_db.types import Metric class RethinkDBCheck ( AgentCheck ): def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None super ( RethinkDBCheck , self ) . 
__init__ ( * args , ** kwargs ) def collect_metrics ( self , conn ): # type: (rethinkdb.net.Connection) -> Iterator[Metric] \"\"\" Collect metrics from the RethinkDB cluster we are connected to. \"\"\" for query in self . queries : for metric in query . run ( logger = self . log , conn = conn , config = self . _config ): yield metric Take a look at vsphere or ibm_mq integrations for more examples.","title":"Style"},{"location":"guidelines/style/#style","text":"These are all the checkers used by our style enforcement .","title":"Style"},{"location":"guidelines/style/#black","text":"An opinionated formatter, like JavaScript's prettier and Golang's gofmt .","title":"black"},{"location":"guidelines/style/#isort","text":"A tool to sort imports lexicographically, by section, and by type. We use the 5 standard sections: __future__ , stdlib, third party, first party, and local. datadog_checks is configured as a first party namespace.","title":"isort"},{"location":"guidelines/style/#flake8","text":"An easy-to-use wrapper around pycodestyle and pyflakes . We select everything it provides and only ignore a few things to give precedence to other tools.","title":"flake8"},{"location":"guidelines/style/#bugbear","text":"A flake8 plugin for finding likely bugs and design problems in programs. We enable: B001 : Do not use bare except: , it also catches unexpected events like memory errors, interrupts, system exit, and so on. Prefer except Exception: . B003 : Assigning to os.environ doesn't clear the environment. Subprocesses are going to see outdated variables, in disagreement with the current process. Use os.environ.clear() or the env= argument to Popen. B006 : Do not use mutable data structures for argument defaults. All calls reuse one instance of that data structure, persisting changes between them. B007 : Loop control variable not used within the loop body. If this is intended, start the name with an underscore. B301 : Python 3 does not include .iter* methods on dictionaries. The default behavior is to return iterables. Simply remove the iter prefix from the method. For Python 2 compatibility, also prefer the Python 3 equivalent if you expect that the size of the dict to be small and bounded. The performance regression on Python 2 will be negligible and the code is going to be the clearest. Alternatively, use six.iter* . B305 : .next() is not a thing on Python 3. Use the next() builtin. For Python 2 compatibility, use six.next() . B306 : BaseException.message has been deprecated as of Python 2.6 and is removed in Python 3. Use str(e) to access the user-readable message. Use e.args to access arguments passed to the exception. B902 : Invalid first argument used for method. Use self for instance methods, and cls for class methods.","title":"bugbear"},{"location":"guidelines/style/#logging-format","text":"A flake8 plugin for ensuring a consistent logging format. We enable: G001 : Logging statements should not use string.format() for their first argument G002 : Logging statements should not use % formatting for their first argument G003 : Logging statements should not use + concatenation for their first argument G004 : Logging statements should not use f\"...\" for their first argument (only in Python 3.6+) G010 : Logging statements should not use warn (use warning instead) G100 : Logging statements should not use extra arguments unless whitelisted G201 : Logging statements should not use error(..., exc_info=True) (use exception(...) 
instead) G202 : Logging statements should not use redundant exc_info=True in exception","title":"logging-format"},{"location":"guidelines/style/#mypy","text":"A comment-based type checker allowing a mix of dynamic and static typing. This is optional for now. In order to enable mypy for a specific integration, open its tox.ini file and add the 2 lines in the correct section: [testenv] dd_check_types = true dd_mypy_args = --py2 datadog_checks/ tests/ ... The dd_mypy_args defines the mypy command line option for this specific integration. --py2 is here to make sure the integration is Python2.7 compatible. Here are some useful flags you can add: --check-untyped-defs : Type-checks the interior of functions without type annotations. --disallow-untyped-defs : Disallows defining functions without type annotations or with incomplete type annotations. The datadog_checks/ tests/ arguments represent the list of files that mypy should type check. Feel free to edit them as desired, including removing tests/ (if you'd prefer to not type-check the test suite), or targeting specific files (when doing partial type checking). For a complete example, see the datadog_checks_base tox configuration . Note that there is a default configuration in the mypy.ini file.","title":"Mypy"},{"location":"guidelines/style/#example","text":"Extracted from rethinkdb : from typing import Any , Iterator # Contains the different types used import rethinkdb from .document_db.types import Metric class RethinkDBCheck ( AgentCheck ): def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None super ( RethinkDBCheck , self ) . __init__ ( * args , ** kwargs ) def collect_metrics ( self , conn ): # type: (rethinkdb.net.Connection) -> Iterator[Metric] \"\"\" Collect metrics from the RethinkDB cluster we are connected to. \"\"\" for query in self . queries : for metric in query . run ( logger = self . log , conn = conn , config = self . _config ): yield metric Take a look at vsphere or ibm_mq integrations for more examples.","title":"Example"},{"location":"meta/cd/","text":"Continuous delivery \u00b6","title":"CD"},{"location":"meta/cd/#continuous-delivery","text":"","title":"Continuous delivery"},{"location":"meta/ci/","text":"Continuous integration \u00b6 Tests \u00b6 All Agent Integrations use Azure Pipelines to execute tests. Execution \u00b6 Every runner will execute test stages in the following order: Unit & integration E2E Benchmarks Platforms \u00b6 We make extensive use of Microsoft-hosted agents . Windows-only integrations run on Windows Server 2019 with Visual Studio 2019 All other integrations run on Ubuntu 18.04 LTS Some things are tested on multiple platforms, like the base package and the Disk check. Pipelines \u00b6 Pull requests \u00b6 Every commit to a branch tied to an open pull request triggers a Linux and Windows job . Each runner will test any integration that was changed , with the Windows runner being further restricted to Windows-only integrations. If the base package is modified, jobs will be triggered for every integration, similar to the pipeline for master . Master \u00b6 Every commit to the master branch triggers one or more jobs for every integration . Scripts \u00b6 Some integrations require additional set up such as the installation of system dependencies. As we only want these extra steps to occur when necessary, there is a stage ran for every job that will detect what needs to be done and execute the appropriate scripts . 
As integrations may need different set up on different platforms, all scripts live under a directory named after the platform. All scripts in the directory will be executed in lexicographical order. Validations \u00b6 In addition to running tests on our CI, there are also some validations that are run to check for correctness of changes to various components of integrations. If any of these validations fail on your branch, then the CI will fail. In short, each validation is a ddev command, which fails if the component it is validating is not correct. See the ddev documentation and source code for the full docs for each validation. Tip A list of the current validations can be found here . CI configuration \u00b6 ddev validate ci This validates that all CI entries for integrations are valid. This includes checking if the integration has the correct codecov config, and has a valid CI entry if it is testable. Tip Run ddev validate ci --fix to resolve most errors. Agent requirements \u00b6 ddev validate agent-reqs This validates that each integration version is in sync with the requirements-agent-release.txt file. It is uncommon for this to fail because the release process is automated. Codeowners \u00b6 ddev validate codeowners This validates that every integration has a codeowner entry . If you fail this validation, add an entry in the codewners file corresponding to any newly added integration. Note: This validation command is only run when contributing to integrations-extras Default configuration files \u00b6 ddev validate config This verifies that the config specs for all integrations are valid by enforcing our configuration spec schema . The most common failure at this validation stage is some version of File needs to be synced. To resolve this issue, you can run ddev validate config --sync If you see failures regarding formatting or missing parameters, see our config spec documentation for more details on how to construc configuration specs. Dashboard definition files \u00b6 ddev validate dashboards This validates that dashboards are formatted correctly. This means that they need to be proper JSON and generated from Datadog's /dashboard API . Tip If you see a failure regarding use of the screen endpoint, consider using our dashboard utility command to generate your dashboard payload. Dependencies \u00b6 ddev validate dep This command: Verifies the uniqueness of dependency versions across all checks. Verifies all the dependencies are pinned. Verifies the embedded Python environment defined in the base check and requirements listed in every integration are compatible. This validation only applies if your work introduces new external dependencies. Manifest files \u00b6 ddev validate manifest This validates that the manifest files contain required fields, are formatted correctly, and don't contain common errors. See the Datadog docs for more detailed constraints. Metadata \u00b6 ddev validate metadata This checks that every metadata.csv file is formatted correctly. See the Datadog docs for more detailed constraints. README files \u00b6 ddev validate readmes This ensures that every integration's README.md file is formatted correctly. The main purpose of this validation is to ensure that any image linked in the readme exists and that all images are located in an integration's /image directory. Saved views data \u00b6 ddev validate saved-views This validates that saved views for an integration are formatted correctly and contain required fields, such as \"type\". 
Tip View example saved views for inspiration and guidance. Service check data \u00b6 ddev validate service-checks This checks that every service check file is formatted correctly. See the Datadog docs for more specific constraints. Imports \u00b6 ddev validate imports This verifies that all integrations import the base package in the correct way, such as: from datadog_checks.base.foo import bar Tip See the New Integration Instructions for more examples of how to use the base package. Labeler \u00b6 We use a GitHub Action to automatically add labels to pull requests. Tip If the Labeler CI step fails on your PR, it's probably because your PR is from a fork. Don't worry if this happens- the team can manually add labels for you. The labeler is configured to add the following: Label Condition integration/ any directory at the root that actually contains an integration documentation any Markdown, config specs , manifest.json , or anything in /docs/ dev/testing Codecov or Azure Pipelines config dev/tooling GitLab (see CD ), GitHub Actions , or Stale bot config, or the ddev CLI dependencies any change in shipped dependencies release any base package , dev package , or integration release changelog/no-changelog any release, or if all files don't modify code that is shipped The changelog/ label must be applied manually . Fork \u00b6 We forked the official action to support the following: actions/labeler!43 actions/labeler!44 a special all: prefix modifier indicating the pattern must match every file Docs \u00b6 Stale bot \u00b6 We use a GitHub App that is configured to address abandoned issues and pull requests.","title":"CI"},{"location":"meta/ci/#continuous-integration","text":"","title":"Continuous integration"},{"location":"meta/ci/#tests","text":"All Agent Integrations use Azure Pipelines to execute tests.","title":"Tests"},{"location":"meta/ci/#execution","text":"Every runner will execute test stages in the following order: Unit & integration E2E Benchmarks","title":"Execution"},{"location":"meta/ci/#platforms","text":"We make extensive use of Microsoft-hosted agents . Windows-only integrations run on Windows Server 2019 with Visual Studio 2019 All other integrations run on Ubuntu 18.04 LTS Some things are tested on multiple platforms, like the base package and the Disk check.","title":"Platforms"},{"location":"meta/ci/#pipelines","text":"","title":"Pipelines"},{"location":"meta/ci/#pull-requests","text":"Every commit to a branch tied to an open pull request triggers a Linux and Windows job . Each runner will test any integration that was changed , with the Windows runner being further restricted to Windows-only integrations. If the base package is modified, jobs will be triggered for every integration, similar to the pipeline for master .","title":"Pull requests"},{"location":"meta/ci/#master","text":"Every commit to the master branch triggers one or more jobs for every integration .","title":"Master"},{"location":"meta/ci/#scripts","text":"Some integrations require additional set up such as the installation of system dependencies. As we only want these extra steps to occur when necessary, there is a stage ran for every job that will detect what needs to be done and execute the appropriate scripts . As integrations may need different set up on different platforms, all scripts live under a directory named after the platform. 
All scripts in the directory will be executed in lexicographical order.","title":"Scripts"},{"location":"meta/ci/#validations","text":"In addition to running tests on our CI, there are also some validations that are run to check for correctness of changes to various components of integrations. If any of these validations fail on your branch, then the CI will fail. In short, each validation is a ddev command, which fails if the component it is validating is not correct. See the ddev documentation and source code for the full docs for each validation. Tip A list of the current validations can be found here .","title":"Validations"},{"location":"meta/ci/#ci-configuration","text":"ddev validate ci This validates that all CI entries for integrations are valid. This includes checking if the integration has the correct codecov config, and has a valid CI entry if it is testable. Tip Run ddev validate ci --fix to resolve most errors.","title":"CI configuration"},{"location":"meta/ci/#agent-requirements","text":"ddev validate agent-reqs This validates that each integration version is in sync with the requirements-agent-release.txt file. It is uncommon for this to fail because the release process is automated.","title":"Agent requirements"},{"location":"meta/ci/#codeowners","text":"ddev validate codeowners This validates that every integration has a codeowner entry . If you fail this validation, add an entry in the codewners file corresponding to any newly added integration. Note: This validation command is only run when contributing to integrations-extras","title":"Codeowners"},{"location":"meta/ci/#default-configuration-files","text":"ddev validate config This verifies that the config specs for all integrations are valid by enforcing our configuration spec schema . The most common failure at this validation stage is some version of File needs to be synced. To resolve this issue, you can run ddev validate config --sync If you see failures regarding formatting or missing parameters, see our config spec documentation for more details on how to construc configuration specs.","title":"Default configuration files"},{"location":"meta/ci/#dashboard-definition-files","text":"ddev validate dashboards This validates that dashboards are formatted correctly. This means that they need to be proper JSON and generated from Datadog's /dashboard API . Tip If you see a failure regarding use of the screen endpoint, consider using our dashboard utility command to generate your dashboard payload.","title":"Dashboard definition files"},{"location":"meta/ci/#dependencies","text":"ddev validate dep This command: Verifies the uniqueness of dependency versions across all checks. Verifies all the dependencies are pinned. Verifies the embedded Python environment defined in the base check and requirements listed in every integration are compatible. This validation only applies if your work introduces new external dependencies.","title":"Dependencies"},{"location":"meta/ci/#manifest-files","text":"ddev validate manifest This validates that the manifest files contain required fields, are formatted correctly, and don't contain common errors. See the Datadog docs for more detailed constraints.","title":"Manifest files"},{"location":"meta/ci/#metadata","text":"ddev validate metadata This checks that every metadata.csv file is formatted correctly. 
See the Datadog docs for more detailed constraints.","title":"Metadata"},{"location":"meta/ci/#readme-files","text":"ddev validate readmes This ensures that every integration's README.md file is formatted correctly. The main purpose of this validation is to ensure that any image linked in the readme exists and that all images are located in an integration's /image directory.","title":"README files"},{"location":"meta/ci/#saved-views-data","text":"ddev validate saved-views This validates that saved views for an integration are formatted correctly and contain required fields, such as \"type\". Tip View example saved views for inspiration and guidance.","title":"Saved views data"},{"location":"meta/ci/#service-check-data","text":"ddev validate service-checks This checks that every service check file is formatted correctly. See the Datadog docs for more specific constraints.","title":"Service check data"},{"location":"meta/ci/#imports","text":"ddev validate imports This verifies that all integrations import the base package in the correct way, such as: from datadog_checks.base.foo import bar Tip See the New Integration Instructions for more examples of how to use the base package.","title":"Imports"},{"location":"meta/ci/#labeler","text":"We use a GitHub Action to automatically add labels to pull requests. Tip If the Labeler CI step fails on your PR, it's probably because your PR is from a fork. Don't worry if this happens- the team can manually add labels for you. The labeler is configured to add the following: Label Condition integration/ any directory at the root that actually contains an integration documentation any Markdown, config specs , manifest.json , or anything in /docs/ dev/testing Codecov or Azure Pipelines config dev/tooling GitLab (see CD ), GitHub Actions , or Stale bot config, or the ddev CLI dependencies any change in shipped dependencies release any base package , dev package , or integration release changelog/no-changelog any release, or if all files don't modify code that is shipped The changelog/ label must be applied manually .","title":"Labeler"},{"location":"meta/ci/#fork","text":"We forked the official action to support the following: actions/labeler!43 actions/labeler!44 a special all: prefix modifier indicating the pattern must match every file","title":"Fork"},{"location":"meta/ci/#docs","text":"","title":"Docs"},{"location":"meta/ci/#stale-bot","text":"We use a GitHub App that is configured to address abandoned issues and pull requests.","title":"Stale bot"},{"location":"meta/config-models/","text":"Config models \u00b6 All integrations use pydantic models as the primary way to validate and interface with configuration. As config spec data types are based on OpenAPI 3, we automatically generate the necessary code. The models reside in a package named config_models located at the root of a check's namespaced package. For example, a new integration named foo : foo \u2502 ... \u251c\u2500\u2500 datadog_checks \u2502 \u2514\u2500\u2500 foo \u2502 \u2514\u2500\u2500 config_models \u2502 \u251c\u2500\u2500 __init__.py \u2502 \u251c\u2500\u2500 defaults.py \u2502 \u251c\u2500\u2500 instance.py \u2502 \u251c\u2500\u2500 shared.py \u2502 \u2514\u2500\u2500 validators.py \u2502 \u2514\u2500\u2500 __init__.py \u2502 ... ... 
There are 2 possible models: SharedConfig (ID: shared ) that corresponds to the init_config section InstanceConfig (ID: instance ) that corresponds to a check's entry in the instances section All models are defined in .py and are available for import directly under config_models . Default values \u00b6 The default values for optional settings are populated in defaults.py and are derived from the value property of config spec options. The precedence is: the default key the example key, if it appears to represent a real value rather than an illustrative example and the type is a primitive the default value of the type e.g. string -> str() , object -> dict() , etc. Validation \u00b6 The validation of fields for every model occurs in 6 stages. Initial \u00b6 def initialize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called once with the raw config that was supplied by the user. The returned mapping is used as the input config for the subsequent stages. Default value population \u00b6 If a field was not supplied by the user nor during the initialization stage, then its default value is taken from defaults.py . This stage is skipped for required fields. Default field validators \u00b6 At this point pydantic will parse the values and perform validation of types, etc. Custom field validators \u00b6 The contents of validators.py are entirely custom and contain functions to perform extra validation if necessary. def < ID > _ < OPTION_NAME > ( value : Any , * , field : pydantic . fields . ModelField , ** kwargs ) -> Any : ... Such validators are called for the appropriate field of the proper model if the option was supplied by the user. The returned value is used as the new value of the option for the subsequent stages. Pre-defined field validators \u00b6 A new validators key under the value property of config spec options is considered. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and is executed in the defined order. The last returned value is used as the new value of the option for the final stage. Final \u00b6 def finalize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called with the cumulative result of all fields. The returned mapping is used to instantiate the model. Loading \u00b6 A check initialization occurs before a check's first run that loads the config models. Validation errors will thus prevent check execution. Interface \u00b6 The config models package contains a class ConfigMixin from which checks inherit: from datadog_checks.base import AgentCheck from .config_models import ConfigMixin class Check ( AgentCheck , ConfigMixin ): ... It exposes the instantiated InstanceConfig model at self.config and SharedConfig model at self.shared_config . Immutability \u00b6 All generated models are configured as immutable . Additionally, every list is converted to tuple and every dict is converted to immutables.Map . Deprecation \u00b6 Every option marked as deprecated in the config spec will log a warning with information about when it will be removed and what to do. Enforcement \u00b6 A validation command ddev validate models runs in our CI. 
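To make the validation hooks above concrete, here is a minimal sketch of a validators.py for a hypothetical integration whose instance config has a port option. The hook names follow the initialize_<ID>, <ID>_<OPTION_NAME>, and finalize_<ID> patterns described in the validation stages; the option name and the normalization logic are illustrative assumptions, not taken from a real check.

```python
# validators.py -- a minimal sketch, assuming an instance option named `port`.
# The hook names follow the documented patterns; the logic is illustrative.
from typing import Any

import pydantic.fields


def initialize_instance(values: dict, **kwargs) -> dict:
    # Called once with the raw config supplied by the user; the returned
    # mapping becomes the input for the subsequent stages.
    if 'server_port' in values and 'port' not in values:
        # Hypothetical legacy alias normalization.
        values['port'] = values.pop('server_port')
    return values


def instance_port(value: Any, *, field: pydantic.fields.ModelField, **kwargs) -> Any:
    # Custom field validator, called only if the user supplied `port`.
    port = int(value)
    if not 0 < port < 65536:
        raise ValueError('port must be between 1 and 65535')
    return port


def finalize_instance(values: dict, **kwargs) -> dict:
    # Called with the cumulative result of all fields; the returned mapping
    # is used to instantiate the InstanceConfig model.
    return values
```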
To locally generate the proper files, run ddev validate models [CHECK] --sync .","title":"Config models"},{"location":"meta/config-models/#config-models","text":"All integrations use pydantic models as the primary way to validate and interface with configuration. As config spec data types are based on OpenAPI 3, we automatically generate the necessary code. The models reside in a package named config_models located at the root of a check's namespaced package. For example, a new integration named foo : foo \u2502 ... \u251c\u2500\u2500 datadog_checks \u2502 \u2514\u2500\u2500 foo \u2502 \u2514\u2500\u2500 config_models \u2502 \u251c\u2500\u2500 __init__.py \u2502 \u251c\u2500\u2500 defaults.py \u2502 \u251c\u2500\u2500 instance.py \u2502 \u251c\u2500\u2500 shared.py \u2502 \u2514\u2500\u2500 validators.py \u2502 \u2514\u2500\u2500 __init__.py \u2502 ... ... There are 2 possible models: SharedConfig (ID: shared ) that corresponds to the init_config section InstanceConfig (ID: instance ) that corresponds to a check's entry in the instances section All models are defined in .py and are available for import directly under config_models .","title":"Config models"},{"location":"meta/config-models/#default-values","text":"The default values for optional settings are populated in defaults.py and are derived from the value property of config spec options. The precedence is: the default key the example key, if it appears to represent a real value rather than an illustrative example and the type is a primitive the default value of the type e.g. string -> str() , object -> dict() , etc.","title":"Default values"},{"location":"meta/config-models/#validation","text":"The validation of fields for every model occurs in 6 stages.","title":"Validation"},{"location":"meta/config-models/#initial","text":"def initialize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called once with the raw config that was supplied by the user. The returned mapping is used as the input config for the subsequent stages.","title":"Initial"},{"location":"meta/config-models/#default-value-population","text":"If a field was not supplied by the user nor during the initialization stage, then its default value is taken from defaults.py . This stage is skipped for required fields.","title":"Default value population"},{"location":"meta/config-models/#default-field-validators","text":"At this point pydantic will parse the values and perform validation of types, etc.","title":"Default field validators"},{"location":"meta/config-models/#custom-field-validators","text":"The contents of validators.py are entirely custom and contain functions to perform extra validation if necessary. def < ID > _ < OPTION_NAME > ( value : Any , * , field : pydantic . fields . ModelField , ** kwargs ) -> Any : ... Such validators are called for the appropriate field of the proper model if the option was supplied by the user. The returned value is used as the new value of the option for the subsequent stages.","title":"Custom field validators"},{"location":"meta/config-models/#pre-defined-field-validators","text":"A new validators key under the value property of config spec options is considered. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and is executed in the defined order. 
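As a usage sketch of the Interface and Immutability behavior described above, a check might read its validated options as shown below; the url instance option and service init_config option are illustrative assumptions, not part of any real spec.

```python
# Usage sketch: reading validated config inside a check. The `url` instance
# option and `service` init_config option are illustrative assumptions.
from datadog_checks.base import AgentCheck

from .config_models import ConfigMixin


class Check(AgentCheck, ConfigMixin):
    def check(self, _):
        url = self.config.url                  # InstanceConfig model
        service = self.shared_config.service   # SharedConfig model

        # Models are immutable: lists come back as tuples and dicts as
        # immutables.Map, so attempts to mutate them raise an error.
        self.gauge('example.up', 1, tags=['service:%s' % service, 'url:%s' % url])
```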
The last returned value is used as the new value of the option for the final stage.","title":"Pre-defined field validators"},{"location":"meta/config-models/#final","text":"def finalize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called with the cumulative result of all fields. The returned mapping is used to instantiate the model.","title":"Final"},{"location":"meta/config-models/#loading","text":"A check initialization occurs before a check's first run that loads the config models. Validation errors will thus prevent check execution.","title":"Loading"},{"location":"meta/config-models/#interface","text":"The config models package contains a class ConfigMixin from which checks inherit: from datadog_checks.base import AgentCheck from .config_models import ConfigMixin class Check ( AgentCheck , ConfigMixin ): ... It exposes the instantiated InstanceConfig model at self.config and SharedConfig model at self.shared_config .","title":"Interface"},{"location":"meta/config-models/#immutability","text":"All generated models are configured as immutable . Additionally, every list is converted to tuple and every dict is converted to immutables.Map .","title":"Immutability"},{"location":"meta/config-models/#deprecation","text":"Every option marked as deprecated in the config spec will log a warning with information about when it will be removed and what to do.","title":"Deprecation"},{"location":"meta/config-models/#enforcement","text":"A validation command ddev validate models runs in our CI. To locally generate the proper files, run ddev validate models [CHECK] --sync .","title":"Enforcement"},{"location":"meta/config-specs/","text":"Configuration specification \u00b6 Every integration has a specification detailing all the options that influence behavior. These YAML files are located at /assets/configuration/spec.yaml . Producer \u00b6 The producer 's job is to read a specification and: Validate for correctness Populate all unset default fields Resolve any defined templates Output the complete specification as JSON for arbitrary consumers Consumers \u00b6 Consumers may utilize specs in a number of scenarios, such as: rendering example configuration shipped to end users documenting all options in-app & on the docs site form for creating configuration in multiple formats on Integration tiles automatic configuration loading for Checks Agent based and/or in-app validator for user-supplied configuration Schema \u00b6 The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Datadog Agent , etc. version - The released version of what the spec refers to files - A list of all files that influence behavior Files \u00b6 Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) example_name - This is the name of the example file the Agent will ship. If none is provided, the default will be conf.yaml.example . The exception is auto-discovery files, which are also named auto_conf.yaml . options - A list of options ( REQUIRED ) Options \u00b6 Every option has 10 possible attributes: name - This is the name of the option ( REQUIRED ) description - Information about the option ( REQUIRED ) required - Whether or not the option is required for basic functionality. It defaults to false . hidden - Whether or not the option should not be publicly exposed. It defaults to false . 
display_priority - An integer representing the relative visual rank the option should take on compared to other options when publicly exposed. It defaults to 0 , meaning that every option will be displayed in the order defined in the spec. deprecation - If the option is deprecated, a mapping of relevant information. For example: deprecation : Release : 8.0.0 Migration : | do this and that multiple - Whether or not options may be selected multiple times like instances or just once like init_config metadata_tags - A list of tags (like docs:foo ) that can serve for unexpected use cases in the future options - Nested options, indicating that this is a section like instances or logs value - The expected type data There are 2 types of options: those with and without a value . Those with a value attribute are the actual user-controlled settings that influence behavior like username . Those without are expected to be sections and therefore must have an options attribute. An option cannot have both attributes. Options with a value (non-section) also support: secret - Whether or not consumers should treat the option as sensitive information like password . It defaults to false . Info The option vs section logic was chosen instead of going fully typed to avoid deeply nested value s. Values \u00b6 The type system is based on a loose subset of OpenAPI 3 data types . The differences are: Only the minimum and maximum numeric modifiers are supported Only the pattern string modifier is supported The properties object modifier is not a map, but rather a list of maps with a required name attribute. This is so consumers will load objects consistently regardless of language guarantees regarding map key order. Values also support 1 field of our own: example - An example value, only required if the type is boolean . The default is . Templates \u00b6 Every option may reference pre-defined templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class. Override \u00b6 For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : instances/http overrides : timeout.value.example : 42 Example file consumer \u00b6 The example consumer uses each spec to render the example configuration files that are shipped with every Agent and individual Integration release. It respects a few extra option -level attributes: example - A complete example of an option in lieu of a strictly typed value attribute enabled - Whether or not to un-comment the option, overriding the behavior of required display_priority - This is an integer affecting the order in which options are displayed, with higher values indicating higher priority. The default is 0 . It also respects a few extra fields under the value attribute of each option: display_default - This is the default value that will be shown in the header of each option, useful if it differs from the example . You may set it to null explicitly to disable showing this part of the header. compact_example - Whether or not to display complex types like arrays in their most compact representation. It defaults to false . 
Usage \u00b6 Use the --sync flag of the config validation command to render the example configuration files. Data model consumer \u00b6 The model consumer uses each spec to render the pydantic models that checks use to validate and interface with configuration. The models are shipped with every Agent and individual Integration release. It respects an extra field under the value attribute of each option: default - This is the default value that options will be set to, taking precedence over the example . validators - This refers to an array of pre-defined field validators to use. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and will be executed in the defined order. Usage \u00b6 Use the --sync flag of the model validation command to render the data model files. API \u00b6 datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec \u00b6 __init__ ( self , contents , template_paths = None , source = None , version = None ) special \u00b6 Source code in datadog_checks/dev/tooling/specs/configuration/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Configuration' self . templates = ConfigTemplates ( template_paths )","title":"Config specs"},{"location":"meta/config-specs/#configuration-specification","text":"Every integration has a specification detailing all the options that influence behavior. These YAML files are located at /assets/configuration/spec.yaml .","title":"Configuration specification"},{"location":"meta/config-specs/#producer","text":"The producer 's job is to read a specification and: Validate for correctness Populate all unset default fields Resolve any defined templates Output the complete specification as JSON for arbitrary consumers","title":"Producer"},{"location":"meta/config-specs/#consumers","text":"Consumers may utilize specs in a number of scenarios, such as: rendering example configuration shipped to end users documenting all options in-app & on the docs site form for creating configuration in multiple formats on Integration tiles automatic configuration loading for Checks Agent based and/or in-app validator for user-supplied configuration","title":"Consumers"},{"location":"meta/config-specs/#schema","text":"The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Datadog Agent , etc. version - The released version of what the spec refers to files - A list of all files that influence behavior","title":"Schema"},{"location":"meta/config-specs/#files","text":"Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) example_name - This is the name of the example file the Agent will ship. If none is provided, the default will be conf.yaml.example . The exception is auto-discovery files, which are also named auto_conf.yaml . options - A list of options ( REQUIRED )","title":"Files"},{"location":"meta/config-specs/#options","text":"Every option has 10 possible attributes: name - This is the name of the option ( REQUIRED ) description - Information about the option ( REQUIRED ) required - Whether or not the option is required for basic functionality. It defaults to false . hidden - Whether or not the option should not be publicly exposed. It defaults to false . 
display_priority - An integer representing the relative visual rank the option should take on compared to other options when publicly exposed. It defaults to 0 , meaning that every option will be displayed in the order defined in the spec. deprecation - If the option is deprecated, a mapping of relevant information. For example: deprecation : Release : 8.0.0 Migration : | do this and that multiple - Whether or not options may be selected multiple times like instances or just once like init_config metadata_tags - A list of tags (like docs:foo ) that can serve for unexpected use cases in the future options - Nested options, indicating that this is a section like instances or logs value - The expected type data There are 2 types of options: those with and without a value . Those with a value attribute are the actual user-controlled settings that influence behavior like username . Those without are expected to be sections and therefore must have an options attribute. An option cannot have both attributes. Options with a value (non-section) also support: secret - Whether or not consumers should treat the option as sensitive information like password . It defaults to false . Info The option vs section logic was chosen instead of going fully typed to avoid deeply nested value s.","title":"Options"},{"location":"meta/config-specs/#values","text":"The type system is based on a loose subset of OpenAPI 3 data types . The differences are: Only the minimum and maximum numeric modifiers are supported Only the pattern string modifier is supported The properties object modifier is not a map, but rather a list of maps with a required name attribute. This is so consumers will load objects consistently regardless of language guarantees regarding map key order. Values also support 1 field of our own: example - An example value, only required if the type is boolean . The default is .","title":"Values"},{"location":"meta/config-specs/#templates","text":"Every option may reference pre-defined templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class.","title":"Templates"},{"location":"meta/config-specs/#override","text":"For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : instances/http overrides : timeout.value.example : 42","title":"Override"},{"location":"meta/config-specs/#example-file-consumer","text":"The example consumer uses each spec to render the example configuration files that are shipped with every Agent and individual Integration release. It respects a few extra option -level attributes: example - A complete example of an option in lieu of a strictly typed value attribute enabled - Whether or not to un-comment the option, overriding the behavior of required display_priority - This is an integer affecting the order in which options are displayed, with higher values indicating higher priority. The default is 0 . It also respects a few extra fields under the value attribute of each option: display_default - This is the default value that will be shown in the header of each option, useful if it differs from the example . 
You may set it to null explicitly to disable showing this part of the header. compact_example - Whether or not to display complex types like arrays in their most compact representation. It defaults to false .","title":"Example file consumer"},{"location":"meta/config-specs/#usage","text":"Use the --sync flag of the config validation command to render the example configuration files.","title":"Usage"},{"location":"meta/config-specs/#data-model-consumer","text":"The model consumer uses each spec to render the pydantic models that checks use to validate and interface with configuration. The models are shipped with every Agent and individual Integration release. It respects an extra field under the value attribute of each option: default - This is the default value that options will be set to, taking precedence over the example . validators - This refers to an array of pre-defined field validators to use. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and will be executed in the defined order.","title":"Data model consumer"},{"location":"meta/config-specs/#usage_1","text":"Use the --sync flag of the model validation command to render the data model files.","title":"Usage"},{"location":"meta/config-specs/#api","text":"","title":"API"},{"location":"meta/config-specs/#datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec","text":"","title":"ConfigSpec"},{"location":"meta/config-specs/#datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec.__init__","text":"Source code in datadog_checks/dev/tooling/specs/configuration/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Configuration' self . templates = ConfigTemplates ( template_paths )","title":"__init__()"},{"location":"meta/docs-specs/","text":"Documentation specification \u00b6 Building on top of the configuration spec implementation, we also incorporate a documentation spec. Similar to configuration specs, these YAML files are located at /assets/documentation/spec.yaml , and referenced in the check's manifest.json file. Producer \u00b6 The producer s job is to read a specification and: Validate for correctness Populate all unset default fields Gather and prioritize other schema for inclusion Resolve any defined templates Normalize links to embedded style Output the complete specification as JSON for arbitrary consumers This spec is dependent on other config files within an integration check, in order of precedence: manifest.json assets/service_checks.json assets/configuration/spec.yaml (included for reference, but unused for now) Consumers \u00b6 Consumers may utilize specs in a number of scenarios, such as: rendering README.md files for git and user documentation rendering HTML files for user documentation on our datadoghq.com site easily updating common components via base template changes creating single-source-of-truth for data such as short_description Schema \u00b6 The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Nagios , etc. version - The released version of what the spec refers to options - Top-level spec options related to the check overall (optional) files - A list of all files that influence behavior Spec Options \u00b6 Every spec has a set of optional options: autodiscovery - Indicates if this check supports autodiscovery. 
Default: false Files \u00b6 Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) render_name - This is the name of the rendered file, and defaults to README.md . Consumers may choose their own output name, or may read from this value. sections - A list of sections ( REQUIRED ) Sections \u00b6 Every section has these possible attributes: name - The title of the section. header_level - Level of indentation. tab - If not null, then the name of the tab, and all sections of the same indent must specify. description - Actual text content for the section. May be parameterized using keyword argument formatter strings, see parameterization for more info. Hyperlinks may be embedded or reference-style. parameters - Mapping of extra parameters for string formatting in the description . prepend_text - Text to insert in front of the description field. Useful for overrides. append_text - Text to append after the description field. Useful for overrides. processor - Reference to a Python function which should be invoked. If the function returns None , the default description carries forward, otherwise the results of the function will be used for the description . Used by the data_collected/service_checks template, for example. hidden - Whether or not the section should be publicly exposed. It defaults to false . sections - Nested sections, this will increase the header_level of embedded sections accordingly. template - See templates below for more. overrides - Override specific attributes within a given template. See overrides for more. Parameters \u00b6 When constructing each text section, the description field will first prepend and append values from prepend_text and append_text , respectively. Next string formatting operations will take place by using a default set of parameters joined with any parameters explicitly defined in the parameter attribute. Default parameters which will be present for all sections and passed as keyword args during string formatting include: name - the formal name of the check all fields from manifest.json objects from service_checks.json Templates \u00b6 Every section may reference pre-defined doc templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class. Overrides \u00b6 Commonly used to update a description of a given template, or to inject specific parameters: sections : - template : setup/installation overrides : description : | The Nagios check is included in the [Datadog Agent][1] package, so you don't need to install anything else on your Nagios servers. [1]: https://docs.datadoghq.com/agent/ For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : setup/configuration overrides : templates.log_collection.hidden : true README file consumer \u00b6 The README example consumer uses the documentation spec to render the README files that are included with every Integration package. Links \u00b6 As a custom with our README.md files, we use reference style links . 
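For illustration, a hypothetical description attribute might mix both link styles before normalization; the link text and targets below are placeholders rather than values from a real spec:

    description: |
      Install the [Datadog Agent][1], then read the [configuration docs](https://docs.datadoghq.com/) for details.

      [1]: https://docs.datadoghq.com/agent/
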
Each section description may have embedded or reference style links, and as part of the Producer step, these will be all normalized to embedded links. This ensures that any consumers can handle them as needed. For the README consumer, it will translate everything to reference style as part of its output stage. Usage \u00b6 Use the --sync flag of the config validation command to render the README files. API \u00b6 datadog_checks.dev.tooling.specs.docs.core.DocsSpec \u00b6 __init__ ( self , contents , template_paths = None , source = None , version = None ) special \u00b6 Source code in datadog_checks/dev/tooling/specs/docs/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Docs' self . templates = DocsTemplates ( template_paths ) normalize_links ( self ) \u00b6 Translate all reference-style links to inline links. Source code in datadog_checks/dev/tooling/specs/docs/core.py def normalize_links ( self ): \"\"\"Translate all reference-style links to inline links.\"\"\" # Markdown doc reference: https://www.markdownguide.org/basic-syntax/#links for fidx , file in enumerate ( self . data [ 'files' ], 1 ): sections = deque ( enumerate ( file [ 'sections' ], 1 )) while sections : sidx , section = sections . popleft () section [ 'prepend_text' ] = self . _normalize ( section [ 'prepend_text' ], fidx , sidx ) section [ 'description' ] = self . _normalize ( section [ 'description' ], fidx , sidx ) section [ 'append_text' ] = self . _normalize ( section [ 'append_text' ], fidx , sidx ) if 'sections' in section : nested_sections = [ ( f ' { sidx } . { subidx } ' , subsection ) for subidx , subsection in enumerate ( section [ 'sections' ], 1 ) ] # extend left backwards for correct order of sections sections . extendleft ( nested_sections [:: - 1 ]) validate ( self ) \u00b6 Source code in datadog_checks/dev/tooling/specs/docs/core.py def validate ( self ): spec_validator ( self . data , self ) if self . errors : return self . normalize_links () rendering: heading_level: 3 selection: members: - init - load","title":"Docs specs"},{"location":"meta/docs-specs/#documentation-specification","text":"Building on top of the configuration spec implementation, we also incorporate a documentation spec. 
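To make the schema described above concrete, here is a minimal hypothetical sketch of such a spec; the check name, version, and section contents are placeholders and only attributes already covered above are used:

    name: Nagios
    version: 1.0.0
    files:
      - name: README.md
        sections:
          - name: Overview
            header_level: 2
            description: |
              Monitor {name} with the Datadog Agent.
          - template: setup/installation
            overrides:
              description: |
                The Nagios check is included in the [Datadog Agent][1] package.

                [1]: https://docs.datadoghq.com/agent/
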
Similar to configuration specs, these YAML files are located at /assets/documentation/spec.yaml , and referenced in the check's manifest.json file.","title":"Documentation specification"},{"location":"meta/docs-specs/#producer","text":"The producer s job is to read a specification and: Validate for correctness Populate all unset default fields Gather and prioritize other schema for inclusion Resolve any defined templates Normalize links to embedded style Output the complete specification as JSON for arbitrary consumers This spec is dependent on other config files within an integration check, in order of precedence: manifest.json assets/service_checks.json assets/configuration/spec.yaml (included for reference, but unused for now)","title":"Producer"},{"location":"meta/docs-specs/#consumers","text":"Consumers may utilize specs in a number of scenarios, such as: rendering README.md files for git and user documentation rendering HTML files for user documentation on our datadoghq.com site easily updating common components via base template changes creating single-source-of-truth for data such as short_description","title":"Consumers"},{"location":"meta/docs-specs/#schema","text":"The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Nagios , etc. version - The released version of what the spec refers to options - Top-level spec options related to the check overall (optional) files - A list of all files that influence behavior","title":"Schema"},{"location":"meta/docs-specs/#spec-options","text":"Every spec has a set of optional options: autodiscovery - Indicates if this check supports autodiscovery. Default: false","title":"Spec Options"},{"location":"meta/docs-specs/#files","text":"Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) render_name - This is the name of the rendered file, and defaults to README.md . Consumers may choose their own output name, or may read from this value. sections - A list of sections ( REQUIRED )","title":"Files"},{"location":"meta/docs-specs/#sections","text":"Every section has these possible attributes: name - The title of the section. header_level - Level of indentation. tab - If not null, then the name of the tab, and all sections of the same indent must specify. description - Actual text content for the section. May be parameterized using keyword argument formatter strings, see parameterization for more info. Hyperlinks may be embedded or reference-style. parameters - Mapping of extra parameters for string formatting in the description . prepend_text - Text to insert in front of the description field. Useful for overrides. append_text - Text to append after the description field. Useful for overrides. processor - Reference to a Python function which should be invoked. If the function returns None , the default description carries forward, otherwise the results of the function will be used for the description . Used by the data_collected/service_checks template, for example. hidden - Whether or not the section should be publicly exposed. It defaults to false . sections - Nested sections, this will increase the header_level of embedded sections accordingly. template - See templates below for more. overrides - Override specific attributes within a given template. 
See overrides for more.","title":"Sections"},{"location":"meta/docs-specs/#parameters","text":"When constructing each text section, the description field will first prepend and append values from prepend_text and append_text , respectively. Next string formatting operations will take place by using a default set of parameters joined with any parameters explicitly defined in the parameter attribute. Default parameters which will be present for all sections and passed as keyword args during string formatting include: name - the formal name of the check all fields from manifest.json objects from service_checks.json","title":"Parameters"},{"location":"meta/docs-specs/#templates","text":"Every section may reference pre-defined doc templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class.","title":"Templates"},{"location":"meta/docs-specs/#overrides","text":"Commonly used to update a description of a given template, or to inject specific parameters: sections : - template : setup/installation overrides : description : | The Nagios check is included in the [Datadog Agent][1] package, so you don't need to install anything else on your Nagios servers. [1]: https://docs.datadoghq.com/agent/ For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : setup/configuration overrides : templates.log_collection.hidden : true","title":"Overrides"},{"location":"meta/docs-specs/#readme-file-consumer","text":"The README example consumer uses the documentation spec to render the README files that are included with every Integration package.","title":"README file consumer"},{"location":"meta/docs-specs/#links","text":"As a custom with our README.md files, we use reference style links . Each section description may have embedded or reference style links, and as part of the Producer step, these will be all normalized to embedded links. This ensures that any consumers can handle them as needed. For the README consumer, it will translate everything to reference style as part of its output stage.","title":"Links"},{"location":"meta/docs-specs/#usage","text":"Use the --sync flag of the config validation command to render the README files.","title":"Usage"},{"location":"meta/docs-specs/#api","text":"","title":"API"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec","text":"","title":"DocsSpec"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.__init__","text":"Source code in datadog_checks/dev/tooling/specs/docs/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Docs' self . templates = DocsTemplates ( template_paths )","title":"__init__()"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.normalize_links","text":"Translate all reference-style links to inline links. 
Source code in datadog_checks/dev/tooling/specs/docs/core.py def normalize_links ( self ): \"\"\"Translate all reference-style links to inline links.\"\"\" # Markdown doc reference: https://www.markdownguide.org/basic-syntax/#links for fidx , file in enumerate ( self . data [ 'files' ], 1 ): sections = deque ( enumerate ( file [ 'sections' ], 1 )) while sections : sidx , section = sections . popleft () section [ 'prepend_text' ] = self . _normalize ( section [ 'prepend_text' ], fidx , sidx ) section [ 'description' ] = self . _normalize ( section [ 'description' ], fidx , sidx ) section [ 'append_text' ] = self . _normalize ( section [ 'append_text' ], fidx , sidx ) if 'sections' in section : nested_sections = [ ( f ' { sidx } . { subidx } ' , subsection ) for subidx , subsection in enumerate ( section [ 'sections' ], 1 ) ] # extend left backwards for correct order of sections sections . extendleft ( nested_sections [:: - 1 ])","title":"normalize_links()"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.validate","text":"Source code in datadog_checks/dev/tooling/specs/docs/core.py def validate ( self ): spec_validator ( self . data , self ) if self . errors : return self . normalize_links () rendering: heading_level: 3 selection: members: - init - load","title":"validate()"},{"location":"meta/docs/","text":"Documentation \u00b6 Generation \u00b6 Our docs are configured to be rendered by the static site generator MkDocs with the beautiful Material for MkDocs theme. Plugins \u00b6 We use a select few MkDocs plugins to achieve the following: minify HTML ( :octicons-octoface-24: ) display the date of the last Git modification of every page ( :octicons-octoface-24: ) automatically generate docs based on code and docstrings ( :octicons-octoface-24: ) export the site as a PDF ( :octicons-octoface-24: ) Extensions \u00b6 We also depend on a few Python-Markdown extensions to achieve the following: support for emojis, collapsible elements, code highlighting, and other advanced features courtesy of the PyMdown extension suite ( :octicons-octoface-24: ) ability to inline SVG icons from Material , FontAwesome , and Octicons ( :octicons-octoface-24: ) allow arbitrary scripts to modify MkDocs input files ( :octicons-octoface-24: ) automatically generate reference docs for Click -based command line interfaces ( :octicons-octoface-24: ) References \u00b6 All references are automatically available to all pages. Abbreviations \u00b6 These allow for the expansion of text on hover, useful for acronyms and definitions. For example, if you add the following to the list of abbreviations : *[CERN]: European Organization for Nuclear Research then anywhere you type CERN the organization's full name will appear on hover. 
External links \u00b6 All links to external resources should be added to the list of external links rather than defined on a per-page basis, for many reasons: it keeps the Markdown content compact and thus easy to read and modify the ability to re-use a link, even if you forsee no immediate use elsewhere easy automation of stale link detection when links to external resources change, the last date of Git modification displayed on pages will not Scripts \u00b6 We use some scripts to dynamically modify pages before being processed by other extensions and MkDocs itself, to achieve the following: add references to the bottom of every page render the status of various aspects of integrations enumerate all the dependencies that are shipped with the Datadog Agent Build \u00b6 We configure a tox environment called docs that provides all the dependencies necessary to build the documentation. To build and view the documentation in your browser, run the serve command (the first invocation may take a few extra moments): ddev docs serve By default, live reloading is enabled so any modification will be reflected in near-real time. Note: In order to export the site as a PDF, you can use the --pdf flag, but you will need some external dependencies . Deploy \u00b6 Our CI deploys the documentation to GitHub Pages if any changes occur on commits to the master branch. Danger Never make documentation non-deterministic as it will trigger deploys for every single commit. For example, say you want to display the valid values of a CLI option and the enumeration is represented as a set . Formatting the sequence directly will produce inconsistent results because sets do not guarantee order like dictionaries do, so you must sort it first.","title":"Docs"},{"location":"meta/docs/#documentation","text":"","title":"Documentation"},{"location":"meta/docs/#generation","text":"Our docs are configured to be rendered by the static site generator MkDocs with the beautiful Material for MkDocs theme.","title":"Generation"},{"location":"meta/docs/#plugins","text":"We use a select few MkDocs plugins to achieve the following: minify HTML ( :octicons-octoface-24: ) display the date of the last Git modification of every page ( :octicons-octoface-24: ) automatically generate docs based on code and docstrings ( :octicons-octoface-24: ) export the site as a PDF ( :octicons-octoface-24: )","title":"Plugins"},{"location":"meta/docs/#extensions","text":"We also depend on a few Python-Markdown extensions to achieve the following: support for emojis, collapsible elements, code highlighting, and other advanced features courtesy of the PyMdown extension suite ( :octicons-octoface-24: ) ability to inline SVG icons from Material , FontAwesome , and Octicons ( :octicons-octoface-24: ) allow arbitrary scripts to modify MkDocs input files ( :octicons-octoface-24: ) automatically generate reference docs for Click -based command line interfaces ( :octicons-octoface-24: )","title":"Extensions"},{"location":"meta/docs/#references","text":"All references are automatically available to all pages.","title":"References"},{"location":"meta/docs/#abbreviations","text":"These allow for the expansion of text on hover, useful for acronyms and definitions. 
For example, if you add the following to the list of abbreviations : *[CERN]: European Organization for Nuclear Research then anywhere you type CERN the organization's full name will appear on hover.","title":"Abbreviations"},{"location":"meta/docs/#external-links","text":"All links to external resources should be added to the list of external links rather than defined on a per-page basis, for many reasons: it keeps the Markdown content compact and thus easy to read and modify the ability to re-use a link, even if you forsee no immediate use elsewhere easy automation of stale link detection when links to external resources change, the last date of Git modification displayed on pages will not","title":"External links"},{"location":"meta/docs/#scripts","text":"We use some scripts to dynamically modify pages before being processed by other extensions and MkDocs itself, to achieve the following: add references to the bottom of every page render the status of various aspects of integrations enumerate all the dependencies that are shipped with the Datadog Agent","title":"Scripts"},{"location":"meta/docs/#build","text":"We configure a tox environment called docs that provides all the dependencies necessary to build the documentation. To build and view the documentation in your browser, run the serve command (the first invocation may take a few extra moments): ddev docs serve By default, live reloading is enabled so any modification will be reflected in near-real time. Note: In order to export the site as a PDF, you can use the --pdf flag, but you will need some external dependencies .","title":"Build"},{"location":"meta/docs/#deploy","text":"Our CI deploys the documentation to GitHub Pages if any changes occur on commits to the master branch. Danger Never make documentation non-deterministic as it will trigger deploys for every single commit. For example, say you want to display the valid values of a CLI option and the enumeration is represented as a set . 
Formatting the sequence directly will produce inconsistent results because sets do not guarantee order like dictionaries do, so you must sort it first.","title":"Deploy"},{"location":"meta/status/","text":"Status \u00b6 Dashboards \u00b6 78.91% Completed 116/147 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_active_directory azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cockroachdb confluent_platform consul consul_connect containerd coredns couch couchbase cri crio databricks directory disk dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly jmeter journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller nvidia_jetson oom_kill openldap openshift openstack openstack_controller oracle otel pan_firewall pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid statsd system_core systemd tcp_check tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere yarn zk Logs support \u00b6 92.73% Completed 102/110 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer php_fpm postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log yarn zk Recommended monitors \u00b6 17.48% Completed 25/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd 
linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Config specs \u00b6 94.41% Completed 135/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Docs specs \u00b6 0.70% Completed 1/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk E2E tests \u00b6 77.86% Completed 109/140 active_directory 
activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Config validation \u00b6 35.77% Completed 49/137 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_scheduler kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Metadata submission \u00b6 30.71% Completed 43/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce 
marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Process signatures \u00b6 30.56% Completed 44/144 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Agent 8 check signatures \u00b6 50.34% Completed 73/145 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Default saved views (for integrations with logs) \u00b6 44.66% 
Completed 46/103 active_directory activemq activemq_xml aerospike airflow ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium clickhouse confluent_platform consul coredns couch couchbase druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_scheduler kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb win32_event_log yarn zk","title":"Status"},{"location":"meta/status/#status","text":"","title":"Status"},{"location":"meta/status/#dashboards","text":"78.91% Completed 116/147 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_active_directory azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cockroachdb confluent_platform consul consul_connect containerd coredns couch couchbase cri crio databricks directory disk dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly jmeter journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller nvidia_jetson oom_kill openldap openshift openstack openstack_controller oracle otel pan_firewall pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid statsd system_core systemd tcp_check tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere yarn zk","title":"Dashboards"},{"location":"meta/status/#logs-support","text":"92.73% Completed 102/110 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer php_fpm postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat 
twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log yarn zk","title":"Logs support"},{"location":"meta/status/#recommended-monitors","text":"17.48% Completed 25/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Recommended monitors"},{"location":"meta/status/#config-specs","text":"94.41% Completed 135/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Config specs"},{"location":"meta/status/#docs-specs","text":"0.70% Completed 1/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink 
fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Docs specs"},{"location":"meta/status/#e2e-tests","text":"77.86% Completed 109/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"E2E tests"},{"location":"meta/status/#config-validation","text":"35.77% Completed 49/137 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_scheduler kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process 
proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Config validation"},{"location":"meta/status/#metadata-submission","text":"30.71% Completed 43/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Metadata submission"},{"location":"meta/status/#process-signatures","text":"30.56% Completed 44/144 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Process signatures"},{"location":"meta/status/#agent-8-check-signatures","text":"50.34% Completed 73/145 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra 
cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Agent 8 check signatures"},{"location":"meta/status/#default-saved-views-for-integrations-with-logs","text":"44.66% Completed 46/103 active_directory activemq activemq_xml aerospike airflow ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium clickhouse confluent_platform consul coredns couch couchbase druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_scheduler kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb win32_event_log yarn zk","title":"Default saved views (for integrations with logs)"},{"location":"process/integration-release/","text":"Integration release \u00b6 Each Agent integration has its own release cycle. Many integrations are actively developed and released often while some are rarely touched (usually indicating feature-completeness). Versioning \u00b6 All releases adhere to Semantic Versioning . Tags in the form - are added to the Git repository. Therefore, it's possible to checkout and build the code for a certain version of a specific check. Setup \u00b6 Configure your GitHub auth. Identify changes \u00b6 Note If you already know which integration you'd like to release, skip this section. To see all checks that need to be released, run ddev release show ready . Steps \u00b6 Checkout and pull the most recent version of the master branch. git checkout master git pull Important Not using the latest version of master may cause errors in the build pipeline . Review which PRs were merged in between the latest release and the master branch. ddev release show changes You should ensure that PR titles and changelog labels are correct. 
Create a release branch from master (suggested naming format is /release- ). This has the purpose of opening a PR so others can review the changelog. Important It is critical the branch name is not in the form /- because one of our Gitlab jobs is triggered whenever a Git reference matches that pattern, see !3843 & !3980 . Make the release. ddev release make You may need to touch your Yubikey multiple times. This will automatically: update the version in /datadog_checks//__about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes Push your branch to GitHub and create a pull request. Update the title of the PR to something like [Release] Bumped version to . Ask for a review in Slack. Merge the pull request after approval. PyPI \u00b6 If you released datadog_checks_base or datadog_checks_dev then these must be uploaded to PyPI for use by integrations-extras . This is automatically handled by two GitHub Action jobs: release-base.yml and release-dev.yml . In case you need to do it manually: ddev release upload datadog_checks_[base|dev] Metadata \u00b6 You need to run certain jobs if any changes modified integration metadata. See the Declarative Integration Pipeline wiki. Bulk releases \u00b6 To create a release for every integration that has changed, use all as the integration name in the ddev release make step above. ddev release make all You may also pass a comma-separated list of checks to skip using the --exclude option, e.g.: ddev release make all --exclude datadog_checks_dev Warning There is a known GitHub limitation where if an issue has too many labels (100), its state cannot be modified. If you cannot merge the pull request: Run the remove-labels command After merging, manually add back the changelog/no-changelog label Betas \u00b6 Creating pre-releases is the same workflow except you do not open a pull request but rather release directly from a branch. In the ddev release make step set --version to [major|minor|patch],[rc|alpha|beta] . For example, if the current version of an integration is 1.1.3 , the following command will bump it to 1.2.0-rc.1 : ddev release make --version minor,rc After pushing the release commits to GitHub, run: ddev release tag This manually triggers the build pipeline . To increment the version, omit the first part, e.g.: ddev release make --version rc New integrations \u00b6 To bump a new integration to 1.0.0 if it is not already there, run: ddev release make --new To ensure this for all integrations, run: ddev release make all --new If a release was created, run: ddev agent requirements Troubleshooting \u00b6 If you encounter errors when signing with your Yubikey, ensure you ran gpg --import .gpg.pub . If the build pipeline failed, it is likely that you modified a file in the pull request without re-signing. To resolve this, you'll need to bootstrap metadata for every integration: Checkout and pull the most recent version of the master branch. git checkout master git pull Sign everything. ddev release make all --sign-only You may need to touch your Yubikey multiple times. Push your branch to GitHub. Manually trigger a build. git tag bootstrap-1.0.0 -m bootstrap-1.0.0 The tag name is irrelevant, it just needs to look like an integration release. Gitlab doesn't sync deleted tags, so any subsequent manual trigger tags will need to increment the version number. Delete the branch and tag, locally and on GitHub. 
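For that final cleanup step, one possible sequence is shown below; the branch name is only a placeholder for whatever branch you pushed:

    git branch -D my-bootstrap-branch               # delete the local branch (placeholder name)
    git tag -d bootstrap-1.0.0                      # delete the local tag
    git push origin --delete my-bootstrap-branch    # delete the branch on GitHub
    git push origin --delete bootstrap-1.0.0        # delete the tag on GitHub
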
Releasers \u00b6 For whom it may concern, the following is a list of GPG public key fingerprints known to correspond to developers who, at the time of writing (28-02-2020), can trigger a build by signing in-toto metadata . Christine Chen 57CE 2495 EA48 D456 B9C4 BA4F 66E8 2239 9141 D9D3 36C0 82E7 38C7 B4A1 E169 11C0 D633 59C4 875A 1A9A Paul Coignet 024E 42FE 76AD F19F 5D57 7503 07E5 2EA3 88E4 08FD 1286 0553 D1DC 93A7 2CD1 6956 2D98 DCE7 FBFF C9C2 Dave Coleman 8278 C406 C1BB F1F2 DFBB 5AD6 0AE7 E246 4F8F D375 98A5 37CD CCA2 8DFF B35B 0551 5D50 0742 90F6 422F Paola Ducolin EAC5 F27E C6B1 A814 1222 1942 C4E1 549E 937E F5A2 A40A DD71 41EB C767 BBFB E0B8 9128 2E2F E536 C858 Mike Garabedian F90C 0097 67F2 4B27 9DC2 C83D A227 6601 6CB4 CF1D 2669 6E67 28D2 0CB0 C1E0 D2BE 6643 5756 8398 9306 Thomas Herv\u00e9 59DB 2532 75A5 BD4E 55C7 C5AA 0678 55A2 8E90 3B3B E2BD 994F 95C0 BC0B B923 1D21 F752 1EC8 F485 90D0 Ofek Lev C295 CF63 B355 DFEB 3316 02F7 F426 A944 35BE 6F99 D009 8861 8057 D2F4 D855 5A62 B472 442C B7D3 AF42 Florimond Manca B023 B02A 0331 9CD8 D19A 4328 83ED 89A4 5548 48FC 0992 11D9 AA67 D21E 7098 7B59 7C7D CB06 C9F2 0C13 Greg Marabout Demazure 01CC 90D7 F047 93D4 30DF 9C7B 825B 84BD 1EE8 E57C C719 8925 CAE5 11DE 7FC2 EB15 A9B3 5A96 7570 B459 Julia Simon 4A54 09A2 3361 109C 047C C76A DC8A 42C2 8B95 0123 129A 26CF A726 3C85 98A6 94B0 8659 1366 CBA1 BF3C Florian Veaux 3109 1C85 5D78 7789 93E5 0348 9BFE 5299 D02F 83E9 7A73 0C5E 48B0 6986 1045 CF8B 8B2D 16D6 5DE4 C95E Alexandre Yang FBC6 3AE0 9D0C A9B4 584C 9D7F 4291 A11A 36EA 52CD F8D9 181D 9309 F8A4 957D 636A 27F8 F48B 18AE 91AA","title":"Integration release"},{"location":"process/integration-release/#integration-release","text":"Each Agent integration has its own release cycle. Many integrations are actively developed and released often while some are rarely touched (usually indicating feature-completeness).","title":"Integration release"},{"location":"process/integration-release/#versioning","text":"All releases adhere to Semantic Versioning . Tags in the form - are added to the Git repository. Therefore, it's possible to checkout and build the code for a certain version of a specific check.","title":"Versioning"},{"location":"process/integration-release/#setup","text":"Configure your GitHub auth.","title":"Setup"},{"location":"process/integration-release/#identify-changes","text":"Note If you already know which integration you'd like to release, skip this section. To see all checks that need to be released, run ddev release show ready .","title":"Identify changes"},{"location":"process/integration-release/#steps","text":"Checkout and pull the most recent version of the master branch. git checkout master git pull Important Not using the latest version of master may cause errors in the build pipeline . Review which PRs were merged in between the latest release and the master branch. ddev release show changes You should ensure that PR titles and changelog labels are correct. Create a release branch from master (suggested naming format is /release- ). This has the purpose of opening a PR so others can review the changelog. Important It is critical the branch name is not in the form /- because one of our Gitlab jobs is triggered whenever a Git reference matches that pattern, see !3843 & !3980 . Make the release. ddev release make You may need to touch your Yubikey multiple times. 
This will automatically: update the version in /datadog_checks//__about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes Push your branch to GitHub and create a pull request. Update the title of the PR to something like [Release] Bumped version to . Ask for a review in Slack. Merge the pull request after approval.","title":"Steps"},{"location":"process/integration-release/#pypi","text":"If you released datadog_checks_base or datadog_checks_dev then these must be uploaded to PyPI for use by integrations-extras . This is automatically handled by two GitHub Action jobs: release-base.yml and release-dev.yml . In case you need to do it manually: ddev release upload datadog_checks_[base|dev]","title":"PyPI"},{"location":"process/integration-release/#metadata","text":"You need to run certain jobs if any changes modified integration metadata. See the Declarative Integration Pipeline wiki.","title":"Metadata"},{"location":"process/integration-release/#bulk-releases","text":"To create a release for every integration that has changed, use all as the integration name in the ddev release make step above. ddev release make all You may also pass a comma-separated list of checks to skip using the --exclude option, e.g.: ddev release make all --exclude datadog_checks_dev Warning There is a known GitHub limitation where if an issue has too many labels (100), its state cannot be modified. If you cannot merge the pull request: Run the remove-labels command After merging, manually add back the changelog/no-changelog label","title":"Bulk releases"},{"location":"process/integration-release/#betas","text":"Creating pre-releases is the same workflow except you do not open a pull request but rather release directly from a branch. In the ddev release make step set --version to [major|minor|patch],[rc|alpha|beta] . For example, if the current version of an integration is 1.1.3 , the following command will bump it to 1.2.0-rc.1 : ddev release make --version minor,rc After pushing the release commits to GitHub, run: ddev release tag This manually triggers the build pipeline . To increment the version, omit the first part, e.g.: ddev release make --version rc","title":"Betas"},{"location":"process/integration-release/#new-integrations","text":"To bump a new integration to 1.0.0 if it is not already there, run: ddev release make --new To ensure this for all integrations, run: ddev release make all --new If a release was created, run: ddev agent requirements","title":"New integrations"},{"location":"process/integration-release/#troubleshooting","text":"If you encounter errors when signing with your Yubikey, ensure you ran gpg --import .gpg.pub . If the build pipeline failed, it is likely that you modified a file in the pull request without re-signing. To resolve this, you'll need to bootstrap metadata for every integration: Checkout and pull the most recent version of the master branch. git checkout master git pull Sign everything. ddev release make all --sign-only You may need to touch your Yubikey multiple times. Push your branch to GitHub. Manually trigger a build. git tag bootstrap-1.0.0 -m bootstrap-1.0.0 The tag name is irrelevant, it just needs to look like an integration release. Gitlab doesn't sync deleted tags, so any subsequent manual trigger tags will need to increment the version number. 
Delete the branch and tag, locally and on GitHub.","title":"Troubleshooting"},{"location":"process/integration-release/#releasers","text":"For whom it may concern, the following is a list of GPG public key fingerprints known to correspond to developers who, at the time of writing (28-02-2020), can trigger a build by signing in-toto metadata . Christine Chen 57CE 2495 EA48 D456 B9C4 BA4F 66E8 2239 9141 D9D3 36C0 82E7 38C7 B4A1 E169 11C0 D633 59C4 875A 1A9A Paul Coignet 024E 42FE 76AD F19F 5D57 7503 07E5 2EA3 88E4 08FD 1286 0553 D1DC 93A7 2CD1 6956 2D98 DCE7 FBFF C9C2 Dave Coleman 8278 C406 C1BB F1F2 DFBB 5AD6 0AE7 E246 4F8F D375 98A5 37CD CCA2 8DFF B35B 0551 5D50 0742 90F6 422F Paola Ducolin EAC5 F27E C6B1 A814 1222 1942 C4E1 549E 937E F5A2 A40A DD71 41EB C767 BBFB E0B8 9128 2E2F E536 C858 Mike Garabedian F90C 0097 67F2 4B27 9DC2 C83D A227 6601 6CB4 CF1D 2669 6E67 28D2 0CB0 C1E0 D2BE 6643 5756 8398 9306 Thomas Herv\u00e9 59DB 2532 75A5 BD4E 55C7 C5AA 0678 55A2 8E90 3B3B E2BD 994F 95C0 BC0B B923 1D21 F752 1EC8 F485 90D0 Ofek Lev C295 CF63 B355 DFEB 3316 02F7 F426 A944 35BE 6F99 D009 8861 8057 D2F4 D855 5A62 B472 442C B7D3 AF42 Florimond Manca B023 B02A 0331 9CD8 D19A 4328 83ED 89A4 5548 48FC 0992 11D9 AA67 D21E 7098 7B59 7C7D CB06 C9F2 0C13 Greg Marabout Demazure 01CC 90D7 F047 93D4 30DF 9C7B 825B 84BD 1EE8 E57C C719 8925 CAE5 11DE 7FC2 EB15 A9B3 5A96 7570 B459 Julia Simon 4A54 09A2 3361 109C 047C C76A DC8A 42C2 8B95 0123 129A 26CF A726 3C85 98A6 94B0 8659 1366 CBA1 BF3C Florian Veaux 3109 1C85 5D78 7789 93E5 0348 9BFE 5299 D02F 83E9 7A73 0C5E 48B0 6986 1045 CF8B 8B2D 16D6 5DE4 C95E Alexandre Yang FBC6 3AE0 9D0C A9B4 584C 9D7F 4291 A11A 36EA 52CD F8D9 181D 9309 F8A4 957D 636A 27F8 F48B 18AE 91AA","title":"Releasers"},{"location":"process/agent-release/post-release/","text":"Post release \u00b6 Finalize \u00b6 On the day of the final stable release, tag the branch with ..0 . After the main Agent release manager confirms successful deployment to a few targets, create a branch based on master and run: ddev agent changelog ddev agent integrations See more options for ddev agent changelog and ddev agent integrations . Run the following commands to update the contents: ddev agent changelog -w -f to update the existing AGENT_CHANGELOG file ddev agent integrations -w -f to update the existing AGENT_INTEGRATIONS file. ddev agent integrations-changelog -w to add Agent version to existing CHANGELOG.md releases of integrations. Create a pull request and wait for approval before merging. Patches \u00b6 Important Only critical fixes are included in patches. See definition for critical fixes . Releases after the final Agent release should be reserved for critical issues only. Cherry-picking commits and releases for the patch release is mostly similar to the process for preparing release candidates . However, it's possible that from the time code freeze ended and a bugfix is needed, the integration has other non-critical commits or was released. The next section will describe the process for preparing the patch release candidates. Multiple check releases between bugfix release \u00b6 Given the effort of QA-ing the Agent release, any new changes should be carefully selected and included for the patch. Follow the following steps to add patch release: Cherry-pick the bugfix commit to the release branch . Release the integration on the release branch. Make a pull request with integration release , then merge it to the release branch. Important Remember to trigger the release pipeline and build the wheel. 
You can do so by tagging the release : `ddev release tag ` Note: only release PRs merged to master automatically build a wheel. Then pull the latest release branch so your branch has both the bugfix commit and release commit. Tag the branch with the new bumped version ..-rc.1 . When the patch release is ready, follow the same steps to finalize the release . Also manually update the changelog of the integrations that were released on the release branch, see example .","title":"Post release"},{"location":"process/agent-release/post-release/#post-release","text":"","title":"Post release"},{"location":"process/agent-release/post-release/#finalize","text":"On the day of the final stable release, tag the branch with ..0 . After the main Agent release manager confirms successful deployment to a few targets, create a branch based on master and run: ddev agent changelog ddev agent integrations See more options for ddev agent changelog and ddev agent integrations . Run the following commands to update the contents: ddev agent changelog -w -f to update the existing AGENT_CHANGELOG file ddev agent integrations -w -f to update the existing AGENT_INTEGRATIONS file. ddev agent integrations-changelog -w to add Agent version to existing CHANGELOG.md releases of integrations. Create a pull request and wait for approval before merging.","title":"Finalize"},{"location":"process/agent-release/post-release/#patches","text":"Important Only critical fixes are included in patches. See definition for critical fixes . Releases after the final Agent release should be reserved for critical issues only. Cherry-picking commits and releases for the patch release is mostly similar to the process for preparing release candidates . However, it's possible that from the time code freeze ended and a bugfix is needed, the integration has other non-critical commits or was released. The next section will describe the process for preparing the patch release candidates.","title":"Patches"},{"location":"process/agent-release/post-release/#multiple-check-releases-between-bugfix-release","text":"Given the effort of QA-ing the Agent release, any new changes should be carefully selected and included for the patch. Follow the following steps to add patch release: Cherry-pick the bugfix commit to the release branch . Release the integration on the release branch. Make a pull request with integration release , then merge it to the release branch. Important Remember to trigger the release pipeline and build the wheel. You can do so by tagging the release : `ddev release tag ` Note: only release PRs merged to master automatically build a wheel. Then pull the latest release branch so your branch has both the bugfix commit and release commit. Tag the branch with the new bumped version ..-rc.1 . When the patch release is ready, follow the same steps to finalize the release . Also manually update the changelog of the integrations that were released on the release branch, see example .","title":"Multiple check releases between bugfix release"},{"location":"process/agent-release/pre-release/","text":"Pre release \u00b6 A new minor version of the Agent is released every 6 weeks (approximately). Each release ships a snapshot of integrations-core . Setup \u00b6 Ensure that you have configured the following: GitHub credentials Trello credentials Trello team mappings Before Freeze \u00b6 Update style dependencies to latest versions (except if comments say otherwise) via PR. Example: ISORT_DEP , BLACK_DEP , etc. 
Check that the master , py2 and base_check builds are green. Freeze \u00b6 At midnight (EDT/EST) on the Friday before QA week we freeze, at which point the release manager will release all integrations with pending changes then branch off. Release \u00b6 Make a pull request to release any new integrations , then merge it and pull master Make a pull request to release all changed integrations , then merge it and pull master Get 2+ thorough reviews on the changelogs. Entries should have appropriate SemVer levels (e.g. Changed entries must refer to breaking changes only). See also PR guidelines . Consider x-posting the PR to Agent teams that have integrations in integrations-core , so they can check relevant changelogs too. Important Update PyPI if you released datadog_checks_base or datadog_checks_dev . Branch \u00b6 Create a branch based on master named after the highest version of the Agent being released in the form ..x Push the branch to GitHub Tag \u00b6 Run: git tag ..0-rc.1 -m ..0-rc.1 git push origin ..0-rc.1 QA week \u00b6 We test all changes to integrations that were introduced since the last release. Create items \u00b6 Create an item for every change in our board using the Trello subcommand called testable . For example: ddev release trello testable 7.17.1 7.18.0-rc.1 or if the tag is not ready yet: ddev release trello testable 7.17.1 origin/master would select all commits that were merged between the Git references. The command will display each change and prompt you to assign a team or skip. Purely documentation changes are automatically skipped. Cards are automatically assigned if $trello_users_$team table is configured . Release candidates \u00b6 The main Agent release manager will increment and build a new rc every day a bug fix needs to be tested until all QA is complete. Before each build is triggered: Merge any fixes that have been approved, then pull master Release all changed integrations with the exception of datadog_checks_dev For each fix merged, you must cherry-pick to the branch : The commit to master itself The release commit, so the shipped versions match the individually released integrations After all fixes have been cherry-picked: Push the changes to GitHub Tag with the appropriate rc number even if there were no changes Communication \u00b6 The Agent Release Manager will post a daily status for the entire release cycle. Reply in the thread with any pending PRs meant for the next RC and update the spreadsheet PRs included in Agent RCs . Logs \u00b6 Each release candidate is deployed in a staging environment. We observe the WARN or ERROR level logs filtered with the facets Service:datadog-agent and index:main and LogMessage to see if any unexpected or frequent errors start occurring that was not caught during QA. Release week \u00b6 After QA week ends the code freeze is lifted, even if there are items yet to be tested. The release manager will continue the same process outlined above. Notify the Agent Release Manager when code freeze ends.","title":"Pre release"},{"location":"process/agent-release/pre-release/#pre-release","text":"A new minor version of the Agent is released every 6 weeks (approximately). 
Each release ships a snapshot of integrations-core .","title":"Pre release"},{"location":"process/agent-release/pre-release/#setup","text":"Ensure that you have configured the following: GitHub credentials Trello credentials Trello team mappings","title":"Setup"},{"location":"process/agent-release/pre-release/#before-freeze","text":"Update style dependencies to latest versions (except if comments say otherwise) via PR. Example: ISORT_DEP , BLACK_DEP , etc. Check that the master , py2 and base_check builds are green.","title":"Before Freeze"},{"location":"process/agent-release/pre-release/#freeze","text":"At midnight (EDT/EST) on the Friday before QA week we freeze, at which point the release manager will release all integrations with pending changes then branch off.","title":"Freeze"},{"location":"process/agent-release/pre-release/#release","text":"Make a pull request to release any new integrations , then merge it and pull master Make a pull request to release all changed integrations , then merge it and pull master Get 2+ thorough reviews on the changelogs. Entries should have appropriate SemVer levels (e.g. Changed entries must refer to breaking changes only). See also PR guidelines . Consider x-posting the PR to Agent teams that have integrations in integrations-core , so they can check relevant changelogs too. Important Update PyPI if you released datadog_checks_base or datadog_checks_dev .","title":"Release"},{"location":"process/agent-release/pre-release/#branch","text":"Create a branch based on master named after the highest version of the Agent being released in the form ..x Push the branch to GitHub","title":"Branch"},{"location":"process/agent-release/pre-release/#tag","text":"Run: git tag ..0-rc.1 -m ..0-rc.1 git push origin ..0-rc.1","title":"Tag"},{"location":"process/agent-release/pre-release/#qa-week","text":"We test all changes to integrations that were introduced since the last release.","title":"QA week"},{"location":"process/agent-release/pre-release/#create-items","text":"Create an item for every change in our board using the Trello subcommand called testable . For example: ddev release trello testable 7.17.1 7.18.0-rc.1 or if the tag is not ready yet: ddev release trello testable 7.17.1 origin/master would select all commits that were merged between the Git references. The command will display each change and prompt you to assign a team or skip. Purely documentation changes are automatically skipped. Cards are automatically assigned if $trello_users_$team table is configured .","title":"Create items"},{"location":"process/agent-release/pre-release/#release-candidates","text":"The main Agent release manager will increment and build a new rc every day a bug fix needs to be tested until all QA is complete. Before each build is triggered: Merge any fixes that have been approved, then pull master Release all changed integrations with the exception of datadog_checks_dev For each fix merged, you must cherry-pick to the branch : The commit to master itself The release commit, so the shipped versions match the individually released integrations After all fixes have been cherry-picked: Push the changes to GitHub Tag with the appropriate rc number even if there were no changes","title":"Release candidates"},{"location":"process/agent-release/pre-release/#communication","text":"The Agent Release Manager will post a daily status for the entire release cycle. 
Reply in the thread with any pending PRs meant for the next RC and update the spreadsheet PRs included in Agent RCs .","title":"Communication"},{"location":"process/agent-release/pre-release/#logs","text":"Each release candidate is deployed in a staging environment. We observe the WARN or ERROR level logs filtered with the facets Service:datadog-agent and index:main and LogMessage to see if any unexpected or frequent errors start occurring that was not caught during QA.","title":"Logs"},{"location":"process/agent-release/pre-release/#release-week","text":"After QA week ends the code freeze is lifted, even if there are items yet to be tested. The release manager will continue the same process outlined above. Notify the Agent Release Manager when code freeze ends.","title":"Release week"},{"location":"tutorials/memory-profiling/","text":"Memory profiling \u00b6","title":"Memory profiling"},{"location":"tutorials/memory-profiling/#memory-profiling","text":"","title":"Memory profiling"},{"location":"tutorials/jmx/integration/","text":"JMX integration \u00b6 Tutorial for starting a JMX integration Step 1: Create a JMX integration scaffolding \u00b6 ddev create --type jmx MyJMXIntegration JMX integration contains specific init configs and instance configs: init_config : is_jmx : true # tells the Agent that the integration is a JMX type of integration collect_default_metrics : true # if true, metrics declared in `metrics.yaml` are collected instances : - host : # JMX hostname port : # JMX port ... Other init and instance configs can be found on JMX integration page Step 2: Define metrics you want to collect \u00b6 Select what metrics you want to collect from JMX. Available metrics can be usually found on official documentation of the service you want to monitor. You can also use tools like VisualVM , JConsole or jmxterm to explore the available JMX beans and their descriptions. Step 3: Define metrics filters \u00b6 Edit the metrics.yaml to define the filters for collecting metrics. The metrics filters format details can be found on JMX integration doc JMXFetch test cases also help understanding how metrics filters work and provide many examples. Example of metrics.yaml jmx_metrics : - include : domain : org.apache.activemq destinationType : Queue attribute : AverageEnqueueTime : alias : activemq.queue.avg_enqueue_time metric_type : gauge ConsumerCount : alias : activemq.queue.consumer_count metric_type : gauge Testing \u00b6 Using ddev tool , you can test against the JMX service by providing a dd_environment in tests/conftest.py like this one: @pytest . fixture ( scope = \"session\" ) def dd_environment (): compose_file = os . path . join ( HERE , 'compose' , 'docker-compose.yaml' ) with docker_run ( compose_file , conditions = [ # Kafka Broker CheckDockerLogs ( 'broker' , 'Monitored service is now ready' ), ], ): yield CHECK_CONFIG , { 'use_jmx' : True } And a e2e test like: @pytest . mark . e2e def test ( dd_agent_check ): instance = {} aggregator = dd_agent_check ( instance ) for metric in ACTIVEMQ_E2E_METRICS + JVM_E2E_METRICS : aggregator . assert_metric ( metric ) aggregator . assert_all_metrics_covered () aggregator . 
assert_metrics_using_metadata ( get_metadata_metrics (), exclude = JVM_E2E_METRICS ) Real examples of: JMX dd_environment JMX e2e test","title":"JMX integration"},{"location":"tutorials/jmx/integration/#jmx-integration","text":"Tutorial for starting a JMX integration","title":"JMX integration"},{"location":"tutorials/jmx/integration/#step-1-create-a-jmx-integration-scaffolding","text":"ddev create --type jmx MyJMXIntegration JMX integration contains specific init configs and instance configs: init_config : is_jmx : true # tells the Agent that the integration is a JMX type of integration collect_default_metrics : true # if true, metrics declared in `metrics.yaml` are collected instances : - host : # JMX hostname port : # JMX port ... Other init and instance configs can be found on JMX integration page","title":"Step 1: Create a JMX integration scaffolding"},{"location":"tutorials/jmx/integration/#step-2-define-metrics-you-want-to-collect","text":"Select what metrics you want to collect from JMX. Available metrics can be usually found on official documentation of the service you want to monitor. You can also use tools like VisualVM , JConsole or jmxterm to explore the available JMX beans and their descriptions.","title":"Step 2: Define metrics you want to collect"},{"location":"tutorials/jmx/integration/#step-3-define-metrics-filters","text":"Edit the metrics.yaml to define the filters for collecting metrics. The metrics filters format details can be found on JMX integration doc JMXFetch test cases also help understanding how metrics filters work and provide many examples. Example of metrics.yaml jmx_metrics : - include : domain : org.apache.activemq destinationType : Queue attribute : AverageEnqueueTime : alias : activemq.queue.avg_enqueue_time metric_type : gauge ConsumerCount : alias : activemq.queue.consumer_count metric_type : gauge","title":"Step 3: Define metrics filters"},{"location":"tutorials/jmx/integration/#testing","text":"Using ddev tool , you can test against the JMX service by providing a dd_environment in tests/conftest.py like this one: @pytest . fixture ( scope = \"session\" ) def dd_environment (): compose_file = os . path . join ( HERE , 'compose' , 'docker-compose.yaml' ) with docker_run ( compose_file , conditions = [ # Kafka Broker CheckDockerLogs ( 'broker' , 'Monitored service is now ready' ), ], ): yield CHECK_CONFIG , { 'use_jmx' : True } And a e2e test like: @pytest . mark . e2e def test ( dd_agent_check ): instance = {} aggregator = dd_agent_check ( instance ) for metric in ACTIVEMQ_E2E_METRICS + JVM_E2E_METRICS : aggregator . assert_metric ( metric ) aggregator . assert_all_metrics_covered () aggregator . assert_metrics_using_metadata ( get_metadata_metrics (), exclude = JVM_E2E_METRICS ) Real examples of: JMX dd_environment JMX e2e test","title":"Testing"},{"location":"tutorials/jmx/tools/","text":"JMX Tools \u00b6 List JMX beans using JMXTerm \u00b6 curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost: domains beans Example output: $ curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar $ java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost:1616 Welcome to JMX terminal. Type \"help\" for available commands. 
$>domains #following domains are available JMImplementation com.sun.management io.fabric8.insight java.lang java.nio java.util.logging jmx4perl jolokia org.apache.activemq $>beans #domain = JMImplementation: JMImplementation:type=MBeanServerDelegate #domain = com.sun.management: com.sun.management:type=DiagnosticCommand com.sun.management:type=HotSpotDiagnostic #domain = io.fabric8.insight: io.fabric8.insight:type=LogQuery #domain = java.lang: java.lang:name=Code Cache,type=MemoryPool java.lang:name=CodeCacheManager,type=MemoryManager java.lang:name=Compressed Class Space,type=MemoryPool java.lang:name=Metaspace Manager,type=MemoryManager java.lang:name=Metaspace,type=MemoryPool java.lang:name=PS Eden Space,type=MemoryPool java.lang:name=PS MarkSweep,type=GarbageCollector java.lang:name=PS Old Gen,type=MemoryPool java.lang:name=PS Scavenge,type=GarbageCollector java.lang:name=PS Survivor Space,type=MemoryPool java.lang:type=ClassLoading java.lang:type=Compilation java.lang:type=Memory java.lang:type=OperatingSystem java.lang:type=Runtime java.lang:type=Threading [...] List JMX beans using JMXTerm with extra jars \u00b6 In the example below, the extra jar is jboss-client.jar . curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -cp /wildfly-17.0.1.Final/bin/client/jboss-client.jar:/tmp/jmxterm-1.0.1-uber.jar org.cyclopsgroup.jmxterm.boot.CliMain --url service:jmx:remote+http://localhost:9990 -u datadog -p pa$$word domains beans","title":"JMX Tools"},{"location":"tutorials/jmx/tools/#jmx-tools","text":"","title":"JMX Tools"},{"location":"tutorials/jmx/tools/#list-jmx-beans-using-jmxterm","text":"curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost: domains beans Example output: $ curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar $ java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost:1616 Welcome to JMX terminal. Type \"help\" for available commands. $>domains #following domains are available JMImplementation com.sun.management io.fabric8.insight java.lang java.nio java.util.logging jmx4perl jolokia org.apache.activemq $>beans #domain = JMImplementation: JMImplementation:type=MBeanServerDelegate #domain = com.sun.management: com.sun.management:type=DiagnosticCommand com.sun.management:type=HotSpotDiagnostic #domain = io.fabric8.insight: io.fabric8.insight:type=LogQuery #domain = java.lang: java.lang:name=Code Cache,type=MemoryPool java.lang:name=CodeCacheManager,type=MemoryManager java.lang:name=Compressed Class Space,type=MemoryPool java.lang:name=Metaspace Manager,type=MemoryManager java.lang:name=Metaspace,type=MemoryPool java.lang:name=PS Eden Space,type=MemoryPool java.lang:name=PS MarkSweep,type=GarbageCollector java.lang:name=PS Old Gen,type=MemoryPool java.lang:name=PS Scavenge,type=GarbageCollector java.lang:name=PS Survivor Space,type=MemoryPool java.lang:type=ClassLoading java.lang:type=Compilation java.lang:type=Memory java.lang:type=OperatingSystem java.lang:type=Runtime java.lang:type=Threading [...]","title":"List JMX beans using JMXTerm"},{"location":"tutorials/jmx/tools/#list-jmx-beans-using-jmxterm-with-extra-jars","text":"In the example below, the extra jar is jboss-client.jar . 
curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -cp /wildfly-17.0.1.Final/bin/client/jboss-client.jar:/tmp/jmxterm-1.0.1-uber.jar org.cyclopsgroup.jmxterm.boot.CliMain --url service:jmx:remote+http://localhost:9990 -u datadog -p pa$$word domains beans","title":"List JMX beans using JMXTerm with extra jars"},{"location":"tutorials/snmp/how-to/","text":"SNMP How-To \u00b6 Simulate SNMP devices \u00b6 SNMP is a protocol for gathering metrics from network devices, but automated testing of the integration would not be practical nor reliable if we used actual devices. Our approach is to use a simulated SNMP device that responds to SNMP queries using simulation data . This simulated device is brought up as a Docker container when starting the SNMP test environment using: ddev env start snmp [ ... ] Test SNMP profiles locally \u00b6 Once the environment is up and running, you can modify the instance configuration to test profiles that support simulated metrics. The following is an example of an instance configured to use the Cisco Nexus profile. init_config : profiles : cisco_nexus : definition_file : cisco-nexus.yaml instances : - community_string : cisco_nexus # (1.) ip_address : # (2.) profile : cisco_nexus name : localhost port : 1161 The community_string must match the corresponding device .snmprec file name. For example, myprofile.snmprec gives community_string: myprofile . This also applies to walk files : myprofile.snmpwalk gives community_string: myprofile . To find the IP address of the SNMP container, run: docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dd-snmp Run SNMP queries \u00b6 With the test environment is up and running, we can issue SNMP queries to the simulated device using a command line SNMP client. Prerequisites \u00b6 Make sure you have the Net-SNMP tools installed on your machine. These should come pre-installed by default on Linux and macOS. If necessary, you can download them on the Net-SNMP website . Available commands \u00b6 The Net-SNMP tools provide a number of commands to interact with SNMP devices. The most commonly used commands are: snmpget : to issue an SNMP GET query. snmpgetnext : to issue an SNMP GETNEXT query. snmpwalk : to query an entire OID sub-tree at once. snmptable : to query rows in an SNMP table. Examples \u00b6 GET query \u00b6 To query a specific OID from a device, we can use the snmpget command. For example, the following command will query sysDescr OID of an SNMP device, which returns its human-readable description: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 system.sysDescr.0 SNMPv2-MIB::sysDescr.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 SNMPv2-MIB::sysORUpTime.1 = Timeticks: (9) 0:00:00.09 Let's break this command down: snmpget : this command sends an SNMP GET request, and can be used to query the value of an OID. Here, we are requesting the system.sysDescr.0 OID. -v 2c : instructs your SNMP client to send the request using SNMP version 2c. See SNMP Versions . -c public : instructs the SNMP client to send the community string public along with our request. (This is a form of authentication provided by SNMP v2. See SNMP Versions .) 127.0.0.1:1161 : this is the host and port where the simulated SNMP agent is available at. (Confirm the port used by the ddev environment by inspecting the Docker port mapping via $ docker ps .) 
system.sysDescr.0 : this is the OID that the client should request. In practice this can refer to either a fully-resolved OID (e.g. 1.3.6.1.4.1[...] ), or a label (e.g. sysDescr.0 ). -IR : this option allows us to use labels for OIDs that aren't in the generic 1.3.6.1.2.1.* sub-tree (see: The OID tree ). TL;DR: always use this option when working with OIDs coming from vendor-specific MIBs. Tip If the above command fails, try using the explicit OID like so: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 iso.3.6.1.2.1.1.1.0 Table query \u00b6 For tables, use the snmptable command, which will output the rows in the table in a tabular format. Its arguments and options are similar to snmpget . $ snmptable -v 2c -c public -IR -Os 127 .0.0.1:1161 hrStorageTable SNMP table: hrStorageTable hrStorageIndex hrStorageType hrStorageDescr hrStorageAllocationUnits hrStorageSize hrStorageUsed hrStorageAllocationFailures 1 hrStorageRam Physical memory 1024 Bytes 2046940 1969964 ? 3 hrStorageVirtualMemory Virtual memory 1024 Bytes 3095512 1969964 ? 6 hrStorageOther Memory buffers 1024 Bytes 2046940 73580 ? 7 hrStorageOther Cached memory 1024 Bytes 1577648 1577648 ? 8 hrStorageOther Shared memory 1024 Bytes 2940 2940 ? 10 hrStorageVirtualMemory Swap space 1024 Bytes 1048572 0 ? 33 hrStorageFixedDisk /dev 4096 Bytes 16384 0 ? 36 hrStorageFixedDisk /sys/fs/cgroup 4096 Bytes 255867 0 ? 52 hrStorageFixedDisk /etc/resolv.conf 4096 Bytes 16448139 6493059 ? 53 hrStorageFixedDisk /etc/hostname 4096 Bytes 16448139 6493059 ? 54 hrStorageFixedDisk /etc/hosts 4096 Bytes 16448139 6493059 ? 55 hrStorageFixedDisk /dev/shm 4096 Bytes 16384 0 ? 61 hrStorageFixedDisk /proc/kcore 4096 Bytes 16384 0 ? 62 hrStorageFixedDisk /proc/keys 4096 Bytes 16384 0 ? 63 hrStorageFixedDisk /proc/timer_list 4096 Bytes 16384 0 ? 64 hrStorageFixedDisk /proc/sched_debug 4096 Bytes 16384 0 ? 65 hrStorageFixedDisk /sys/firmware 4096 Bytes 255867 0 ? (In this case, we added the -Os option which prints only the last symbolic element and reduces the output of hrStorageTypes .) Walk query \u00b6 A walk query can be used to query all OIDs in a given sub-tree . The snmpwalk command can be used to perform a walk query. To facilitate usage of walk files for debugging, the following options are recommended: -ObentU . Here's what each option does: b : do not break OID indexes down. e : print enums numerically (for example, 24 instead of softwareLoopback(24) ). n : print OIDs numerically (for example, .1.3.6.1.2.1.2.2.1.1.1 instead of IF-MIB::ifIndex.1 ). t : print timeticks numerically (for example, 4226041 instead of Timeticks: (4226041) 11:44:20.41 ). U : don't print units. 
For example, the following command gets a walk of the 1.3.6.1.2.1.1 ( system ) sub-tree: $ snmpwalk -v 2c -c public -ObentU 127 .0.0.1:1161 1 .3.6.1.2.1.1 .1.3.6.1.2.1.1.1.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 .1.3.6.1.2.1.1.3.0 = 4226041 .1.3.6.1.2.1.1.4.0 = STRING: root@localhost .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 .1.3.6.1.2.1.1.6.0 = STRING: Unknown .1.3.6.1.2.1.1.8.0 = 9 .1.3.6.1.2.1.1.9.1.2.1 = OID: .1.3.6.1.6.3.11.3.1.1 .1.3.6.1.2.1.1.9.1.2.2 = OID: .1.3.6.1.6.3.15.2.1.1 .1.3.6.1.2.1.1.9.1.2.3 = OID: .1.3.6.1.6.3.10.3.1.1 .1.3.6.1.2.1.1.9.1.2.4 = OID: .1.3.6.1.6.3.1 .1.3.6.1.2.1.1.9.1.2.5 = OID: .1.3.6.1.2.1.49 .1.3.6.1.2.1.1.9.1.2.6 = OID: .1.3.6.1.2.1.4 .1.3.6.1.2.1.1.9.1.2.7 = OID: .1.3.6.1.2.1.50 .1.3.6.1.2.1.1.9.1.2.8 = OID: .1.3.6.1.6.3.16.2.2.1 .1.3.6.1.2.1.1.9.1.2.9 = OID: .1.3.6.1.6.3.13.3.1.3 .1.3.6.1.2.1.1.9.1.2.10 = OID: .1.3.6.1.2.1.92 .1.3.6.1.2.1.1.9.1.3.1 = STRING: The MIB for Message Processing and Dispatching. .1.3.6.1.2.1.1.9.1.3.2 = STRING: The management information definitions for the SNMP User-based Security Model. .1.3.6.1.2.1.1.9.1.3.3 = STRING: The SNMP Management Architecture MIB. .1.3.6.1.2.1.1.9.1.3.4 = STRING: The MIB module for SNMPv2 entities .1.3.6.1.2.1.1.9.1.3.5 = STRING: The MIB module for managing TCP implementations .1.3.6.1.2.1.1.9.1.3.6 = STRING: The MIB module for managing IP and ICMP implementations .1.3.6.1.2.1.1.9.1.3.7 = STRING: The MIB module for managing UDP implementations .1.3.6.1.2.1.1.9.1.3.8 = STRING: View-based Access Control Model for SNMP. .1.3.6.1.2.1.1.9.1.3.9 = STRING: The MIB modules for managing SNMP Notification, plus filtering. .1.3.6.1.2.1.1.9.1.3.10 = STRING: The MIB module for logging SNMP Notifications. .1.3.6.1.2.1.1.9.1.4.1 = 9 .1.3.6.1.2.1.1.9.1.4.2 = 9 .1.3.6.1.2.1.1.9.1.4.3 = 9 .1.3.6.1.2.1.1.9.1.4.4 = 9 .1.3.6.1.2.1.1.9.1.4.5 = 9 .1.3.6.1.2.1.1.9.1.4.6 = 9 .1.3.6.1.2.1.1.9.1.4.7 = 9 .1.3.6.1.2.1.1.9.1.4.8 = 9 .1.3.6.1.2.1.1.9.1.4.9 = 9 .1.3.6.1.2.1.1.9.1.4.10 = 9 As you can see, all OIDs that the device has available in the .1.3.6.1.2.1.1.* sub-tree are returned. In particular, one can recognize: sysObjectID ( .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 ) sysUpTime ( .1.3.6.1.2.1.1.3.0 = 4226041 ) sysName ( .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 ). 
Here is another example that queries the entire contents of ifTable (the table in IF-MIB that contains information about network interfaces): snmpwalk -v 2c -c public -OentU 127.0.0.1:1161 1.3.6.1.2.1.2.2 .1.3.6.1.2.1.2.2.1.1.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.1.90 = INTEGER: 90 .1.3.6.1.2.1.2.2.1.2.1 = STRING: lo .1.3.6.1.2.1.2.2.1.2.90 = STRING: eth0 .1.3.6.1.2.1.2.2.1.3.1 = INTEGER: 24 .1.3.6.1.2.1.2.2.1.3.90 = INTEGER: 6 .1.3.6.1.2.1.2.2.1.4.1 = INTEGER: 65536 .1.3.6.1.2.1.2.2.1.4.90 = INTEGER: 1500 .1.3.6.1.2.1.2.2.1.5.1 = Gauge32: 10000000 .1.3.6.1.2.1.2.2.1.5.90 = Gauge32: 4294967295 .1.3.6.1.2.1.2.2.1.6.1 = STRING: .1.3.6.1.2.1.2.2.1.6.90 = STRING: 2:42:ac:11:0:2 .1.3.6.1.2.1.2.2.1.7.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.7.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.9.1 = 0 .1.3.6.1.2.1.2.2.1.9.90 = 0 .1.3.6.1.2.1.2.2.1.10.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.10.90 = Counter32: 2928 .1.3.6.1.2.1.2.2.1.11.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.11.90 = Counter32: 40 .1.3.6.1.2.1.2.2.1.12.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.12.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.16.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.16.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.17.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.17.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.21.1 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.21.90 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.22.1 = OID: .0.0 .1.3.6.1.2.1.2.2.1.22.90 = OID: .0.0 Generate table simulation data \u00b6 To generate simulation data for tables automatically, use the mib2dev.py tool shipped with snmpsim . This tool will be renamed as snmpsim-record-mibs in the upcoming 1.0 release of the library. First, install snmpsim: pip install snmpsim Then run the tool, specifying the MIB with the start and stop OIDs (which can correspond to .e.g the first and last columns in the table respectively). For example: mib2dev.py --mib-module = --start-oid = 1 .3.6.1.4.1.674.10892.1.400.20 --stop-oid = 1 .3.6.1.4.1.674.10892.1.600.12 > /path/to/mytable.snmprec The following command generates 4 rows for the IF-MIB:ifTable (1.3.6.1.2.1.2.2) : mib2dev.py --mib-module = IF-MIB --start-oid = 1 .3.6.1.2.1.2.2 --stop-oid = 1 .3.6.1.2.1.2.3 --table-size = 4 > /path/to/mytable.snmprec Known issues \u00b6 mib2dev has a known issue with IF-MIB::ifPhysAddress , that is expected to contain an hexadecimal string, but mib2dev fills it with a string. 
To fix this, provide a valid hextring when prompted on the command line: # Synthesizing row #1 of table 1.3.6.1.2.1.2.2.1 *** Inconsistent value: Display format eval failure: b 'driving kept zombies quaintly forward zombies' : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' caused by : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' *** See constraints and suggest a better one for : # Table IF-MIB::ifTable # Row IF-MIB::ifEntry # Index IF-MIB::ifIndex (type InterfaceIndex) # Column IF-MIB::ifPhysAddress (type PhysAddress) # Value ['driving kept zombies quaintly forward zombies'] ? 001122334455 Generate simulation data from a walk \u00b6 As an alternative to .snmprec files , it is possible to use a walk as simulation data . This is especially useful when debugging live devices, since you can export the device walk and use this real data locally. To do so, paste the output of a walk query into a .snmpwalk file, and add this file to the test data directory. Then, pass the name of the walk file as the community_string . For more information, see Test SNMP profiles locally . Find where MIBs are installed on your machine \u00b6 See the Using and loading MIBs Net-SNMP tutorial. Browse locally installed MIBs \u00b6 Since community resources that list MIBs and OIDs are best effort, the MIB you are investigating may not be present or may not be available in its the latest version. In that case, you can use the snmptranslate CLI tool to output similar information for MIBs installed on your system. This tool is part of Net-SNMP - see SNMP queries prerequisites . Steps Run $ snmptranslate -m -Tz -On to get a complete list of OIDs in the MIB along with their labels. Redirect to a file for nicer formatting as needed. Example: $ snmptranslate -m IF-MIB -Tz -On > out.log $ cat out.log \"org\" \"1.3\" \"dod\" \"1.3.6\" \"internet\" \"1.3.6.1\" \"directory\" \"1.3.6.1.1\" \"mgmt\" \"1.3.6.1.2\" \"mib-2\" \"1.3.6.1.2.1\" \"system\" \"1.3.6.1.2.1.1\" \"sysDescr\" \"1.3.6.1.2.1.1.1\" \"sysObjectID\" \"1.3.6.1.2.1.1.2\" \"sysUpTime\" \"1.3.6.1.2.1.1.3\" \"sysContact\" \"1.3.6.1.2.1.1.4\" \"sysName\" \"1.3.6.1.2.1.1.5\" \"sysLocation\" \"1.3.6.1.2.1.1.6\" [...] Tip Use the -M option to specify the directory where snmptranslate should look for MIBs. Useful if you want to inspect a MIB you've just downloaded but not moved to the default MIB directory. Tip Use -Tp for an alternative tree-like formatting.","title":"SNMP How-To"},{"location":"tutorials/snmp/how-to/#snmp-how-to","text":"","title":"SNMP How-To"},{"location":"tutorials/snmp/how-to/#simulate-snmp-devices","text":"SNMP is a protocol for gathering metrics from network devices, but automated testing of the integration would not be practical nor reliable if we used actual devices. Our approach is to use a simulated SNMP device that responds to SNMP queries using simulation data . This simulated device is brought up as a Docker container when starting the SNMP test environment using: ddev env start snmp [ ... ]","title":"Simulate SNMP devices"},{"location":"tutorials/snmp/how-to/#test-snmp-profiles-locally","text":"Once the environment is up and running, you can modify the instance configuration to test profiles that support simulated metrics. The following is an example of an instance configured to use the Cisco Nexus profile. init_config : profiles : cisco_nexus : definition_file : cisco-nexus.yaml instances : - community_string : cisco_nexus # (1.) ip_address : # (2.) 
profile : cisco_nexus name : localhost port : 1161 The community_string must match the corresponding device .snmprec file name. For example, myprofile.snmprec gives community_string: myprofile . This also applies to walk files : myprofile.snmpwalk gives community_string: myprofile . To find the IP address of the SNMP container, run: docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dd-snmp","title":"Test SNMP profiles locally"},{"location":"tutorials/snmp/how-to/#run-snmp-queries","text":"With the test environment is up and running, we can issue SNMP queries to the simulated device using a command line SNMP client.","title":"Run SNMP queries"},{"location":"tutorials/snmp/how-to/#prerequisites","text":"Make sure you have the Net-SNMP tools installed on your machine. These should come pre-installed by default on Linux and macOS. If necessary, you can download them on the Net-SNMP website .","title":"Prerequisites"},{"location":"tutorials/snmp/how-to/#available-commands","text":"The Net-SNMP tools provide a number of commands to interact with SNMP devices. The most commonly used commands are: snmpget : to issue an SNMP GET query. snmpgetnext : to issue an SNMP GETNEXT query. snmpwalk : to query an entire OID sub-tree at once. snmptable : to query rows in an SNMP table.","title":"Available commands"},{"location":"tutorials/snmp/how-to/#examples","text":"","title":"Examples"},{"location":"tutorials/snmp/how-to/#get-query","text":"To query a specific OID from a device, we can use the snmpget command. For example, the following command will query sysDescr OID of an SNMP device, which returns its human-readable description: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 system.sysDescr.0 SNMPv2-MIB::sysDescr.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 SNMPv2-MIB::sysORUpTime.1 = Timeticks: (9) 0:00:00.09 Let's break this command down: snmpget : this command sends an SNMP GET request, and can be used to query the value of an OID. Here, we are requesting the system.sysDescr.0 OID. -v 2c : instructs your SNMP client to send the request using SNMP version 2c. See SNMP Versions . -c public : instructs the SNMP client to send the community string public along with our request. (This is a form of authentication provided by SNMP v2. See SNMP Versions .) 127.0.0.1:1161 : this is the host and port where the simulated SNMP agent is available at. (Confirm the port used by the ddev environment by inspecting the Docker port mapping via $ docker ps .) system.sysDescr.0 : this is the OID that the client should request. In practice this can refer to either a fully-resolved OID (e.g. 1.3.6.1.4.1[...] ), or a label (e.g. sysDescr.0 ). -IR : this option allows us to use labels for OIDs that aren't in the generic 1.3.6.1.2.1.* sub-tree (see: The OID tree ). TL;DR: always use this option when working with OIDs coming from vendor-specific MIBs. Tip If the above command fails, try using the explicit OID like so: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 iso.3.6.1.2.1.1.1.0","title":"GET query"},{"location":"tutorials/snmp/how-to/#table-query","text":"For tables, use the snmptable command, which will output the rows in the table in a tabular format. Its arguments and options are similar to snmpget . 
$ snmptable -v 2c -c public -IR -Os 127 .0.0.1:1161 hrStorageTable SNMP table: hrStorageTable hrStorageIndex hrStorageType hrStorageDescr hrStorageAllocationUnits hrStorageSize hrStorageUsed hrStorageAllocationFailures 1 hrStorageRam Physical memory 1024 Bytes 2046940 1969964 ? 3 hrStorageVirtualMemory Virtual memory 1024 Bytes 3095512 1969964 ? 6 hrStorageOther Memory buffers 1024 Bytes 2046940 73580 ? 7 hrStorageOther Cached memory 1024 Bytes 1577648 1577648 ? 8 hrStorageOther Shared memory 1024 Bytes 2940 2940 ? 10 hrStorageVirtualMemory Swap space 1024 Bytes 1048572 0 ? 33 hrStorageFixedDisk /dev 4096 Bytes 16384 0 ? 36 hrStorageFixedDisk /sys/fs/cgroup 4096 Bytes 255867 0 ? 52 hrStorageFixedDisk /etc/resolv.conf 4096 Bytes 16448139 6493059 ? 53 hrStorageFixedDisk /etc/hostname 4096 Bytes 16448139 6493059 ? 54 hrStorageFixedDisk /etc/hosts 4096 Bytes 16448139 6493059 ? 55 hrStorageFixedDisk /dev/shm 4096 Bytes 16384 0 ? 61 hrStorageFixedDisk /proc/kcore 4096 Bytes 16384 0 ? 62 hrStorageFixedDisk /proc/keys 4096 Bytes 16384 0 ? 63 hrStorageFixedDisk /proc/timer_list 4096 Bytes 16384 0 ? 64 hrStorageFixedDisk /proc/sched_debug 4096 Bytes 16384 0 ? 65 hrStorageFixedDisk /sys/firmware 4096 Bytes 255867 0 ? (In this case, we added the -Os option which prints only the last symbolic element and reduces the output of hrStorageTypes .)","title":"Table query"},{"location":"tutorials/snmp/how-to/#walk-query","text":"A walk query can be used to query all OIDs in a given sub-tree . The snmpwalk command can be used to perform a walk query. To facilitate usage of walk files for debugging, the following options are recommended: -ObentU . Here's what each option does: b : do not break OID indexes down. e : print enums numerically (for example, 24 instead of softwareLoopback(24) ). n : print OIDs numerically (for example, .1.3.6.1.2.1.2.2.1.1.1 instead of IF-MIB::ifIndex.1 ). t : print timeticks numerically (for example, 4226041 instead of Timeticks: (4226041) 11:44:20.41 ). U : don't print units. For example, the following command gets a walk of the 1.3.6.1.2.1.1 ( system ) sub-tree: $ snmpwalk -v 2c -c public -ObentU 127 .0.0.1:1161 1 .3.6.1.2.1.1 .1.3.6.1.2.1.1.1.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 .1.3.6.1.2.1.1.3.0 = 4226041 .1.3.6.1.2.1.1.4.0 = STRING: root@localhost .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 .1.3.6.1.2.1.1.6.0 = STRING: Unknown .1.3.6.1.2.1.1.8.0 = 9 .1.3.6.1.2.1.1.9.1.2.1 = OID: .1.3.6.1.6.3.11.3.1.1 .1.3.6.1.2.1.1.9.1.2.2 = OID: .1.3.6.1.6.3.15.2.1.1 .1.3.6.1.2.1.1.9.1.2.3 = OID: .1.3.6.1.6.3.10.3.1.1 .1.3.6.1.2.1.1.9.1.2.4 = OID: .1.3.6.1.6.3.1 .1.3.6.1.2.1.1.9.1.2.5 = OID: .1.3.6.1.2.1.49 .1.3.6.1.2.1.1.9.1.2.6 = OID: .1.3.6.1.2.1.4 .1.3.6.1.2.1.1.9.1.2.7 = OID: .1.3.6.1.2.1.50 .1.3.6.1.2.1.1.9.1.2.8 = OID: .1.3.6.1.6.3.16.2.2.1 .1.3.6.1.2.1.1.9.1.2.9 = OID: .1.3.6.1.6.3.13.3.1.3 .1.3.6.1.2.1.1.9.1.2.10 = OID: .1.3.6.1.2.1.92 .1.3.6.1.2.1.1.9.1.3.1 = STRING: The MIB for Message Processing and Dispatching. .1.3.6.1.2.1.1.9.1.3.2 = STRING: The management information definitions for the SNMP User-based Security Model. .1.3.6.1.2.1.1.9.1.3.3 = STRING: The SNMP Management Architecture MIB. 
.1.3.6.1.2.1.1.9.1.3.4 = STRING: The MIB module for SNMPv2 entities .1.3.6.1.2.1.1.9.1.3.5 = STRING: The MIB module for managing TCP implementations .1.3.6.1.2.1.1.9.1.3.6 = STRING: The MIB module for managing IP and ICMP implementations .1.3.6.1.2.1.1.9.1.3.7 = STRING: The MIB module for managing UDP implementations .1.3.6.1.2.1.1.9.1.3.8 = STRING: View-based Access Control Model for SNMP. .1.3.6.1.2.1.1.9.1.3.9 = STRING: The MIB modules for managing SNMP Notification, plus filtering. .1.3.6.1.2.1.1.9.1.3.10 = STRING: The MIB module for logging SNMP Notifications. .1.3.6.1.2.1.1.9.1.4.1 = 9 .1.3.6.1.2.1.1.9.1.4.2 = 9 .1.3.6.1.2.1.1.9.1.4.3 = 9 .1.3.6.1.2.1.1.9.1.4.4 = 9 .1.3.6.1.2.1.1.9.1.4.5 = 9 .1.3.6.1.2.1.1.9.1.4.6 = 9 .1.3.6.1.2.1.1.9.1.4.7 = 9 .1.3.6.1.2.1.1.9.1.4.8 = 9 .1.3.6.1.2.1.1.9.1.4.9 = 9 .1.3.6.1.2.1.1.9.1.4.10 = 9 As you can see, all OIDs that the device has available in the .1.3.6.1.2.1.1.* sub-tree are returned. In particular, one can recognize: sysObjectID ( .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 ) sysUpTime ( .1.3.6.1.2.1.1.3.0 = 4226041 ) sysName ( .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 ). Here is another example that queries the entire contents of ifTable (the table in IF-MIB that contains information about network interfaces): snmpwalk -v 2c -c public -OentU 127.0.0.1:1161 1.3.6.1.2.1.2.2 .1.3.6.1.2.1.2.2.1.1.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.1.90 = INTEGER: 90 .1.3.6.1.2.1.2.2.1.2.1 = STRING: lo .1.3.6.1.2.1.2.2.1.2.90 = STRING: eth0 .1.3.6.1.2.1.2.2.1.3.1 = INTEGER: 24 .1.3.6.1.2.1.2.2.1.3.90 = INTEGER: 6 .1.3.6.1.2.1.2.2.1.4.1 = INTEGER: 65536 .1.3.6.1.2.1.2.2.1.4.90 = INTEGER: 1500 .1.3.6.1.2.1.2.2.1.5.1 = Gauge32: 10000000 .1.3.6.1.2.1.2.2.1.5.90 = Gauge32: 4294967295 .1.3.6.1.2.1.2.2.1.6.1 = STRING: .1.3.6.1.2.1.2.2.1.6.90 = STRING: 2:42:ac:11:0:2 .1.3.6.1.2.1.2.2.1.7.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.7.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.9.1 = 0 .1.3.6.1.2.1.2.2.1.9.90 = 0 .1.3.6.1.2.1.2.2.1.10.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.10.90 = Counter32: 2928 .1.3.6.1.2.1.2.2.1.11.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.11.90 = Counter32: 40 .1.3.6.1.2.1.2.2.1.12.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.12.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.16.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.16.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.17.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.17.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.21.1 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.21.90 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.22.1 = OID: .0.0 .1.3.6.1.2.1.2.2.1.22.90 = OID: .0.0","title":"Walk query"},{"location":"tutorials/snmp/how-to/#generate-table-simulation-data","text":"To generate simulation data for tables automatically, use the mib2dev.py tool shipped with snmpsim . This tool will be renamed as snmpsim-record-mibs in the upcoming 1.0 release of the library. First, install snmpsim: pip install snmpsim Then run the tool, specifying the MIB with the start and stop OIDs (which can correspond to .e.g the first and last columns in the table respectively). 
For example: mib2dev.py --mib-module = --start-oid = 1 .3.6.1.4.1.674.10892.1.400.20 --stop-oid = 1 .3.6.1.4.1.674.10892.1.600.12 > /path/to/mytable.snmprec The following command generates 4 rows for the IF-MIB:ifTable (1.3.6.1.2.1.2.2) : mib2dev.py --mib-module = IF-MIB --start-oid = 1 .3.6.1.2.1.2.2 --stop-oid = 1 .3.6.1.2.1.2.3 --table-size = 4 > /path/to/mytable.snmprec","title":"Generate table simulation data"},{"location":"tutorials/snmp/how-to/#known-issues","text":"mib2dev has a known issue with IF-MIB::ifPhysAddress , that is expected to contain an hexadecimal string, but mib2dev fills it with a string. To fix this, provide a valid hextring when prompted on the command line: # Synthesizing row #1 of table 1.3.6.1.2.1.2.2.1 *** Inconsistent value: Display format eval failure: b 'driving kept zombies quaintly forward zombies' : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' caused by : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' *** See constraints and suggest a better one for : # Table IF-MIB::ifTable # Row IF-MIB::ifEntry # Index IF-MIB::ifIndex (type InterfaceIndex) # Column IF-MIB::ifPhysAddress (type PhysAddress) # Value ['driving kept zombies quaintly forward zombies'] ? 001122334455","title":"Known issues"},{"location":"tutorials/snmp/how-to/#generate-simulation-data-from-a-walk","text":"As an alternative to .snmprec files , it is possible to use a walk as simulation data . This is especially useful when debugging live devices, since you can export the device walk and use this real data locally. To do so, paste the output of a walk query into a .snmpwalk file, and add this file to the test data directory. Then, pass the name of the walk file as the community_string . For more information, see Test SNMP profiles locally .","title":"Generate simulation data from a walk"},{"location":"tutorials/snmp/how-to/#find-where-mibs-are-installed-on-your-machine","text":"See the Using and loading MIBs Net-SNMP tutorial.","title":"Find where MIBs are installed on your machine"},{"location":"tutorials/snmp/how-to/#browse-locally-installed-mibs","text":"Since community resources that list MIBs and OIDs are best effort, the MIB you are investigating may not be present or may not be available in its the latest version. In that case, you can use the snmptranslate CLI tool to output similar information for MIBs installed on your system. This tool is part of Net-SNMP - see SNMP queries prerequisites . Steps Run $ snmptranslate -m -Tz -On to get a complete list of OIDs in the MIB along with their labels. Redirect to a file for nicer formatting as needed. Example: $ snmptranslate -m IF-MIB -Tz -On > out.log $ cat out.log \"org\" \"1.3\" \"dod\" \"1.3.6\" \"internet\" \"1.3.6.1\" \"directory\" \"1.3.6.1.1\" \"mgmt\" \"1.3.6.1.2\" \"mib-2\" \"1.3.6.1.2.1\" \"system\" \"1.3.6.1.2.1.1\" \"sysDescr\" \"1.3.6.1.2.1.1.1\" \"sysObjectID\" \"1.3.6.1.2.1.1.2\" \"sysUpTime\" \"1.3.6.1.2.1.1.3\" \"sysContact\" \"1.3.6.1.2.1.1.4\" \"sysName\" \"1.3.6.1.2.1.1.5\" \"sysLocation\" \"1.3.6.1.2.1.1.6\" [...] Tip Use the -M option to specify the directory where snmptranslate should look for MIBs. Useful if you want to inspect a MIB you've just downloaded but not moved to the default MIB directory. 
Tip Use -Tp for an alternative tree-like formatting.","title":"Browse locally installed MIBs"},{"location":"tutorials/snmp/introduction/","text":"Introduction to SNMP \u00b6 In this introduction, we'll cover general information about the SNMP protocol, including key concepts such as OIDs and MIBs. If you're already familiar with the SNMP protocol, feel free to skip to the next page. What is SNMP? \u00b6 Overview \u00b6 SNMP (Simple Network Management Protocol) is a protocol for monitoring network devices . It uses UDP and supports both a request/response model (commands and queries) and a notification model (traps, informs). In the request/response model, the SNMP manager (eg. the Datadog Agent) issues an SNMP command ( GET , GETNEXT , BULK ) to an SNMP agent (eg. a network device). SNMP was born in the 1980s, so it has been around for a long time. While more modern alternatives like NETCONF and OpenConfig have been gaining attention, a large amount of network devices still use SNMP as their primary monitoring interface. SNMP versions \u00b6 The SNMP protocol exists in 3 versions: v1 (legacy), v2c , and v3 . The main differences between v1/v2c and v3 are the authentication mechanism and transport layer, as summarized below. Version Authentication Transport layer v1/v2c Password (the community string ) Plain text only v3 Username/password Support for packet signing and encryption OIDs \u00b6 What is an OID? \u00b6 Identifiers for queryable quantities An OID , also known as an Object Identifier , is an identifier for a quantity (\"object\") that can be retrieved from an SNMP device. Such quantities may include uptime, temperature, network traffic, etc (quantities available will vary across devices). To make them processable by machines, OIDs are represented as dot-separated sequences of numbers, e.g. 1.3.6.1.2.1.1.1 . Global definition OIDs are globally defined , which means they have the same meaning regardless of the device that processes the SNMP query. For example, querying the 1.3.6.1.2.1.1.1 OID (also known as sysDescr ) on any SNMP agent will make it return the system description. (More on the OID/label mapping can be found in the MIBs section below.) Not all OIDs contain metrics data OIDs can refer to various types of objects, such as strings, numbers, tables, etc. In particular, this means that only a fraction of OIDs refer to numerical quantities that can actually be sent as metrics to Datadog. However, non-numerical OIDs can also be useful, especially for tagging. The OID tree \u00b6 OIDs are structured in a tree-like fashion. Each number in the OID represents a node in the tree. The wildcard notation is often used to refer to a sub-tree of OIDs, e.g. 1.3.6.1.2.* . It so happens that there are two main OID sub-trees: a sub-tree for general-purpose OIDs, and a sub-tree for vendor-specific OIDs. Generic OIDs \u00b6 Located under the sub-tree: 1.3.6.1.2.1.* (a.k.a. SNMPv2-MIB or mib-2 ). These OIDs are applicable to all kinds of network devices (although all devices may not expose all OIDs in this sub-tree). For example, 1.3.6.1.2.1.1.1 corresponds to sysDescr , which contains a free-form, human-readable description of the device. Vendor-specific OIDs \u00b6 Located under the sub-tree: 1.3.6.1.4.1.* (a.k.a. enterprises ). These OIDs are defined and managed by network device vendors themselves. Each vendor is assigned its own enterprise sub-tree in the form of 1.3.6.1.4.1..* . For example: 1.3.6.1.4.1.2.* is the sub-tree for IBM-specific OIDs. 
1.3.6.1.4.1.9.* is the sub-tree for Cisco-specific OIDs. The full list of vendor sub-trees can be found here: SNMP OID 1.3.6.1.4.1 . Notable OIDs \u00b6 OID Label Description 1.3.6.1.2.1.2 sysObjectId An OID whose value is an OID that represents the device make and model (yes, it's a bit meta). 1.3.6.1.2.1.1.1 sysDescr A human-readable, free-form description of the device. 1.3.6.1.2.1.1.3 sysUpTimeInstance The device uptime. MIBs \u00b6 What is an MIB? \u00b6 OIDs are grouped in modules called MIBs (Management Information Base). An MIB describes the hierarchy of a given set of OIDs. (This is somewhat analogous to a dictionary that contains the definitions for each word in a spoken language.) For example, the IF-MIB describes the hierarchy of OIDs within the sub-tree 1.3.6.1.2.1.2.* . These OIDs contain metrics about the network interfaces available on the device. (Note how its location under the 1.3.6.1.2.* sub-tree indicates that it is a generic MIB, available on most network devices.) As part of the description of OIDs, an MIB defines a human-readable label for each OID. For example, IF-MIB describes the OID 1.3.6.1.2.1.1 and assigns it the label sysDescr . The operation that consists in finding the OID from a label is called OID resolution . Tools and resources \u00b6 The following resources can be useful when working with MIBs: MIB Discovery : a search engine for OIDs. Use it to find what an OID corresponds to, which MIB it comes from, what label it is known as, etc. Circitor MIB files repository : a repository and search engine where one can download actual .mib files. SNMP Labs MIB repository : alternate repo of many common MIBs. Note : this site hosts the underlying MIBs which the pysnmp-mibs library (used by the SNMP Python check) actually validates against. Double check any MIB you get from an alternate source with what is in this repo. Learn more \u00b6 For other high-level overviews of SNMP, see: How SNMP Works (Youtube) SNMP (Wikipedia) Tutorials: Internet Management and SNMP (YouTube) (In-depth videos about SNMP architecture, MIBs, protocol data structures, security models, monitoring code examples, etc.)","title":"Introduction to SNMP"},{"location":"tutorials/snmp/introduction/#introduction-to-snmp","text":"In this introduction, we'll cover general information about the SNMP protocol, including key concepts such as OIDs and MIBs. If you're already familiar with the SNMP protocol, feel free to skip to the next page.","title":"Introduction to SNMP"},{"location":"tutorials/snmp/introduction/#what-is-snmp","text":"","title":"What is SNMP?"},{"location":"tutorials/snmp/introduction/#overview","text":"SNMP (Simple Network Management Protocol) is a protocol for monitoring network devices . It uses UDP and supports both a request/response model (commands and queries) and a notification model (traps, informs). In the request/response model, the SNMP manager (eg. the Datadog Agent) issues an SNMP command ( GET , GETNEXT , BULK ) to an SNMP agent (eg. a network device). SNMP was born in the 1980s, so it has been around for a long time. While more modern alternatives like NETCONF and OpenConfig have been gaining attention, a large amount of network devices still use SNMP as their primary monitoring interface.","title":"Overview"},{"location":"tutorials/snmp/introduction/#snmp-versions","text":"The SNMP protocol exists in 3 versions: v1 (legacy), v2c , and v3 . The main differences between v1/v2c and v3 are the authentication mechanism and transport layer, as summarized below. 
Version Authentication Transport layer v1/v2c Password (the community string ) Plain text only v3 Username/password Support for packet signing and encryption","title":"SNMP versions"},{"location":"tutorials/snmp/introduction/#oids","text":"","title":"OIDs"},{"location":"tutorials/snmp/introduction/#what-is-an-oid","text":"Identifiers for queryable quantities An OID , also known as an Object Identifier , is an identifier for a quantity (\"object\") that can be retrieved from an SNMP device. Such quantities may include uptime, temperature, network traffic, etc (quantities available will vary across devices). To make them processable by machines, OIDs are represented as dot-separated sequences of numbers, e.g. 1.3.6.1.2.1.1.1 . Global definition OIDs are globally defined , which means they have the same meaning regardless of the device that processes the SNMP query. For example, querying the 1.3.6.1.2.1.1.1 OID (also known as sysDescr ) on any SNMP agent will make it return the system description. (More on the OID/label mapping can be found in the MIBs section below.) Not all OIDs contain metrics data OIDs can refer to various types of objects, such as strings, numbers, tables, etc. In particular, this means that only a fraction of OIDs refer to numerical quantities that can actually be sent as metrics to Datadog. However, non-numerical OIDs can also be useful, especially for tagging.","title":"What is an OID?"},{"location":"tutorials/snmp/introduction/#the-oid-tree","text":"OIDs are structured in a tree-like fashion. Each number in the OID represents a node in the tree. The wildcard notation is often used to refer to a sub-tree of OIDs, e.g. 1.3.6.1.2.* . It so happens that there are two main OID sub-trees: a sub-tree for general-purpose OIDs, and a sub-tree for vendor-specific OIDs.","title":"The OID tree"},{"location":"tutorials/snmp/introduction/#generic-oids","text":"Located under the sub-tree: 1.3.6.1.2.1.* (a.k.a. SNMPv2-MIB or mib-2 ). These OIDs are applicable to all kinds of network devices (although all devices may not expose all OIDs in this sub-tree). For example, 1.3.6.1.2.1.1.1 corresponds to sysDescr , which contains a free-form, human-readable description of the device.","title":"Generic OIDs"},{"location":"tutorials/snmp/introduction/#vendor-specific-oids","text":"Located under the sub-tree: 1.3.6.1.4.1.* (a.k.a. enterprises ). These OIDs are defined and managed by network device vendors themselves. Each vendor is assigned its own enterprise sub-tree in the form of 1.3.6.1.4.1..* . For example: 1.3.6.1.4.1.2.* is the sub-tree for IBM-specific OIDs. 1.3.6.1.4.1.9.* is the sub-tree for Cisco-specific OIDs. The full list of vendor sub-trees can be found here: SNMP OID 1.3.6.1.4.1 .","title":"Vendor-specific OIDs"},{"location":"tutorials/snmp/introduction/#notable-oids","text":"OID Label Description 1.3.6.1.2.1.2 sysObjectId An OID whose value is an OID that represents the device make and model (yes, it's a bit meta). 1.3.6.1.2.1.1.1 sysDescr A human-readable, free-form description of the device. 1.3.6.1.2.1.1.3 sysUpTimeInstance The device uptime.","title":"Notable OIDs"},{"location":"tutorials/snmp/introduction/#mibs","text":"","title":"MIBs"},{"location":"tutorials/snmp/introduction/#what-is-an-mib","text":"OIDs are grouped in modules called MIBs (Management Information Base). An MIB describes the hierarchy of a given set of OIDs. (This is somewhat analogous to a dictionary that contains the definitions for each word in a spoken language.) 
For example, the IF-MIB describes the hierarchy of OIDs within the sub-tree 1.3.6.1.2.1.2.* . These OIDs contain metrics about the network interfaces available on the device. (Note how its location under the 1.3.6.1.2.* sub-tree indicates that it is a generic MIB, available on most network devices.) As part of the description of OIDs, an MIB defines a human-readable label for each OID. For example, IF-MIB describes the OID 1.3.6.1.2.1.1 and assigns it the label sysDescr . The operation that consists in finding the OID from a label is called OID resolution .","title":"What is an MIB?"},{"location":"tutorials/snmp/introduction/#tools-and-resources","text":"The following resources can be useful when working with MIBs: MIB Discovery : a search engine for OIDs. Use it to find what an OID corresponds to, which MIB it comes from, what label it is known as, etc. Circitor MIB files repository : a repository and search engine where one can download actual .mib files. SNMP Labs MIB repository : alternate repo of many common MIBs. Note : this site hosts the underlying MIBs which the pysnmp-mibs library (used by the SNMP Python check) actually validates against. Double check any MIB you get from an alternate source with what is in this repo.","title":"Tools and resources"},{"location":"tutorials/snmp/introduction/#learn-more","text":"For other high-level overviews of SNMP, see: How SNMP Works (Youtube) SNMP (Wikipedia) Tutorials: Internet Management and SNMP (YouTube) (In-depth videos about SNMP architecture, MIBs, protocol data structures, security models, monitoring code examples, etc.)","title":"Learn more"},{"location":"tutorials/snmp/profile-format/","text":"Profile Format Reference \u00b6 Overview \u00b6 SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. An SNMP profile is materialised as a YAML file with the following structure: sysobjectid : # extends: # metrics : # # metric_tags: # Fields \u00b6 sysobjectid \u00b6 (Required) The sysobjectid field is used to match profiles against devices during device autodiscovery. It can refer to a fully-defined OID for a specific device make and model: sysobjectid : 1.3.6.1.4.1.232.9.4.10 or a wildcard pattern to address multiple device models: sysobjectid : 1.3.6.1.131.12.4.* or a list of fully-defined OID / wildcard patterns: sysobjectid : - 1.3.6.1.131.12.4.* - 1.3.6.1.4.1.232.9.4.10 extends \u00b6 (Optional) This field can be used to include metrics and metric tags from other so-called base profiles . Base profiles can derive from other base profiles to build a hierarchy of reusable profile mixins. Important All device profiles should extend from the _base.yaml profile, which defines items that should be collected for all devices. Example: extends : - _base.yaml - _generic-if.yaml # Include basic metrics from IF-MIB. metrics \u00b6 (Required) Entries in the metrics field define which metrics will be collected by the profile. They can reference either a single OID (a.k.a symbol ), or an SNMP table. Symbol metrics \u00b6 An SNMP symbol is an object with a scalar type (i.e. Counter32 , Integer32 , OctetString , etc). In a MIB file, a symbol can be recognized as an OBJECT-TYPE node with a scalar SYNTAX , placed under an OBJECT IDENTIFIER node (which is often the root OID of the MIB): EXAMPLE-MIB DEFINITIONS ::= BEGIN -- ... example OBJECT IDENTIFIER ::= { mib-2 7 } exampleSymbol OBJECT-TYPE SYNTAX Counter32 -- ... 
::= { example 1 } In profiles, symbol metrics can be specified as entries that specify the MIB and symbol fields: metrics : # Example for the above dummy MIB and symbol: - MIB : EXAMPLE-MIB symbol : OID : 1.3.5.1.2.1.7.1 name : exampleSymbol # More realistic examples: - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.1.2 name : clusterHealth - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Warning Symbol metrics from the same MIB must still be listed as separate metrics entries, as shown above. For example, this is not valid syntax: metrics : - MIB : ISILON-MIB symbol : - OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Table metrics \u00b6 An SNMP table is an object that is composed of multiple entries (\"rows\"), where each entry contains values a set of symbols (\"columns\"). In a MIB file, tables be recognized by the presence of SEQUENCE OF : exampleTable OBJECT-TYPE SYNTAX SEQUENCE OF exampleEntry -- ... ::= { example 10 } exampleEntry OBJECT-TYPE -- ... ::= { exampleTable 1 } exampleColumn1 OBJECT-TYPE -- ... ::= { exampleEntry 1 } exampleColumn2 OBJECT-TYPE -- ... ::= { exampleEntry 2 } -- ... In profiles, tables can be specified as entries containing the MIB , table and symbols fields: metrics : # Example for the dummy table above: - MIB : EXAMPLE-MIB table : # Identification of the table which metrics come from. OID : 1.3.6.1.4.1.10 name : exampleTable symbols : # List of symbols ('columns') to retrieve. # Same format as for a single OID. # Each row in the table will emit these metrics. - OID : 1.3.6.1.4.1.10.1.1 name : exampleColumn1 - OID : 1.3.6.1.4.1.10.1.2 name : exampleColumn2 # ... # More realistic example: - MIB : CISCO-PROCESS-MIB table : # Each row in this table contains information about a CPU unit of the device. OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed # ... Table metrics tagging \u00b6 It is possible to add tags to metrics retrieved from a table in three ways: Using a column within the same table \u00b6 metrics : - MIB : IF-MIB table : OID : 1.3.6.1.2.1.2.2 name : ifTable symbols : - OID : 1.3.6.1.2.1.2.2.1.14 name : ifInErrors # ... metric_tags : # Add an 'interface' tag to each metric of each row, # whose value is obtained from the 'ifDescr' column of the row. # This allows querying metrics by interface, e.g. 'interface:eth0'. - tag : interface column : OID : 1.3.6.1.2.1.2.2.1.2 name : ifDescr Using a column from a different table with identical indexes \u00b6 metrics : - MIB : CISCO-IF-EXTENSION-MIB forced_type : monotonic_count table : OID : 1.3.6.1.4.1.9.9.276.1.1.2 name : cieIfInterfaceTable symbols : - OID : 1.3.6.1.4.1.9.9.276.1.1.2.1.1 name : cieIfResetCount metric_tags : - MIB : IF-MIB column : OID : 1.3.6.1.2.1.31.1.1.1.1 name : ifName table : ifXTable tag : interface Using a column from a different table with different indexes \u00b6 metrics : - MIB : CPI-UNITY-MIB table : OID : 1.3.6.1.4.1.30932.1.10.1.3.110 name : cpiPduBranchTable symbols : - OID : 1.3.6.1.4.1.30932.1.10.1.3.110.1.3 name : cpiPduBranchCurrent metric_tags : - column : OID : 1.3.6.1.4.1.30932.1.10.1.2.10.1.3 name : cpiPduName table : cpiPduTable index_transform : - start : 1 end : 7 tag : pdu_name If the external table has different indexes, use index_transform to select a subset of the full index. 
index_transform is a list of start / end ranges to extract from the current table index to match the external table index. start and end are inclusive. External table indexes must be a subset of the indexes of the current table, or same indexes in a different order. Example In the example above, the index of cpiPduBranchTable looks like 1.6.0.36.155.53.3.246 , the first digit is the cpiPduBranchId index and the rest is the cpiPduBranchMac index. The index of cpiPduTable looks like 6.0.36.155.53.3.246 and represents cpiPduMac (equivalent to cpiPduBranchMac ). By using the index_transform with start 1 and end 7, we extract 6.0.36.155.53.3.246 from 1.6.0.36.155.53.3.246 ( cpiPduBranchTable full index), and then use it to match 6.0.36.155.53.3.246 ( cpiPduTable full index). index_transform can be more complex, the following definition will extract 2.3.5.6.7 from 1.2.3.4.5.6.7 . index_transform : - start : 1 end : 2 - start : 4 end : 6 Using an index \u00b6 Important: \" index \" refers to one digit of the index part of the row OID. Example, if the column OID is 1.2.3.1.2 and the row OID is 1.2.3.1.2.7.8.9 , the full index is 7.8.9 . In this example, when using index: 1 , we will refer to 7 , index: 2 will refer to 8 , and so on. metrics : - MIB : CISCO-PROCESS-MIB table : OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed metric_tags : # This tagging method is more complex, so let's walk through an example... # # In CISCO-PROCESS-MIB, we can see that entries in the `cpmCPUTotalTable` are indexed by `cpmCPUTotalIndex`, # which corresponds to some sort of CPU position for each row in the table: # # cpmCPUTotalEntry OBJECT-TYPE # -- ... # INDEX { cpmCPUTotalIndex } # <-- See? # # We want to tag metrics in this table by this CPU position. # # To do this, we look up the position of this OID in `INDEX`. Here we see it's in 1st position. # So we can reference it here using `index: 1`. # (If there were two OIDs in `INDEX`, and we wanted to use the one in 2nd position, then we would have used `index: 2`.) # # NOTE: currently only indexes that refer to a column in the same table are supported. - tag : cpu index : 1 Mapping index to tag string value \u00b6 You can use the following syntax to map indexes to tag string values. In the example below, the submitted metrics will be snmp.ipSystemStatsHCInReceives with tags like ipversion:ipv6 . metrics : - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives metric_tags : - index : 1 tag : ipversion mapping : 0 : unknown 1 : ipv4 2 : ipv6 3 : ipv4z 4 : ipv6z 16 : dns See meaning of index as used here in Using an index section. Tagging tips \u00b6 Note General guidelines on Datadog tagging also apply to table metric tags. In particular, be mindful of the kind of value contained in the columns used a tag sources. E.g. avoid using a DisplayString (an arbitrarily long human-readable text description) or unbounded sources (timestamps, IDs...) as tag values. Good candidates for tag values include short strings, enums, or integer indexes. Metric type inference \u00b6 By default, the Datadog metric type of a symbol will be inferred from the SNMP type (i.e. 
the MIB SYNTAX ): SNMP type Inferred metric type Counter32 rate Counter64 rate Gauge32 gauge Integer gauge Integer32 gauge CounterBasedGauge64 gauge Opaque gauge SNMP types not listed in this table are submitted as gauge by default. Forced metric types \u00b6 Sometimes the inferred type may not be what you want. Typically, OIDs that represent \"total number of X\" are defined as Counter32 in MIBs, but you probably want to submit them monotonic_count instead of a rate . For such cases, you can define a forced_type . Possible values and their effect are listed below. Forced type Description gauge Submit as a gauge. rate Submit as a rate. percent Multiply by 100 and submit as a rate. monotonic_count Submit as a monotonic count. monotonic_count_and_rate Submit 2 copies of the metric: one as a monotonic count, and one as a rate (suffixed with .rate ). flag_stream Submit each flag of a flag stream as individual metric with value 0 or 1 . See Flag Stream section . This works on both symbol and table metrics: metrics : # On a symbol: - MIB : TCP-MIB forced_type : monotonic_count symbol : OID : 1.3.6.1.2.1.6.5 name : tcpActiveOpens # On a table: - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives - OID : 1.3.6.1.2.1.4.31.1.1.6 name : ipSystemStatsHCInOctets Note When used on a table metrics entry, forced_type is applied to all symbols in the entry. So, if a table contains symbols of varying types, you should use multiple metrics entries: one for symbols with inferred metric types, and one for each forced_type . For example: metrics : - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable # No `forced_type` specified => metric types will be inferred. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.2 name : ltmPoolStatServerPktsIn - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.4 name : ltmPoolStatServerPktsOut # ... - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable forced_type : monotonic_count # All these symbols will be submitted as monotonic counts. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.7 name : ltmPoolStatServerTotConns - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.23 name : ltmPoolStatConnqServiced # ... Flag stream \u00b6 When the value is a flag stream like 010101 , you can use forced_type: flag_stream to submit each flag as individual metric with value 0 or 1 . Two options are required when using flag_stream : options.placement : position of the flag in the flag stream (1-based indexing, first element is placement 1). options.metric_suffix : suffix appended to the metric name for a specific flag, usually matching the name of the flag. Example: metrics : - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 4 metric_suffix : OnLine - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 5 metric_suffix : ReplaceBattery This example will submit two metrics snmp.upsBasicStateOutputState.OnLine and snmp.upsBasicStateOutputState.ReplaceBattery with value 0 or 1 . Example of flag_stream usage in a profile . Extract value \u00b6 If the metric value to be submitted is from a OID with string value and needs to be extracted from it, you can use extract value feature. 
extract_value is a regex pattern with one capture group like (\\d+)C , where the capture group is (\\d+) . Example use cases respective regex patterns: stripping the C unit from a temperature value: (\\d+)C stripping the USD unit from a currency value: USD(\\d+) stripping the F unit from a temperature value with spaces between the metric and the unit: (\\d+) *F Example: Scalar Metric Example: metrics : - MIB : MY-MIB symbol : OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' Table Column Metric Example: metrics : - MIB : MY-MIB table : OID : 1.2.3.4.5.6 name : myTable symbols : - OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' # ... In the examples above, the OID value is a snmp OctetString value 22C and we want 22 to be submitted as value for snmp.temperature . metric_tags \u00b6 (Optional) This field is used to apply tags to all metrics collected by the profile. It has the same meaning than the instance-level config option (see conf.yaml.example ). Several collection methods are supported, as illustrated below: metric_tags : - OID : 1.3.6.1.2.1.1.5.0 symbol : sysName tag : snmp_host - # With regular expression matching OID : 1.3.6.1.2.1.1.5.0 symbol : sysName match : (.*)-(.*) tags : device_type : \\1 host : \\2","title":"Profile Format Reference"},{"location":"tutorials/snmp/profile-format/#profile-format-reference","text":"","title":"Profile Format Reference"},{"location":"tutorials/snmp/profile-format/#overview","text":"SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. An SNMP profile is materialised as a YAML file with the following structure: sysobjectid : # extends: # metrics : # # metric_tags: # ","title":"Overview"},{"location":"tutorials/snmp/profile-format/#fields","text":"","title":"Fields"},{"location":"tutorials/snmp/profile-format/#sysobjectid","text":"(Required) The sysobjectid field is used to match profiles against devices during device autodiscovery. It can refer to a fully-defined OID for a specific device make and model: sysobjectid : 1.3.6.1.4.1.232.9.4.10 or a wildcard pattern to address multiple device models: sysobjectid : 1.3.6.1.131.12.4.* or a list of fully-defined OID / wildcard patterns: sysobjectid : - 1.3.6.1.131.12.4.* - 1.3.6.1.4.1.232.9.4.10","title":"sysobjectid"},{"location":"tutorials/snmp/profile-format/#extends","text":"(Optional) This field can be used to include metrics and metric tags from other so-called base profiles . Base profiles can derive from other base profiles to build a hierarchy of reusable profile mixins. Important All device profiles should extend from the _base.yaml profile, which defines items that should be collected for all devices. Example: extends : - _base.yaml - _generic-if.yaml # Include basic metrics from IF-MIB.","title":"extends"},{"location":"tutorials/snmp/profile-format/#metrics","text":"(Required) Entries in the metrics field define which metrics will be collected by the profile. They can reference either a single OID (a.k.a symbol ), or an SNMP table.","title":"metrics"},{"location":"tutorials/snmp/profile-format/#symbol-metrics","text":"An SNMP symbol is an object with a scalar type (i.e. Counter32 , Integer32 , OctetString , etc). In a MIB file, a symbol can be recognized as an OBJECT-TYPE node with a scalar SYNTAX , placed under an OBJECT IDENTIFIER node (which is often the root OID of the MIB): EXAMPLE-MIB DEFINITIONS ::= BEGIN -- ... 
example OBJECT IDENTIFIER ::= { mib-2 7 } exampleSymbol OBJECT-TYPE SYNTAX Counter32 -- ... ::= { example 1 } In profiles, symbol metrics can be specified as entries that specify the MIB and symbol fields: metrics : # Example for the above dummy MIB and symbol: - MIB : EXAMPLE-MIB symbol : OID : 1.3.5.1.2.1.7.1 name : exampleSymbol # More realistic examples: - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.1.2 name : clusterHealth - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Warning Symbol metrics from the same MIB must still be listed as separate metrics entries, as shown above. For example, this is not valid syntax: metrics : - MIB : ISILON-MIB symbol : - OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes","title":"Symbol metrics"},{"location":"tutorials/snmp/profile-format/#table-metrics","text":"An SNMP table is an object that is composed of multiple entries (\"rows\"), where each entry contains values a set of symbols (\"columns\"). In a MIB file, tables be recognized by the presence of SEQUENCE OF : exampleTable OBJECT-TYPE SYNTAX SEQUENCE OF exampleEntry -- ... ::= { example 10 } exampleEntry OBJECT-TYPE -- ... ::= { exampleTable 1 } exampleColumn1 OBJECT-TYPE -- ... ::= { exampleEntry 1 } exampleColumn2 OBJECT-TYPE -- ... ::= { exampleEntry 2 } -- ... In profiles, tables can be specified as entries containing the MIB , table and symbols fields: metrics : # Example for the dummy table above: - MIB : EXAMPLE-MIB table : # Identification of the table which metrics come from. OID : 1.3.6.1.4.1.10 name : exampleTable symbols : # List of symbols ('columns') to retrieve. # Same format as for a single OID. # Each row in the table will emit these metrics. - OID : 1.3.6.1.4.1.10.1.1 name : exampleColumn1 - OID : 1.3.6.1.4.1.10.1.2 name : exampleColumn2 # ... # More realistic example: - MIB : CISCO-PROCESS-MIB table : # Each row in this table contains information about a CPU unit of the device. OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed # ...","title":"Table metrics"},{"location":"tutorials/snmp/profile-format/#table-metrics-tagging","text":"It is possible to add tags to metrics retrieved from a table in three ways:","title":"Table metrics tagging"},{"location":"tutorials/snmp/profile-format/#using-a-column-within-the-same-table","text":"metrics : - MIB : IF-MIB table : OID : 1.3.6.1.2.1.2.2 name : ifTable symbols : - OID : 1.3.6.1.2.1.2.2.1.14 name : ifInErrors # ... metric_tags : # Add an 'interface' tag to each metric of each row, # whose value is obtained from the 'ifDescr' column of the row. # This allows querying metrics by interface, e.g. 'interface:eth0'. 
- tag : interface column : OID : 1.3.6.1.2.1.2.2.1.2 name : ifDescr","title":"Using a column within the same table"},{"location":"tutorials/snmp/profile-format/#using-a-column-from-a-different-table-with-identical-indexes","text":"metrics : - MIB : CISCO-IF-EXTENSION-MIB forced_type : monotonic_count table : OID : 1.3.6.1.4.1.9.9.276.1.1.2 name : cieIfInterfaceTable symbols : - OID : 1.3.6.1.4.1.9.9.276.1.1.2.1.1 name : cieIfResetCount metric_tags : - MIB : IF-MIB column : OID : 1.3.6.1.2.1.31.1.1.1.1 name : ifName table : ifXTable tag : interface","title":"Using a column from a different table with identical indexes"},{"location":"tutorials/snmp/profile-format/#using-a-column-from-a-different-table-with-different-indexes","text":"metrics : - MIB : CPI-UNITY-MIB table : OID : 1.3.6.1.4.1.30932.1.10.1.3.110 name : cpiPduBranchTable symbols : - OID : 1.3.6.1.4.1.30932.1.10.1.3.110.1.3 name : cpiPduBranchCurrent metric_tags : - column : OID : 1.3.6.1.4.1.30932.1.10.1.2.10.1.3 name : cpiPduName table : cpiPduTable index_transform : - start : 1 end : 7 tag : pdu_name If the external table has different indexes, use index_transform to select a subset of the full index. index_transform is a list of start / end ranges to extract from the current table index to match the external table index. start and end are inclusive. External table indexes must be a subset of the indexes of the current table, or same indexes in a different order. Example In the example above, the index of cpiPduBranchTable looks like 1.6.0.36.155.53.3.246 , the first digit is the cpiPduBranchId index and the rest is the cpiPduBranchMac index. The index of cpiPduTable looks like 6.0.36.155.53.3.246 and represents cpiPduMac (equivalent to cpiPduBranchMac ). By using the index_transform with start 1 and end 7, we extract 6.0.36.155.53.3.246 from 1.6.0.36.155.53.3.246 ( cpiPduBranchTable full index), and then use it to match 6.0.36.155.53.3.246 ( cpiPduTable full index). index_transform can be more complex, the following definition will extract 2.3.5.6.7 from 1.2.3.4.5.6.7 . index_transform : - start : 1 end : 2 - start : 4 end : 6","title":"Using a column from a different table with different indexes"},{"location":"tutorials/snmp/profile-format/#using-an-index","text":"Important: \" index \" refers to one digit of the index part of the row OID. Example, if the column OID is 1.2.3.1.2 and the row OID is 1.2.3.1.2.7.8.9 , the full index is 7.8.9 . In this example, when using index: 1 , we will refer to 7 , index: 2 will refer to 8 , and so on. metrics : - MIB : CISCO-PROCESS-MIB table : OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed metric_tags : # This tagging method is more complex, so let's walk through an example... # # In CISCO-PROCESS-MIB, we can see that entries in the `cpmCPUTotalTable` are indexed by `cpmCPUTotalIndex`, # which corresponds to some sort of CPU position for each row in the table: # # cpmCPUTotalEntry OBJECT-TYPE # -- ... # INDEX { cpmCPUTotalIndex } # <-- See? # # We want to tag metrics in this table by this CPU position. # # To do this, we look up the position of this OID in `INDEX`. Here we see it's in 1st position. # So we can reference it here using `index: 1`. # (If there were two OIDs in `INDEX`, and we wanted to use the one in 2nd position, then we would have used `index: 2`.) # # NOTE: currently only indexes that refer to a column in the same table are supported. 
- tag : cpu index : 1","title":"Using an index"},{"location":"tutorials/snmp/profile-format/#mapping-index-to-tag-string-value","text":"You can use the following syntax to map indexes to tag string values. In the example below, the submitted metrics will be snmp.ipSystemStatsHCInReceives with tags like ipversion:ipv6 . metrics : - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives metric_tags : - index : 1 tag : ipversion mapping : 0 : unknown 1 : ipv4 2 : ipv6 3 : ipv4z 4 : ipv6z 16 : dns See meaning of index as used here in Using an index section.","title":"Mapping index to tag string value"},{"location":"tutorials/snmp/profile-format/#tagging-tips","text":"Note General guidelines on Datadog tagging also apply to table metric tags. In particular, be mindful of the kind of value contained in the columns used a tag sources. E.g. avoid using a DisplayString (an arbitrarily long human-readable text description) or unbounded sources (timestamps, IDs...) as tag values. Good candidates for tag values include short strings, enums, or integer indexes.","title":"Tagging tips"},{"location":"tutorials/snmp/profile-format/#metric-type-inference","text":"By default, the Datadog metric type of a symbol will be inferred from the SNMP type (i.e. the MIB SYNTAX ): SNMP type Inferred metric type Counter32 rate Counter64 rate Gauge32 gauge Integer gauge Integer32 gauge CounterBasedGauge64 gauge Opaque gauge SNMP types not listed in this table are submitted as gauge by default.","title":"Metric type inference"},{"location":"tutorials/snmp/profile-format/#forced-metric-types","text":"Sometimes the inferred type may not be what you want. Typically, OIDs that represent \"total number of X\" are defined as Counter32 in MIBs, but you probably want to submit them monotonic_count instead of a rate . For such cases, you can define a forced_type . Possible values and their effect are listed below. Forced type Description gauge Submit as a gauge. rate Submit as a rate. percent Multiply by 100 and submit as a rate. monotonic_count Submit as a monotonic count. monotonic_count_and_rate Submit 2 copies of the metric: one as a monotonic count, and one as a rate (suffixed with .rate ). flag_stream Submit each flag of a flag stream as individual metric with value 0 or 1 . See Flag Stream section . This works on both symbol and table metrics: metrics : # On a symbol: - MIB : TCP-MIB forced_type : monotonic_count symbol : OID : 1.3.6.1.2.1.6.5 name : tcpActiveOpens # On a table: - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives - OID : 1.3.6.1.2.1.4.31.1.1.6 name : ipSystemStatsHCInOctets Note When used on a table metrics entry, forced_type is applied to all symbols in the entry. So, if a table contains symbols of varying types, you should use multiple metrics entries: one for symbols with inferred metric types, and one for each forced_type . For example: metrics : - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable # No `forced_type` specified => metric types will be inferred. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.2 name : ltmPoolStatServerPktsIn - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.4 name : ltmPoolStatServerPktsOut # ... 
- MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable forced_type : monotonic_count # All these symbols will be submitted as monotonic counts. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.7 name : ltmPoolStatServerTotConns - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.23 name : ltmPoolStatConnqServiced # ...","title":"Forced metric types"},{"location":"tutorials/snmp/profile-format/#flag-stream","text":"When the value is a flag stream like 010101 , you can use forced_type: flag_stream to submit each flag as individual metric with value 0 or 1 . Two options are required when using flag_stream : options.placement : position of the flag in the flag stream (1-based indexing, first element is placement 1). options.metric_suffix : suffix appended to the metric name for a specific flag, usually matching the name of the flag. Example: metrics : - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 4 metric_suffix : OnLine - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 5 metric_suffix : ReplaceBattery This example will submit two metrics snmp.upsBasicStateOutputState.OnLine and snmp.upsBasicStateOutputState.ReplaceBattery with value 0 or 1 . Example of flag_stream usage in a profile .","title":"Flag stream"},{"location":"tutorials/snmp/profile-format/#extract-value","text":"If the metric value to be submitted is from a OID with string value and needs to be extracted from it, you can use extract value feature. extract_value is a regex pattern with one capture group like (\\d+)C , where the capture group is (\\d+) . Example use cases respective regex patterns: stripping the C unit from a temperature value: (\\d+)C stripping the USD unit from a currency value: USD(\\d+) stripping the F unit from a temperature value with spaces between the metric and the unit: (\\d+) *F Example: Scalar Metric Example: metrics : - MIB : MY-MIB symbol : OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' Table Column Metric Example: metrics : - MIB : MY-MIB table : OID : 1.2.3.4.5.6 name : myTable symbols : - OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' # ... In the examples above, the OID value is a snmp OctetString value 22C and we want 22 to be submitted as value for snmp.temperature .","title":"Extract value"},{"location":"tutorials/snmp/profile-format/#metric_tags","text":"(Optional) This field is used to apply tags to all metrics collected by the profile. It has the same meaning than the instance-level config option (see conf.yaml.example ). Several collection methods are supported, as illustrated below: metric_tags : - OID : 1.3.6.1.2.1.1.5.0 symbol : sysName tag : snmp_host - # With regular expression matching OID : 1.3.6.1.2.1.1.5.0 symbol : sysName match : (.*)-(.*) tags : device_type : \\1 host : \\2","title":"metric_tags"},{"location":"tutorials/snmp/profiles/","text":"Build an SNMP Profile \u00b6 SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. This tutorial will walk you through the steps of building a basic SNMP profile that collects OID metrics from HP iLO4 devices. Feel free to read the Introduction to SNMP if you need a refresher on SNMP concepts such as OIDs and MIBs. Ready? Let's get started! 
Research \u00b6 The first step to building an SNMP profile is doing some basic research about the device, and which metrics we want to collect. General device information \u00b6 Generally, you'll want to search the web and find out about the following: Device name, manufacturer, and device sysobjectid . Understand what the device does, and what it is used for. (Which metrics are relevant varies between routers, switches, bridges, etc. See Networking hardware .) E.g. from the HP iLO Wikipedia page , we can see that iLO4 devices are used by system administrators for remote management of embedded servers. Available versions of the device, and which ones we target. E.g. HP iLO devices exist in multiple versions (version 3, version 4...). Here, we are specifically targetting HP iLO4. Supported MIBs and OIDs (often available in official documentation), and associated MIB files. E.g. we can see that HP provides a MIB package for iLO devices here . Metrics selection \u00b6 Now that we have gathered some basic information about the device and its SNMP interfaces, we should decide which metrics we want to collect. (Devices often expose thousands of metrics through SNMP. We certainly don't want to collect them all.) Devices typically expose thousands of OIDs that can span dozens of MIB, so this can feel daunting at first. Remember, never give up! Some guidelines to help you in this process: 10-40 metrics is a good amount already. Explore base profiles to see which ones could be applicable to the device. Explore manufacturer-specific MIB files looking for metrics such as: General health: status gauges... Network traffic: bytes in/out, errors in/out, ... CPU and memory usage. Temperature: temperature sensors, thermal condition, ... Power supply. Storage. Field-replaceable units ( FRU ). ... Implementation \u00b6 It might be tempting to gather as many metrics as possible, and only then start building the profile and writing tests. But we recommend you start small . This will allow you to quickly gain confidence on the various components of the SNMP development workflow: Editing profile files. Writing tests. Building and using simulation data. Add a profile file \u00b6 Add a .yaml file for the profile with the sysobjectid and a metric (you'll be able to add more later). For example: sysobjectid : 1.3.6.1.4.1.232.9.4.10 metrics : - MIB : CPQHLTH-MIB symbol : OID : 1.3.6.1.4.1.232.6.2.8.1.0 name : cpqHeSysUtilLifeTime Tip sysobjectid can also be a wildcard pattern to match a sub-tree of devices, eg 1.3.6.1.131.12.4.* . Generate a profile file from a collection of MIBs \u00b6 You can use ddev to create a profile from a list of mibs. $ ddev meta snmp generate-profile-from-mibs --help This script requires a list of ASN1 MIB files as input argument, and copies to the clipboard a list of metrics that can be used to create a profile. Options \u00b6 -f, --filters is an option to provide the path to a YAML file containing a collection of MIB names and their list of node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Will include system , interfaces and ip nodes from RFC1213-MIB , no node fro, CISCO-SYSLOG-MIB and node snmpEngine from SNMP-FRAMEWORK-MIB . Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. 
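As a sketch, a full invocation with a filters file might look like the following (the MIB file names and the filters.yaml path are placeholders, not files shipped with this tutorial):

```
# Generate candidate profile metrics from two MIB files, keeping only the
# nodes listed in filters.yaml (using the filter format described above).
$ ddev meta snmp generate-profile-from-mibs -f ./filters.yaml ./RFC1213-MIB.mib ./SNMP-FRAMEWORK-MIB.mib
```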
If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define one or more indexes, as columns within the same table, or columns from a different table and even a different MIB. The index value can be used to tag table's metrics. This is defined in the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB is as follows: entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } <== this is the index definition ::= { entPhysicalContainsTable 1 } or its JSON dump, where INDEX is replaced by indices : \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Indexes can be replaced by another MIB symbol that is more human friendly. You might prefer to see the interface name versus its numerical table index. This can be achieved using metric_tag_aliases . Add unit tests \u00b6 Add a unit test in test_profiles.py to verify that the metric is successfully collected by the integration when the profile is enabled. (These unit tests are mostly used to prevent regressions and will help with maintenance.) For example: def test_hp_ilo4 ( aggregator ): run_profile_check ( 'hp_ilo4' ) common_tags = common . CHECK_TAGS + [ 'snmp_profile:hp-ilo4' ] aggregator . assert_metric ( 'snmp.cpqHeSysUtilLifeTime' , metric_type = aggregator . MONOTONIC_COUNT , tags = common_tags , count = 1 ) aggregator . assert_all_metrics_covered () We don't have simulation data yet, so the test should fail. Let's make sure it does: $ ddev test -k test_hp_ilo4 snmp:py38 [...] ======================================= FAILURES ======================================== _____________________________________ test_hp_ilo4 ______________________________________ tests/test_profiles.py:1464: in test_hp_ilo4 aggregator.assert_metric('snmp.cpqHeSysUtilLifeTime', metric_type=aggregator.GAUGE, tags=common.CHECK_TAGS, count=1) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:253: in assert_metric self._assert(condition, msg=msg, expected_stub=expected_metric, submitted_elements=self._metrics) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:295: in _assert assert condition, new_msg E AssertionError: Needed exactly 1 candidates for 'snmp.cpqHeSysUtilLifeTime', got 0 [...] Good. Now, onto adding simulation data. 
Add simulation data \u00b6 Add a .snmprec file named after the community_string , which is the value we gave to run_profile_check() : $ touch snmp/tests/compose/data/hp_ilo4.snmprec Add lines to the .snmprec file to specify the sysobjectid and the OID listed in the profile: 1.3.6.1.2.1.1.2.0|6|1.3.6.1.4.1.232.9.4.10 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Run the test again, and make sure it passes this time: $ ddev test -k test_hp_ilo4 snmp:py38 [...] tests/test_profiles.py::test_hp_ilo4 PASSED [100%] =================================================== 1 passed, 107 deselected in 9.87s ==================================================== ________________________________________________________________ summary _________________________________________________________________ py38: commands succeeded congratulations :) Rinse and repeat \u00b6 We have now covered the basic workflow \u2014 add metrics, expand tests, add simulation data. You can now go ahead and add more metrics to the profile! Next steps \u00b6 Congratulations! You should now be able to write a basic SNMP profile. We kept this tutorial as simple as possible, but profiles offer many more options to collect metrics from SNMP devices. To learn more about what can be done in profiles, read the Profile format reference . To learn more about .snmprec files, see the Simulation data format reference .","title":"Build an SNMP Profile"},{"location":"tutorials/snmp/profiles/#build-an-snmp-profile","text":"SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. This tutorial will walk you through the steps of building a basic SNMP profile that collects OID metrics from HP iLO4 devices. Feel free to read the Introduction to SNMP if you need a refresher on SNMP concepts such as OIDs and MIBs. Ready? Let's get started!","title":"Build an SNMP Profile"},{"location":"tutorials/snmp/profiles/#research","text":"The first step to building an SNMP profile is doing some basic research about the device, and which metrics we want to collect.","title":"Research"},{"location":"tutorials/snmp/profiles/#general-device-information","text":"Generally, you'll want to search the web and find out about the following: Device name, manufacturer, and device sysobjectid . Understand what the device does, and what it is used for. (Which metrics are relevant varies between routers, switches, bridges, etc. See Networking hardware .) E.g. from the HP iLO Wikipedia page , we can see that iLO4 devices are used by system administrators for remote management of embedded servers. Available versions of the device, and which ones we target. E.g. HP iLO devices exist in multiple versions (version 3, version 4...). Here, we are specifically targetting HP iLO4. Supported MIBs and OIDs (often available in official documentation), and associated MIB files. E.g. we can see that HP provides a MIB package for iLO devices here .","title":"General device information"},{"location":"tutorials/snmp/profiles/#metrics-selection","text":"Now that we have gathered some basic information about the device and its SNMP interfaces, we should decide which metrics we want to collect. (Devices often expose thousands of metrics through SNMP. We certainly don't want to collect them all.) Devices typically expose thousands of OIDs that can span dozens of MIB, so this can feel daunting at first. Remember, never give up! Some guidelines to help you in this process: 10-40 metrics is a good amount already. 
Explore base profiles to see which ones could be applicable to the device. Explore manufacturer-specific MIB files looking for metrics such as: General health: status gauges... Network traffic: bytes in/out, errors in/out, ... CPU and memory usage. Temperature: temperature sensors, thermal condition, ... Power supply. Storage. Field-replaceable units ( FRU ). ...","title":"Metrics selection"},{"location":"tutorials/snmp/profiles/#implementation","text":"It might be tempting to gather as many metrics as possible, and only then start building the profile and writing tests. But we recommend you start small . This will allow you to quickly gain confidence on the various components of the SNMP development workflow: Editing profile files. Writing tests. Building and using simulation data.","title":"Implementation"},{"location":"tutorials/snmp/profiles/#add-a-profile-file","text":"Add a .yaml file for the profile with the sysobjectid and a metric (you'll be able to add more later). For example: sysobjectid : 1.3.6.1.4.1.232.9.4.10 metrics : - MIB : CPQHLTH-MIB symbol : OID : 1.3.6.1.4.1.232.6.2.8.1.0 name : cpqHeSysUtilLifeTime Tip sysobjectid can also be a wildcard pattern to match a sub-tree of devices, eg 1.3.6.1.131.12.4.* .","title":"Add a profile file"},{"location":"tutorials/snmp/profiles/#generate-a-profile-file-from-a-collection-of-mibs","text":"You can use ddev to create a profile from a list of mibs. $ ddev meta snmp generate-profile-from-mibs --help This script requires a list of ASN1 MIB files as input argument, and copies to the clipboard a list of metrics that can be used to create a profile.","title":"Generate a profile file from a collection of MIBs"},{"location":"tutorials/snmp/profiles/#options","text":"-f, --filters is an option to provide the path to a YAML file containing a collection of MIB names and their list of node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Will include system , interfaces and ip nodes from RFC1213-MIB , no node fro, CISCO-SYSLOG-MIB and node snmpEngine from SNMP-FRAMEWORK-MIB . Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define one or more indexes, as columns within the same table, or columns from a different table and even a different MIB. The index value can be used to tag table's metrics. This is defined in the INDEX field in row nodes. 
As an example, entPhysicalContainsTable in ENTITY-MIB is as follows: entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } <== this is the index definition ::= { entPhysicalContainsTable 1 } or its JSON dump, where INDEX is replaced by indices : \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Indexes can be replaced by another MIB symbol that is more human friendly. You might prefer to see the interface name versus its numerical table index. This can be achieved using metric_tag_aliases .","title":"Options"},{"location":"tutorials/snmp/profiles/#add-unit-tests","text":"Add a unit test in test_profiles.py to verify that the metric is successfully collected by the integration when the profile is enabled. (These unit tests are mostly used to prevent regressions and will help with maintenance.) For example: def test_hp_ilo4 ( aggregator ): run_profile_check ( 'hp_ilo4' ) common_tags = common . CHECK_TAGS + [ 'snmp_profile:hp-ilo4' ] aggregator . assert_metric ( 'snmp.cpqHeSysUtilLifeTime' , metric_type = aggregator . MONOTONIC_COUNT , tags = common_tags , count = 1 ) aggregator . assert_all_metrics_covered () We don't have simulation data yet, so the test should fail. Let's make sure it does: $ ddev test -k test_hp_ilo4 snmp:py38 [...] ======================================= FAILURES ======================================== _____________________________________ test_hp_ilo4 ______________________________________ tests/test_profiles.py:1464: in test_hp_ilo4 aggregator.assert_metric('snmp.cpqHeSysUtilLifeTime', metric_type=aggregator.GAUGE, tags=common.CHECK_TAGS, count=1) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:253: in assert_metric self._assert(condition, msg=msg, expected_stub=expected_metric, submitted_elements=self._metrics) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:295: in _assert assert condition, new_msg E AssertionError: Needed exactly 1 candidates for 'snmp.cpqHeSysUtilLifeTime', got 0 [...] Good. Now, onto adding simulation data.","title":"Add unit tests"},{"location":"tutorials/snmp/profiles/#add-simulation-data","text":"Add a .snmprec file named after the community_string , which is the value we gave to run_profile_check() : $ touch snmp/tests/compose/data/hp_ilo4.snmprec Add lines to the .snmprec file to specify the sysobjectid and the OID listed in the profile: 1.3.6.1.2.1.1.2.0|6|1.3.6.1.4.1.232.9.4.10 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Run the test again, and make sure it passes this time: $ ddev test -k test_hp_ilo4 snmp:py38 [...] 
tests/test_profiles.py::test_hp_ilo4 PASSED [100%] =================================================== 1 passed, 107 deselected in 9.87s ==================================================== ________________________________________________________________ summary _________________________________________________________________ py38: commands succeeded congratulations :)","title":"Add simulation data"},{"location":"tutorials/snmp/profiles/#rinse-and-repeat","text":"We have now covered the basic workflow \u2014 add metrics, expand tests, add simulation data. You can now go ahead and add more metrics to the profile!","title":"Rinse and repeat"},{"location":"tutorials/snmp/profiles/#next-steps","text":"Congratulations! You should now be able to write a basic SNMP profile. We kept this tutorial as simple as possible, but profiles offer many more options to collect metrics from SNMP devices. To learn more about what can be done in profiles, read the Profile format reference . To learn more about .snmprec files, see the Simulation data format reference .","title":"Next steps"},{"location":"tutorials/snmp/sim-format/","text":"Simulation Data Format Reference \u00b6 Conventions \u00b6 Simulation data for profiles is contained in .snmprec files located in the tests directory . Simulation files must be named after the SNMP community string used in the profile unit tests. For example: cisco-nexus.snmprec . File contents \u00b6 Each line in a .snmprec file corresponds to a value for an OID. Lines must be formatted as follows: || For the list of supported types, see the snmpsim simulation data file format documentation. Warning Due to a limitation of snmpsim , contents of .snmprec files must be sorted in lexicographic order . Use $ sort -V /path/to/profile.snmprec to sort lines from the terminal. Symbols \u00b6 For symbol metrics , add a single line corresponding to the symbol OID. For example: 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Tables \u00b6 Tip Adding simulation data for tables can be particularly tedious. This section documents the manual process, but automatic generation is possible \u2014 see How to generate table simulation data . For table metrics , add one copy of the metric per row, appending the index to the OID. For example, to simulate 3 rows in the table 1.3.6.1.4.1.6.13 that has OIDs 1.3.6.1.4.1.6.13.1.6 and 1.3.6.1.4.1.6.13.1.8 , you could write: 1.3.6.1.4.1.6.13.1.6.0|2|1051200 1.3.6.1.4.1.6.13.1.6.1|2|1446 1.3.6.1.4.1.6.13.1.6.2|2|23 1.3.6.1.4.1.6.13.1.8.0|2|165 1.3.6.1.4.1.6.13.1.8.1|2|976 1.3.6.1.4.1.6.13.1.8.2|2|0 Note If the table uses table metric tags , you may need to add additional OID simulation data for those tags.","title":"Simulation Data Format Reference"},{"location":"tutorials/snmp/sim-format/#simulation-data-format-reference","text":"","title":"Simulation Data Format Reference"},{"location":"tutorials/snmp/sim-format/#conventions","text":"Simulation data for profiles is contained in .snmprec files located in the tests directory . Simulation files must be named after the SNMP community string used in the profile unit tests. For example: cisco-nexus.snmprec .","title":"Conventions"},{"location":"tutorials/snmp/sim-format/#file-contents","text":"Each line in a .snmprec file corresponds to a value for an OID. Lines must be formatted as follows: || For the list of supported types, see the snmpsim simulation data file format documentation. Warning Due to a limitation of snmpsim , contents of .snmprec files must be sorted in lexicographic order . 
Use $ sort -V /path/to/profile.snmprec to sort lines from the terminal.","title":"File contents"},{"location":"tutorials/snmp/sim-format/#symbols","text":"For symbol metrics , add a single line corresponding to the symbol OID. For example: 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200","title":"Symbols"},{"location":"tutorials/snmp/sim-format/#tables","text":"Tip Adding simulation data for tables can be particularly tedious. This section documents the manual process, but automatic generation is possible \u2014 see How to generate table simulation data . For table metrics , add one copy of the metric per row, appending the index to the OID. For example, to simulate 3 rows in the table 1.3.6.1.4.1.6.13 that has OIDs 1.3.6.1.4.1.6.13.1.6 and 1.3.6.1.4.1.6.13.1.8 , you could write: 1.3.6.1.4.1.6.13.1.6.0|2|1051200 1.3.6.1.4.1.6.13.1.6.1|2|1446 1.3.6.1.4.1.6.13.1.6.2|2|23 1.3.6.1.4.1.6.13.1.8.0|2|165 1.3.6.1.4.1.6.13.1.8.1|2|976 1.3.6.1.4.1.6.13.1.8.2|2|0 Note If the table uses table metric tags , you may need to add additional OID simulation data for those tags.","title":"Tables"},{"location":"tutorials/snmp/tools/","text":"Tools \u00b6 Using tcpdump with SNMP \u00b6 The tcpdump command shows the exact request and response content of SNMP GET , GETNEXT and other SNMP calls. In a shell run tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 -nn : turn off host and protocol name resolution (to avoid generating DNS packets) -i INTERFACE : listen on INTERFACE (default: lowest numbered interface) -T snmp : type/protocol, snmp in our case In another separate shell run snmpwalk or snmpget : snmpwalk -O n -v2c -c localhost:1161 1.3.6 After you've run snmpwalk , you'll see results like this from tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 tcpdump: listening on lo0, link-type NULL (BSD loopback), capture size 262144 bytes 17:25:43.639639 IP (tos 0x0, ttl 64, id 29570, offset 0, flags [none], proto UDP (17), length 76, bad cksum 0 (->91d)!) 127.0.0.1.59540 > 127.0.0.1.1161: { SNMPv2c C=\"cisco-nexus\" { GetRequest(28) R=1921760388 .1.3.6.1.2.1.1.2.0 } } 17:25:43.645088 IP (tos 0x0, ttl 64, id 26543, offset 0, flags [none], proto UDP (17), length 88, bad cksum 0 (->14e4)!) 127.0.0.1.1161 > 127.0.0.1.59540: { SNMPv2c C=\"cisco-nexus\" { GetResponse(40) R=1921760388 .1.3.6.1.2.1.1.2.0=.1.3.6.1.4.1.9.12.3.1.3.1.2 } } From the Docker Agent container \u00b6 If you want to run snmpget , snmpwalk , and tcpdump from the Docker Agent container you can install them by running the following commands (in the container): apt update apt install -y snmp tcpdump","title":"Tools"},{"location":"tutorials/snmp/tools/#tools","text":"","title":"Tools"},{"location":"tutorials/snmp/tools/#using-tcpdump-with-snmp","text":"The tcpdump command shows the exact request and response content of SNMP GET , GETNEXT and other SNMP calls. 
In a shell run tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 -nn : turn off host and protocol name resolution (to avoid generating DNS packets) -i INTERFACE : listen on INTERFACE (default: lowest numbered interface) -T snmp : type/protocol, snmp in our case In another separate shell run snmpwalk or snmpget : snmpwalk -O n -v2c -c localhost:1161 1.3.6 After you've run snmpwalk , you'll see results like this from tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 tcpdump: listening on lo0, link-type NULL (BSD loopback), capture size 262144 bytes 17:25:43.639639 IP (tos 0x0, ttl 64, id 29570, offset 0, flags [none], proto UDP (17), length 76, bad cksum 0 (->91d)!) 127.0.0.1.59540 > 127.0.0.1.1161: { SNMPv2c C=\"cisco-nexus\" { GetRequest(28) R=1921760388 .1.3.6.1.2.1.1.2.0 } } 17:25:43.645088 IP (tos 0x0, ttl 64, id 26543, offset 0, flags [none], proto UDP (17), length 88, bad cksum 0 (->14e4)!) 127.0.0.1.1161 > 127.0.0.1.59540: { SNMPv2c C=\"cisco-nexus\" { GetResponse(40) R=1921760388 .1.3.6.1.2.1.1.2.0=.1.3.6.1.4.1.9.12.3.1.3.1.2 } }","title":"Using tcpdump with SNMP"},{"location":"tutorials/snmp/tools/#from-the-docker-agent-container","text":"If you want to run snmpget , snmpwalk , and tcpdump from the Docker Agent container you can install them by running the following commands (in the container): apt update apt install -y snmp tcpdump","title":"From the Docker Agent container"}]} \ No newline at end of file +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Agent Integrations \u00b6 Welcome to the wonderful world of developing Agent Integrations for Datadog. Here we document how we do things, the processes for various tasks, coding conventions & best practices, the internals of our testing infrastructure, and so much more. If you are intrigued, continue reading. If not, continue all the same Getting started \u00b6 To work on any integration (a.k.a. Check ), you must setup your development environment. After that you may immediately begin testing or read through the best practices we strive to follow. Also, feel free to check out how ddev works and browse the API reference of the base package. Navigation \u00b6 Desktop readers can use keyboard shortcuts to navigate. Keys Action , (comma) p Navigate to the \"previous\" page . (period) n Navigate to the \"next\" page / s Display the search modal","title":"About"},{"location":"#agent-integrations","text":"Welcome to the wonderful world of developing Agent Integrations for Datadog. Here we document how we do things, the processes for various tasks, coding conventions & best practices, the internals of our testing infrastructure, and so much more. If you are intrigued, continue reading. If not, continue all the same","title":"Agent Integrations"},{"location":"#getting-started","text":"To work on any integration (a.k.a. Check ), you must setup your development environment. After that you may immediately begin testing or read through the best practices we strive to follow. Also, feel free to check out how ddev works and browse the API reference of the base package.","title":"Getting started"},{"location":"#navigation","text":"Desktop readers can use keyboard shortcuts to navigate. Keys Action , (comma) p Navigate to the \"previous\" page . 
(period) n Navigate to the \"next\" page / s Display the search modal","title":"Navigation"},{"location":"e2e/","text":"E2E \u00b6 Any integration that makes use of our pytest plugin in its test suite supports end-to-end testing on a live Datadog Agent . The entrypoint for E2E management is the command group ddev env . Discovery \u00b6 Use the ls command to see what environments are available, for example: $ ddev env ls envoy envoy: py27 py38 You'll notice that only environments that actually run tests are available. Running simply ddev env ls with no arguments will display the active environments. Creation \u00b6 To start an environment run ddev env start , for example: $ ddev env start envoy py38 Setting up environment `py38`... success! Updating `datadog/agent-dev:master`... success! Detecting the major version... Agent 7 detected Writing configuration for `py38`... success! Starting the Agent... success! Config file (copied to your clipboard): C:\\Users\\ofek\\AppData\\Local\\dd-checks-dev\\envs\\envoy\\py38\\config\\envoy.yaml To run this check, do: ddev env check envoy py38 To stop this check, do: ddev env stop envoy py38 This sets up the selected environment and an instance of the Agent running in a Docker container. The default configuration is defined by each environment's test suite and is saved to a file, which is then mounted to the Agent container so you may freely modify it. Let's see what we have running: $ docker ps --format \"table {{.Image}}\\t{{.Status}}\\t{{.Ports}}\\t{{.Names}}\" IMAGE STATUS PORTS NAMES datadog/agent-dev:master-py3 Up 4 seconds (health: starting) dd_envoy_py38 default_service2 Up 5 seconds 80/tcp, 10000/tcp default_service2_1 envoyproxy/envoy:latest Up 5 seconds 0.0.0.0:8001->8001/tcp, 10000/tcp, 0.0.0.0:8000->80/tcp default_front-envoy_1 default_xds Up 5 seconds 8080/tcp default_xds_1 default_service1 Up 5 seconds 80/tcp, 10000/tcp default_service1_1 Agent version \u00b6 You can select a particular build of the Agent to use with the --agent / -a option. Any Docker image is valid e.g. datadog/agent:7.17.0 . A custom nightly build will be used by default, which is re-built on every commit to the Datadog Agent repository . Integration version \u00b6 By default the version of the integration used will be the one shipped with the chosen Agent version, as if you had passed in the --prod flag. If you wish to modify an integration and test changes in real time, use the --dev flag. Doing so will mount and install the integration in the Agent container. All modifications to the integration's directory will be propagated to the Agent, whether it be a code change or switching to a different Git branch. If you modify the base package then you will need to mount that with the --base flag, which implicitly activates --dev . Testing \u00b6 To run tests against the live Agent, use the ddev env test command. It is similar to the test command except it is capable of running tests marked as E2E , and only runs such tests. Automation \u00b6 You can use the --new-env / -ne flag to automate environment management. For example running: ddev env test apache:py38 vault:py38 -ne will start the py38 environment for Apache, run E2E tests, tear down the environment, and then do the same for Vault. Tip Since running tests implies code changes are being introduced, --new-env enables --dev by default. Execution \u00b6 Similar to the Agent's check command, you can perform manual check runs using ddev env check , for example: $ ddev env check envoy py38 --log-level debug ... 
========= Collector ========= Running Checks ============== envoy (1.12.0) -------------- Instance ID: envoy:c705bd922a3c275c [OK] Configuration Source: file:/etc/datadog-agent/conf.d/envoy.d/envoy.yaml Total Runs: 1 Metric Samples: Last Run: 546, Total: 546 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 1, Total: 1 Average Execution Time : 25ms Last Execution Date : 2020-02-17 00:58:05.000000 UTC Last Successful Execution Date : 2020-02-17 00:58:05.000000 UTC Debugging \u00b6 You may start an interactive debugging session using the --breakpoint / -b option. The option accepts an integer representing the line number at which to break. For convenience, 0 and -1 are shortcuts to the first and last line of the integration's check method, respectively. $ ddev env check envoy py38 -b 0 > /opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/envoy/envoy.py(34)check() -> custom_tags = instance.get('tags', []) (Pdb) list 29 self.blacklisted_metrics = set() 30 31 self.caching_metrics = None 32 33 def check(self, instance): 34 B-> custom_tags = instance.get('tags', []) 35 36 try: 37 stats_url = instance['stats_url'] 38 except KeyError: 39 msg = 'Envoy configuration setting `stats_url` is required' (Pdb) print(instance) {'stats_url': 'http://localhost:8001/stats'} Caveat The line number must be within the integration's check method. Refreshing state \u00b6 Testing and manual check runs always reflect the current state of code and configuration however, if you want to see the result of changes in-app , you will need to refresh the environment by running ddev env reload . Removal \u00b6 To stop an environment run ddev env stop . Any environments that haven't been explicitly stopped will show as active in the output of ddev env ls , even persisting through system restarts. If you are confident that environments are no longer active, you can run ddev env prune to remove all accumulated environment state.","title":"E2E"},{"location":"e2e/#e2e","text":"Any integration that makes use of our pytest plugin in its test suite supports end-to-end testing on a live Datadog Agent . The entrypoint for E2E management is the command group ddev env .","title":"E2E"},{"location":"e2e/#discovery","text":"Use the ls command to see what environments are available, for example: $ ddev env ls envoy envoy: py27 py38 You'll notice that only environments that actually run tests are available. Running simply ddev env ls with no arguments will display the active environments.","title":"Discovery"},{"location":"e2e/#creation","text":"To start an environment run ddev env start , for example: $ ddev env start envoy py38 Setting up environment `py38`... success! Updating `datadog/agent-dev:master`... success! Detecting the major version... Agent 7 detected Writing configuration for `py38`... success! Starting the Agent... success! Config file (copied to your clipboard): C:\\Users\\ofek\\AppData\\Local\\dd-checks-dev\\envs\\envoy\\py38\\config\\envoy.yaml To run this check, do: ddev env check envoy py38 To stop this check, do: ddev env stop envoy py38 This sets up the selected environment and an instance of the Agent running in a Docker container. The default configuration is defined by each environment's test suite and is saved to a file, which is then mounted to the Agent container so you may freely modify it. 
Let's see what we have running: $ docker ps --format \"table {{.Image}}\\t{{.Status}}\\t{{.Ports}}\\t{{.Names}}\" IMAGE STATUS PORTS NAMES datadog/agent-dev:master-py3 Up 4 seconds (health: starting) dd_envoy_py38 default_service2 Up 5 seconds 80/tcp, 10000/tcp default_service2_1 envoyproxy/envoy:latest Up 5 seconds 0.0.0.0:8001->8001/tcp, 10000/tcp, 0.0.0.0:8000->80/tcp default_front-envoy_1 default_xds Up 5 seconds 8080/tcp default_xds_1 default_service1 Up 5 seconds 80/tcp, 10000/tcp default_service1_1","title":"Creation"},{"location":"e2e/#agent-version","text":"You can select a particular build of the Agent to use with the --agent / -a option. Any Docker image is valid e.g. datadog/agent:7.17.0 . A custom nightly build will be used by default, which is re-built on every commit to the Datadog Agent repository .","title":"Agent version"},{"location":"e2e/#integration-version","text":"By default the version of the integration used will be the one shipped with the chosen Agent version, as if you had passed in the --prod flag. If you wish to modify an integration and test changes in real time, use the --dev flag. Doing so will mount and install the integration in the Agent container. All modifications to the integration's directory will be propagated to the Agent, whether it be a code change or switching to a different Git branch. If you modify the base package then you will need to mount that with the --base flag, which implicitly activates --dev .","title":"Integration version"},{"location":"e2e/#testing","text":"To run tests against the live Agent, use the ddev env test command. It is similar to the test command except it is capable of running tests marked as E2E , and only runs such tests.","title":"Testing"},{"location":"e2e/#automation","text":"You can use the --new-env / -ne flag to automate environment management. For example running: ddev env test apache:py38 vault:py38 -ne will start the py38 environment for Apache, run E2E tests, tear down the environment, and then do the same for Vault. Tip Since running tests implies code changes are being introduced, --new-env enables --dev by default.","title":"Automation"},{"location":"e2e/#execution","text":"Similar to the Agent's check command, you can perform manual check runs using ddev env check , for example: $ ddev env check envoy py38 --log-level debug ... ========= Collector ========= Running Checks ============== envoy (1.12.0) -------------- Instance ID: envoy:c705bd922a3c275c [OK] Configuration Source: file:/etc/datadog-agent/conf.d/envoy.d/envoy.yaml Total Runs: 1 Metric Samples: Last Run: 546, Total: 546 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 1, Total: 1 Average Execution Time : 25ms Last Execution Date : 2020-02-17 00:58:05.000000 UTC Last Successful Execution Date : 2020-02-17 00:58:05.000000 UTC","title":"Execution"},{"location":"e2e/#debugging","text":"You may start an interactive debugging session using the --breakpoint / -b option. The option accepts an integer representing the line number at which to break. For convenience, 0 and -1 are shortcuts to the first and last line of the integration's check method, respectively. 
$ ddev env check envoy py38 -b 0 > /opt/datadog-agent/embedded/lib/python3.8/site-packages/datadog_checks/envoy/envoy.py(34)check() -> custom_tags = instance.get('tags', []) (Pdb) list 29 self.blacklisted_metrics = set() 30 31 self.caching_metrics = None 32 33 def check(self, instance): 34 B-> custom_tags = instance.get('tags', []) 35 36 try: 37 stats_url = instance['stats_url'] 38 except KeyError: 39 msg = 'Envoy configuration setting `stats_url` is required' (Pdb) print(instance) {'stats_url': 'http://localhost:8001/stats'} Caveat The line number must be within the integration's check method.","title":"Debugging"},{"location":"e2e/#refreshing-state","text":"Testing and manual check runs always reflect the current state of code and configuration however, if you want to see the result of changes in-app , you will need to refresh the environment by running ddev env reload .","title":"Refreshing state"},{"location":"e2e/#removal","text":"To stop an environment run ddev env stop . Any environments that haven't been explicitly stopped will show as active in the output of ddev env ls , even persisting through system restarts. If you are confident that environments are no longer active, you can run ddev env prune to remove all accumulated environment state.","title":"Removal"},{"location":"setup/","text":"Setup \u00b6 This will be relatively painless, we promise! Integrations \u00b6 You will need to clone integrations-core and/or integrations-extras depending on which integrations you intend to work on. Python \u00b6 To work on any integration you must install Python 3.8+. After installation, restart your terminal and ensure that your newly installed Python comes first in your PATH . macOS We recommend using Homebrew . First update the formulae and Homebrew itself: brew update then either install Python: brew install python or upgrade it: brew upgrade python After it completes, check the output to see if it asked you to run any extra commands and if so, execute them. Verify successful PATH modification: which -a python Windows Windows users have it the easiest. Simply download the latest x86-64 executable installer and run it. When prompted, be sure to select the option to add to your PATH . Also, it is recommended that you choose the per-user installation method. Verify successful PATH modification: where python Linux Ah, you enjoy difficult things. Are you using Gentoo? We recommend using either Miniconda or pyenv . Whatever you do, never modify the system Python. Verify successful PATH modification: which -a python ddev \u00b6 Installation \u00b6 You have 2 options to install the CLI provided by the package datadog-checks-dev . Warning For either option, if you are on macOS/Linux do not use sudo ! Doing so will result in a broken installation. Development \u00b6 If you cloned integrations-core and want to always use the version based on the current branch, run: python -m pip install -e \"path/to/datadog_checks_dev[cli]\" Note Be aware that this method does not keep track of dependencies so you will need to re-run the command if/when the required dependencies are changed. Stable \u00b6 The latest released version may be installed from PyPI : python -m pip install --upgrade \"datadog-checks-dev[cli]\" Configuration \u00b6 Upon the first invocation, ddev will create its config file if it does not yet exist. You will need to set the location of each cloned repository: ddev config set /path/to/integrations- The may be either core or extras . 
By default, the repo core will be the target of all commands. If you want to switch to integrations-extras , run: ddev config set repo extras Docker \u00b6 Docker is used in nearly every integration's test suite therefore we simply require it to avoid confusion. macOS Install Docker Desktop for Mac . Right-click the Docker taskbar item and update Preferences > File Sharing with any locations you need to open. Windows Install Docker Desktop for Windows . Right-click the Docker taskbar item and update Settings > Shared Drives with any locations you need to open e.g. C:\\ . Linux Install Docker Engine for your distribution: Ubuntu Docker CE for Ubuntu Debian Docker CE for Debian Fedora Docker CE for Fedora CentOS Docker CE for CentOS Add your user to the docker group: sudo usermod -aG docker $USER Sign out and then back in again so your changes take effect. After installation, restart your terminal one last time.","title":"Setup"},{"location":"setup/#setup","text":"This will be relatively painless, we promise!","title":"Setup"},{"location":"setup/#integrations","text":"You will need to clone integrations-core and/or integrations-extras depending on which integrations you intend to work on.","title":"Integrations"},{"location":"setup/#python","text":"To work on any integration you must install Python 3.8+. After installation, restart your terminal and ensure that your newly installed Python comes first in your PATH . macOS We recommend using Homebrew . First update the formulae and Homebrew itself: brew update then either install Python: brew install python or upgrade it: brew upgrade python After it completes, check the output to see if it asked you to run any extra commands and if so, execute them. Verify successful PATH modification: which -a python Windows Windows users have it the easiest. Simply download the latest x86-64 executable installer and run it. When prompted, be sure to select the option to add to your PATH . Also, it is recommended that you choose the per-user installation method. Verify successful PATH modification: where python Linux Ah, you enjoy difficult things. Are you using Gentoo? We recommend using either Miniconda or pyenv . Whatever you do, never modify the system Python. Verify successful PATH modification: which -a python","title":"Python"},{"location":"setup/#ddev","text":"","title":"ddev"},{"location":"setup/#installation","text":"You have 2 options to install the CLI provided by the package datadog-checks-dev . Warning For either option, if you are on macOS/Linux do not use sudo ! Doing so will result in a broken installation.","title":"Installation"},{"location":"setup/#development","text":"If you cloned integrations-core and want to always use the version based on the current branch, run: python -m pip install -e \"path/to/datadog_checks_dev[cli]\" Note Be aware that this method does not keep track of dependencies so you will need to re-run the command if/when the required dependencies are changed.","title":"Development"},{"location":"setup/#stable","text":"The latest released version may be installed from PyPI : python -m pip install --upgrade \"datadog-checks-dev[cli]\"","title":"Stable"},{"location":"setup/#configuration","text":"Upon the first invocation, ddev will create its config file if it does not yet exist. You will need to set the location of each cloned repository: ddev config set /path/to/integrations- The may be either core or extras . By default, the repo core will be the target of all commands. 
If you want to switch to integrations-extras , run: ddev config set repo extras","title":"Configuration"},{"location":"setup/#docker","text":"Docker is used in nearly every integration's test suite therefore we simply require it to avoid confusion. macOS Install Docker Desktop for Mac . Right-click the Docker taskbar item and update Preferences > File Sharing with any locations you need to open. Windows Install Docker Desktop for Windows . Right-click the Docker taskbar item and update Settings > Shared Drives with any locations you need to open e.g. C:\\ . Linux Install Docker Engine for your distribution: Ubuntu Docker CE for Ubuntu Debian Docker CE for Debian Fedora Docker CE for Fedora CentOS Docker CE for CentOS Add your user to the docker group: sudo usermod -aG docker $USER Sign out and then back in again so your changes take effect. After installation, restart your terminal one last time.","title":"Docker"},{"location":"testing/","text":"Testing \u00b6 The entrypoint for testing any integration is the command ddev test , which accepts an arbitrary number of integrations as arguments. Under the hood, we use tox for environment management and pytest as our test framework. Discovery \u00b6 Use the --list / -l flag to see what environments are available, for example: $ ddev test postgres envoy -l postgres: py27-10 py27-11 py27-93 py27-94 py27-95 py27-96 py38-10 py38-11 py38-93 py38-94 py38-95 py38-96 format_style style envoy: py27 py38 bench format_style style You'll notice that all environments for running tests are prefixed with pyXY , indicating the Python version to use. If you don't have a particular version installed (for example Python 2.7), such environments will be skipped. The second part of a test environment's name corresponds to the version of the product. For example, the 11 in py38-11 implies tests will run against version 11.x of PostgreSQL. If there is no version suffix, it means that either: the version is pinned, usually set to pull the latest release, or there is no concept of a product, such as the disk check Usage \u00b6 Explicit \u00b6 Passing just the integration name will run every test environment e.g. executing ddev test envoy will run the environments py27 , py38 , and style . You may select a subset of environments to run by appending a : followed by a comma-separated list of environments. For example, executing: ddev test postgres:py38-11,style envoy:py38 will run, in order, the environments py38-11 and style for the PostgreSQL check and the environment py38 for the Envoy check. Detection \u00b6 If no integrations are specified then only integrations that were changed will be tested, based on a diff between the latest commit to the current and master branches. The criteria for an integration to be considered changed is based on the file extension of paths in the diff. So for example if only Markdown files were modified then nothing will be tested. The integrations will be tested in lexicographical order. Coverage \u00b6 To measure code coverage, use the --cov / -c flag. Doing so will display a summary of coverage statistics after successful execution of integrations' tests. $ ddev test tls -c ... 
---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover ------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ------------------------------------------------------------------- TOTAL 737 4 70 2 99% To also show any line numbers that were not hit, use the --cov-missing / -cm flag instead. $ ddev test tls -cm ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover Missing ----------------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% 160-167, 288->275, 297->300, 300 datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ----------------------------------------------------------------------------- TOTAL 737 4 70 2 99% Style \u00b6 To run only the style checking environments, use the --style / -s shortcut flag. You may also only run the formatter environment using the --format-style / -fs shortcut flag. The formatter will automatically resolve the most common errors caught by the style checker. Advanced \u00b6 There are a number of shortcut options available that correspond to pytest options . --marker / -m ( pytest : -m ) - Only run tests matching a given marker expression e.g. ddev test elastic:py38-7.2 -m unit --filter / -k ( pytest : -k ) - Only run tests matching a given substring expression e.g. ddev test redisdb -k replication --debug / -d ( pytest : --log-level=debug -s ) - Set the log level to debug --pdb ( pytest : --pdb -x ) - Drop to PDB on first failure, then end test session --verbose / -v ( pytest : -v --tb=auto ) - Increase verbosity (can be used additively) and disables shortened tracebacks You may also pass arguments directly to pytest using the --pytest-args / -pa option. For example, you could re-write -d as -pa \"--log-level=debug -s\" .","title":"Testing"},{"location":"testing/#testing","text":"The entrypoint for testing any integration is the command ddev test , which accepts an arbitrary number of integrations as arguments. Under the hood, we use tox for environment management and pytest as our test framework.","title":"Testing"},{"location":"testing/#discovery","text":"Use the --list / -l flag to see what environments are available, for example: $ ddev test postgres envoy -l postgres: py27-10 py27-11 py27-93 py27-94 py27-95 py27-96 py38-10 py38-11 py38-93 py38-94 py38-95 py38-96 format_style style envoy: py27 py38 bench format_style style You'll notice that all environments for running tests are prefixed with pyXY , indicating the Python version to use. If you don't have a particular version installed (for example Python 2.7), such environments will be skipped. The second part of a test environment's name corresponds to the version of the product. 
For example, the 11 in py38-11 implies tests will run against version 11.x of PostgreSQL. If there is no version suffix, it means that either: the version is pinned, usually set to pull the latest release, or there is no concept of a product, such as the disk check","title":"Discovery"},{"location":"testing/#usage","text":"","title":"Usage"},{"location":"testing/#explicit","text":"Passing just the integration name will run every test environment e.g. executing ddev test envoy will run the environments py27 , py38 , and style . You may select a subset of environments to run by appending a : followed by a comma-separated list of environments. For example, executing: ddev test postgres:py38-11,style envoy:py38 will run, in order, the environments py38-11 and style for the PostgreSQL check and the environment py38 for the Envoy check.","title":"Explicit"},{"location":"testing/#detection","text":"If no integrations are specified then only integrations that were changed will be tested, based on a diff between the latest commit to the current and master branches. The criteria for an integration to be considered changed is based on the file extension of paths in the diff. So for example if only Markdown files were modified then nothing will be tested. The integrations will be tested in lexicographical order.","title":"Detection"},{"location":"testing/#coverage","text":"To measure code coverage, use the --cov / -c flag. Doing so will display a summary of coverage statistics after successful execution of integrations' tests. $ ddev test tls -c ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover ------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ------------------------------------------------------------------- TOTAL 737 4 70 2 99% To also show any line numbers that were not hit, use the --cov-missing / -cm flag instead. $ ddev test tls -cm ... ---------- Coverage report ---------- Name Stmts Miss Branch BrPart Cover Missing ----------------------------------------------------------------------------- datadog_checks\\tls\\__about__.py 1 0 0 0 100% datadog_checks\\tls\\__init__.py 3 0 0 0 100% datadog_checks\\tls\\tls.py 185 4 50 2 97% 160-167, 288->275, 297->300, 300 datadog_checks\\tls\\utils.py 43 0 16 0 100% tests\\__init__.py 0 0 0 0 100% tests\\conftest.py 105 0 0 0 100% tests\\test_config.py 47 0 0 0 100% tests\\test_local.py 113 0 0 0 100% tests\\test_remote.py 189 0 2 0 100% tests\\test_utils.py 15 0 0 0 100% tests\\utils.py 36 0 2 0 100% ----------------------------------------------------------------------------- TOTAL 737 4 70 2 99%","title":"Coverage"},{"location":"testing/#style","text":"To run only the style checking environments, use the --style / -s shortcut flag. You may also only run the formatter environment using the --format-style / -fs shortcut flag. The formatter will automatically resolve the most common errors caught by the style checker.","title":"Style"},{"location":"testing/#advanced","text":"There are a number of shortcut options available that correspond to pytest options . 
--marker / -m ( pytest : -m ) - Only run tests matching a given marker expression e.g. ddev test elastic:py38-7.2 -m unit --filter / -k ( pytest : -k ) - Only run tests matching a given substring expression e.g. ddev test redisdb -k replication --debug / -d ( pytest : --log-level=debug -s ) - Set the log level to debug --pdb ( pytest : --pdb -x ) - Drop to PDB on first failure, then end test session --verbose / -v ( pytest : -v --tb=auto ) - Increase verbosity (can be used additively) and disables shortened tracebacks You may also pass arguments directly to pytest using the --pytest-args / -pa option. For example, you could re-write -d as -pa \"--log-level=debug -s\" .","title":"Advanced"},{"location":"architecture/snmp/","text":"SNMP \u00b6 Note This section is meant for developers that want to understand the working of the SNMP integration. Be sure you are familiar with SNMP concepts , and you have read through the official SNMP integration docs . Overview \u00b6 While most integrations are either Python, JMX, or implemented in the Agent in Go, the SNMP integration is a bit more complex. Here's an overview of what this integration involves: A Python check , responsible for: Collecting metrics from a specific device IP. Metrics typically come from profiles , but they can also be specified explicitly . Auto-discovering devices over a network. (Pending deprecation in favor of Agent auto-discovery.) An Agent service listener , responsible for auto-discovering devices over a network and forwarding discovered instances to the existing Agent check scheduling pipeline. Also known as \"Agent SNMP auto-discovery\". The diagram below shows how these components interact for a typical VM-based setup (single Agent on a host). For Datadog Cluster Agent (DCA) deployments, see Cluster Agent Integration . Python Check \u00b6 Dependencies \u00b6 The Python check uses PySNMP to make SNMP queries and manipulate SNMP data (OIDs, variables, and MIBs). Device Monitoring \u00b6 The primary functionality of the Python check is to collect metrics from a given device given its IP address. As all Python checks, it supports multi-instances configuration, where each instance represents a device: instances : - ip_address : \"192.168.0.12\" # Python Auto-Discovery \u00b6 Approach \u00b6 The Python check includes a multithreaded implementation of device auto-discovery. It runs on instances that use network_address instead of ip_address : instances : - network_address : \"192.168.0.0/28\" # The main tasks performed by device auto-discovery are: Find new devices : For each IP in the network_address CIDR range, the check queries the device sysObjectID . If the query succeeds and the sysObjectID matches one of the registered profiles, the device is added as a discovered instance. This logic is run at regular intervals in a separate thread. Cache devices : To improve performance, discovered instances are cached on disk based on a hash of the instance. Since options from the network_address instance are copied into discovered instances, the cache is invalidated if the network_address changes. Check devices : On each check run, the check runs a check on all discovered instances. This is done in parallel using a threadpool. The check waits for all sub-checks to finish. Handle failures : Discovered instances that fail after a configured number of times are dropped. They may be rediscovered later. Submit discovery-related metrics : the check submits the total number of discovered devices for a given network_address instance. 
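To make the discovery sequence described above concrete, here is a minimal, illustrative sketch of the loop — not the integration's actual code. The helper `get_sys_object_id` stands in for the SNMP GET the check performs (via PySNMP in the real integration), and profile matching is reduced to exact equality for simplicity:

```python
import ipaddress

# OID of sysObjectID, as used in the simulation data shown earlier.
SYS_OBJECT_ID_OID = "1.3.6.1.2.1.1.2.0"

def discover_devices(network_address, profiles, get_sys_object_id):
    """Return one discovered instance per reachable device matching a known profile."""
    discovered = []
    for ip in ipaddress.ip_network(network_address).hosts():
        try:
            sys_object_id = get_sys_object_id(str(ip), SYS_OBJECT_ID_OID)
        except Exception:
            # Unreachable or non-SNMP host: skip it, it may be rediscovered later.
            continue
        for name, profile in profiles.items():
            if sys_object_id == profile.get("sysobjectid"):
                # In the real check, options from the `network_address` instance are
                # copied into the discovered instance, and a hash of that instance is
                # used as the on-disk cache key (so the cache is invalidated when the
                # network configuration changes).
                discovered.append({"ip_address": str(ip), "profile": name})
                break
    return discovered

# Example usage (values are illustrative only):
# profiles = {"hp-ilo4": {"sysobjectid": "1.3.6.1.4.1.232.9.4.10"}}
# discover_devices("192.168.0.0/28", profiles, my_snmp_get)
```
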
Caveats \u00b6 The approach described above is not ideal for several reasons: The check code is harder to understand since the two distinct paths (\"single device\" vs \"entire network\") live in a single integration. Each network instance manages several long-running threads that span well beyond the lifespan of a single check run. Each network check pseudo-schedules other instances, which is normally the responsibility of the Agent. For this reason, auto-discovery was eventually implemented in the Agent as a proper service listener (see below), and users should be discouraged from using Python auto-discovery. When the deprecation period expires, we will be able to remove auto-discovery logic from the Python check, making it exclusively focused on checking single devices. Agent Auto-Discovery \u00b6 Dependencies \u00b6 Agent auto-discovery uses GoSNMP to get the sysObjectID of devices in the network. Standalone Agent \u00b6 Agent auto-discovery implements the same logic than the Python auto-discovery, but as a service listener in the Agent Go package. This approach leverages the existing Agent scheduling logic, and makes it possible to scale device auto-discovery using the Datadog Cluster Agent (see Cluster Agent Integration ). Pending official documentation, here is an example configuration: # datadog.yaml listeners : - name : snmp snmp_listener : configs : - network : 10.0.0.0/28 version : 2 community : public - network : 10.0.1.0/30 version : 3 user : my-snmp-user authentication_protocol : SHA authentication_key : \"*****\" privacy_protocol : AES privacy_key : \"*****\" ignored_ip_addresses : - 10.0.1.0 - 10.0.1.1 Cluster Agent Support \u00b6 For Kubernetes environments, the Cluster Agent can be configured to use the SNMP Agent auto-discovery (via snmp listener) logic as a source of Cluster checks . The Datadog Cluster Agent (DCA) uses the snmp_listener config (Agent auto-discovery) to listen for IP ranges, then schedules snmp check instances to be run by one or more normal Datadog Agents. Agent auto-discovery combined with Cluster Agent is very scalable, it can be used to monitor a large number of snmp devices. Example Cluster Agent setup with SNMP Agent auto-discovery using Datadog helm-chart \u00b6 First you need to add Datadog Helm repository . ``` $ helm repo add datadog https://helm.datadoghq.com $ helm repo update ``` Then run: helm install datadog-monitoring --set datadog.apiKey = -f cluster-agent-values.yaml datadog/datadog Example cluster-agent-values.yaml datadog : ## @param apiKey - string - required ## Set this to your Datadog API key before the Agent runs. ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes # apiKey : ## @param clusterName - string - optional ## Set a unique cluster name to allow scoping hosts and Cluster Checks easily ## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions: ## * Lowercase letters, numbers, and hyphens only. ## * Must start with a letter. ## * Must end with a number or a letter. 
## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name # clusterName : my-snmp-cluster ## @param clusterChecks - object - required ## Enable the Cluster Checks feature on both the cluster-agents and the daemonset ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ ## Autodiscovery via Kube Service annotations is automatically enabled # clusterChecks : enabled : true ## @param tags - list of key:value elements - optional ## List of tags to attach to every metric, event and service check collected by this Agent. ## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags : - 'env:test-snmp-cluster-agent' ## @param clusterAgent - object - required ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ # clusterAgent : ## @param enabled - boolean - required ## Set this to true to enable Datadog Cluster Agent # enabled : true ## @param confd - list of objects - optional ## Provide additional cluster check configurations ## Each key will become a file in /conf.d ## ref: https://docs.datadoghq.com/agent/autodiscovery/ # confd : # Static checks http_check.yaml : |- cluster_check: true instances: - name: 'Check Example Site1' url: http://example.net - name: 'Check Example Site2' url: http://example.net - name: 'Check Example Site3' url: http://example.net # Autodiscovery template needed for `snmp_listener` to create instance configs snmp.yaml : |- cluster_check: true # AD config below is copied from: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/snmp.d/auto_conf.yaml ad_identifiers: - snmp init_config: instances: - ## @param ip_address - string - optional ## The IP address of the device to monitor. # ip_address: \"%%host%%\" ## @param port - integer - optional - default: 161 ## Default SNMP port. # port: \"%%port%%\" ## @param snmp_version - integer - optional - default: 2 ## If you are using SNMP v1 set snmp_version to 1 (required) ## If you are using SNMP v3 set snmp_version to 3 (required) # snmp_version: \"%%extra_version%%\" ## @param timeout - integer - optional - default: 5 ## Amount of second before timing out. # timeout: \"%%extra_timeout%%\" ## @param retries - integer - optional - default: 5 ## Amount of retries before failure. # retries: \"%%extra_retries%%\" ## @param community_string - string - optional ## Only useful for SNMP v1 & v2. # community_string: \"%%extra_community%%\" ## @param user - string - optional ## USERNAME to connect to your SNMP devices. # user: \"%%extra_user%%\" ## @param authKey - string - optional ## Authentication key to use with your Authentication type. # authKey: \"%%extra_auth_key%%\" ## @param authProtocol - string - optional ## Authentication type to use when connecting to your SNMP devices. ## It can be one of: MD5, SHA, SHA224, SHA256, SHA384, SHA512. ## Default to MD5 when `authKey` is specified. # authProtocol: \"%%extra_auth_protocol%%\" ## @param privKey - string - optional ## Privacy type key to use with your Privacy type. # privKey: \"%%extra_priv_key%%\" ## @param privProtocol - string - optional ## Privacy type to use when connecting to your SNMP devices. 
## It can be one of: DES, 3DES, AES, AES192, AES256, AES192C, AES256C. ## Default to DES when `privKey` is specified. # privProtocol: \"%%extra_priv_protocol%%\" ## @param context_engine_id - string - optional ## ID of your context engine; typically unneeded. ## (optional SNMP v3-only parameter) # context_engine_id: \"%%extra_context_engine_id%%\" ## @param context_name - string - optional ## Name of your context (optional SNMP v3-only parameter). # context_name: \"%%extra_context_name%%\" ## @param tags - list of key:value element - optional ## List of tags to attach to every metric, event and service check emitted by this integration. ## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags: # The autodiscovery subnet the device is part of. # Used by Agent autodiscovery to pass subnet name. - \"autodiscovery_subnet:%%extra_autodiscovery_subnet%%\" ## @param extra_tags - string - optional ## Comma separated tags to attach to every metric, event and service check emitted by this integration. ## Example: ## extra_tags: \"tag1:val1,tag2:val2\" # extra_tags: \"%%extra_tags%%\" ## @param oid_batch_size - integer - optional - default: 60 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. # oid_batch_size: \"%%extra_oid_batch_size%%\" ## @param datadog-cluster.yaml - object - optional ## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml). # datadog_cluster_yaml : listeners : - name : snmp # See here for all `snmp_listener` configs: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml snmp_listener : workers : 2 discovery_interval : 10 configs : - network : 192.168.1.16/29 version : 2 port : 1161 community : cisco_icm - network : 192.168.1.16/29 version : 2 port : 1161 community : f5 TODO: architecture diagram, example setup, affected files and repos, local testing tools, etc.","title":"SNMP"},{"location":"architecture/snmp/#snmp","text":"Note This section is meant for developers that want to understand the working of the SNMP integration. Be sure you are familiar with SNMP concepts , and you have read through the official SNMP integration docs .","title":"SNMP"},{"location":"architecture/snmp/#overview","text":"While most integrations are either Python, JMX, or implemented in the Agent in Go, the SNMP integration is a bit more complex. Here's an overview of what this integration involves: A Python check , responsible for: Collecting metrics from a specific device IP. Metrics typically come from profiles , but they can also be specified explicitly . Auto-discovering devices over a network. (Pending deprecation in favor of Agent auto-discovery.) An Agent service listener , responsible for auto-discovering devices over a network and forwarding discovered instances to the existing Agent check scheduling pipeline. Also known as \"Agent SNMP auto-discovery\". The diagram below shows how these components interact for a typical VM-based setup (single Agent on a host). 
For Datadog Cluster Agent (DCA) deployments, see Cluster Agent Integration .","title":"Overview"},{"location":"architecture/snmp/#python-check","text":"","title":"Python Check"},{"location":"architecture/snmp/#dependencies","text":"The Python check uses PySNMP to make SNMP queries and manipulate SNMP data (OIDs, variables, and MIBs).","title":"Dependencies"},{"location":"architecture/snmp/#device-monitoring","text":"The primary functionality of the Python check is to collect metrics from a given device given its IP address. As all Python checks, it supports multi-instances configuration, where each instance represents a device: instances : - ip_address : \"192.168.0.12\" # ","title":"Device Monitoring"},{"location":"architecture/snmp/#python-auto-discovery","text":"","title":"Python Auto-Discovery"},{"location":"architecture/snmp/#approach","text":"The Python check includes a multithreaded implementation of device auto-discovery. It runs on instances that use network_address instead of ip_address : instances : - network_address : \"192.168.0.0/28\" # The main tasks performed by device auto-discovery are: Find new devices : For each IP in the network_address CIDR range, the check queries the device sysObjectID . If the query succeeds and the sysObjectID matches one of the registered profiles, the device is added as a discovered instance. This logic is run at regular intervals in a separate thread. Cache devices : To improve performance, discovered instances are cached on disk based on a hash of the instance. Since options from the network_address instance are copied into discovered instances, the cache is invalidated if the network_address changes. Check devices : On each check run, the check runs a check on all discovered instances. This is done in parallel using a threadpool. The check waits for all sub-checks to finish. Handle failures : Discovered instances that fail after a configured number of times are dropped. They may be rediscovered later. Submit discovery-related metrics : the check submits the total number of discovered devices for a given network_address instance.","title":"Approach"},{"location":"architecture/snmp/#caveats","text":"The approach described above is not ideal for several reasons: The check code is harder to understand since the two distinct paths (\"single device\" vs \"entire network\") live in a single integration. Each network instance manages several long-running threads that span well beyond the lifespan of a single check run. Each network check pseudo-schedules other instances, which is normally the responsibility of the Agent. For this reason, auto-discovery was eventually implemented in the Agent as a proper service listener (see below), and users should be discouraged from using Python auto-discovery. When the deprecation period expires, we will be able to remove auto-discovery logic from the Python check, making it exclusively focused on checking single devices.","title":"Caveats"},{"location":"architecture/snmp/#agent-auto-discovery","text":"","title":"Agent Auto-Discovery"},{"location":"architecture/snmp/#dependencies_1","text":"Agent auto-discovery uses GoSNMP to get the sysObjectID of devices in the network.","title":"Dependencies"},{"location":"architecture/snmp/#standalone-agent","text":"Agent auto-discovery implements the same logic than the Python auto-discovery, but as a service listener in the Agent Go package. 
This approach leverages the existing Agent scheduling logic, and makes it possible to scale device auto-discovery using the Datadog Cluster Agent (see Cluster Agent Integration ). Pending official documentation, here is an example configuration: # datadog.yaml listeners : - name : snmp snmp_listener : configs : - network : 10.0.0.0/28 version : 2 community : public - network : 10.0.1.0/30 version : 3 user : my-snmp-user authentication_protocol : SHA authentication_key : \"*****\" privacy_protocol : AES privacy_key : \"*****\" ignored_ip_addresses : - 10.0.1.0 - 10.0.1.1","title":"Standalone Agent"},{"location":"architecture/snmp/#cluster-agent-support","text":"For Kubernetes environments, the Cluster Agent can be configured to use the SNMP Agent auto-discovery (via snmp listener) logic as a source of Cluster checks . The Datadog Cluster Agent (DCA) uses the snmp_listener config (Agent auto-discovery) to listen for IP ranges, then schedules snmp check instances to be run by one or more normal Datadog Agents. Agent auto-discovery combined with Cluster Agent is very scalable, it can be used to monitor a large number of snmp devices.","title":"Cluster Agent Support"},{"location":"architecture/snmp/#example-cluster-agent-setup-with-snmp-agent-auto-discovery-using-datadog-helm-chart","text":"First you need to add Datadog Helm repository . ``` $ helm repo add datadog https://helm.datadoghq.com $ helm repo update ``` Then run: helm install datadog-monitoring --set datadog.apiKey = -f cluster-agent-values.yaml datadog/datadog Example cluster-agent-values.yaml datadog : ## @param apiKey - string - required ## Set this to your Datadog API key before the Agent runs. ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes # apiKey : ## @param clusterName - string - optional ## Set a unique cluster name to allow scoping hosts and Cluster Checks easily ## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions: ## * Lowercase letters, numbers, and hyphens only. ## * Must start with a letter. ## * Must end with a number or a letter. ## Compared to the rules of GKE, dots are allowed whereas they are not allowed on GKE: ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name # clusterName : my-snmp-cluster ## @param clusterChecks - object - required ## Enable the Cluster Checks feature on both the cluster-agents and the daemonset ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ ## Autodiscovery via Kube Service annotations is automatically enabled # clusterChecks : enabled : true ## @param tags - list of key:value elements - optional ## List of tags to attach to every metric, event and service check collected by this Agent. 
## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags : - 'env:test-snmp-cluster-agent' ## @param clusterAgent - object - required ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ # clusterAgent : ## @param enabled - boolean - required ## Set this to true to enable Datadog Cluster Agent # enabled : true ## @param confd - list of objects - optional ## Provide additional cluster check configurations ## Each key will become a file in /conf.d ## ref: https://docs.datadoghq.com/agent/autodiscovery/ # confd : # Static checks http_check.yaml : |- cluster_check: true instances: - name: 'Check Example Site1' url: http://example.net - name: 'Check Example Site2' url: http://example.net - name: 'Check Example Site3' url: http://example.net # Autodiscovery template needed for `snmp_listener` to create instance configs snmp.yaml : |- cluster_check: true # AD config below is copied from: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/snmp.d/auto_conf.yaml ad_identifiers: - snmp init_config: instances: - ## @param ip_address - string - optional ## The IP address of the device to monitor. # ip_address: \"%%host%%\" ## @param port - integer - optional - default: 161 ## Default SNMP port. # port: \"%%port%%\" ## @param snmp_version - integer - optional - default: 2 ## If you are using SNMP v1 set snmp_version to 1 (required) ## If you are using SNMP v3 set snmp_version to 3 (required) # snmp_version: \"%%extra_version%%\" ## @param timeout - integer - optional - default: 5 ## Amount of second before timing out. # timeout: \"%%extra_timeout%%\" ## @param retries - integer - optional - default: 5 ## Amount of retries before failure. # retries: \"%%extra_retries%%\" ## @param community_string - string - optional ## Only useful for SNMP v1 & v2. # community_string: \"%%extra_community%%\" ## @param user - string - optional ## USERNAME to connect to your SNMP devices. # user: \"%%extra_user%%\" ## @param authKey - string - optional ## Authentication key to use with your Authentication type. # authKey: \"%%extra_auth_key%%\" ## @param authProtocol - string - optional ## Authentication type to use when connecting to your SNMP devices. ## It can be one of: MD5, SHA, SHA224, SHA256, SHA384, SHA512. ## Default to MD5 when `authKey` is specified. # authProtocol: \"%%extra_auth_protocol%%\" ## @param privKey - string - optional ## Privacy type key to use with your Privacy type. # privKey: \"%%extra_priv_key%%\" ## @param privProtocol - string - optional ## Privacy type to use when connecting to your SNMP devices. ## It can be one of: DES, 3DES, AES, AES192, AES256, AES192C, AES256C. ## Default to DES when `privKey` is specified. # privProtocol: \"%%extra_priv_protocol%%\" ## @param context_engine_id - string - optional ## ID of your context engine; typically unneeded. ## (optional SNMP v3-only parameter) # context_engine_id: \"%%extra_context_engine_id%%\" ## @param context_name - string - optional ## Name of your context (optional SNMP v3-only parameter). # context_name: \"%%extra_context_name%%\" ## @param tags - list of key:value element - optional ## List of tags to attach to every metric, event and service check emitted by this integration. 
## ## Learn more about tagging: https://docs.datadoghq.com/tagging/ # tags: # The autodiscovery subnet the device is part of. # Used by Agent autodiscovery to pass subnet name. - \"autodiscovery_subnet:%%extra_autodiscovery_subnet%%\" ## @param extra_tags - string - optional ## Comma separated tags to attach to every metric, event and service check emitted by this integration. ## Example: ## extra_tags: \"tag1:val1,tag2:val2\" # extra_tags: \"%%extra_tags%%\" ## @param oid_batch_size - integer - optional - default: 60 ## The number of OIDs handled by each batch. Increasing this number improves performance but ## uses more resources. # oid_batch_size: \"%%extra_oid_batch_size%%\" ## @param datadog-cluster.yaml - object - optional ## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml). # datadog_cluster_yaml : listeners : - name : snmp # See here for all `snmp_listener` configs: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml snmp_listener : workers : 2 discovery_interval : 10 configs : - network : 192.168.1.16/29 version : 2 port : 1161 community : cisco_icm - network : 192.168.1.16/29 version : 2 port : 1161 community : f5 TODO: architecture diagram, example setup, affected files and repos, local testing tools, etc.","title":"Example Cluster Agent setup with SNMP Agent auto-discovery using Datadog helm-chart"},{"location":"architecture/vsphere/","text":"vSphere \u00b6 High-Level information \u00b6 Product overview \u00b6 vSphere is a VMware product dedicated to managing a (usually) on-premise infrastructure. From physical machines running VMware ESXi that are called ESXi Hosts, users can spin up or migrate Virtual Machines from one host to another. vSphere is an integrated solution and provides an easy managing interface over concepts like data storage, or computing resource. Terminology \u00b6 This section details some of vSphere specific elements. This section does not intend to be an extensive list, but rather a place for those unfamiliar with the product to have the basics required to understand how the Datadog integration works. vSphere - The complete suite of tools and technologies detailed in this article. vCenter server - The main machine which controls ESXi hosts and provides both a web UI and an API to control the vSphere environment. vCSA (vCenter Server Appliance) - A specific kind of vCenter where the software runs in a dedicated Linux machine (more recent). By opposition, the legacy vCenter is typically installed on an existing Windows machine. ESXi host - The physical machine controlled by vCenter where the ESXi (bare-metal) virtualizer is installed. The host boots a minimal OS that can run Virtual Machines. VM - What anyone using vSphere really needs in the end, instances that can run applications and code. Note: Datadog monitors both ESXi hosts and VMs and it calls them both \"host\" (they are in the host map). Attributes/tags - It is possible to add attributes and tags to any vSphere resource, note that those two are now very similar with \"attributes\" being the deprecated thing to use. Datacenter - A set of resources grouped together. A single vCenter server can handle multiple datacenters. Datastore - A virtual vSphere concept to represent data storing capabilities. It can be an NFS server that ESXi hosts have read/write access to, it can be a mounted disk on the host and more. Datastores are often shared between multiple hosts. 
This allows Virtual Machines to be migrated from one host to another. Cluster - A logical grouping of computational resources, you can add multiple ESXi hosts in your cluster and then you can create VM in the cluster (and not on a specific host, vSphere will take care of placing your VM in one of the ESXi hosts and migrating it when needed). Photon OS - An open-source minimal Linux distribution and used by both ESXi and vCSA as a base. The integration \u00b6 Setup \u00b6 The Datadog vSphere integration runs from a single agent and pulls all the information from a single vCenter endpoint. Because the agent cannot run directly on Photon OS, it is usually required that the agent runs within a dedicated VM inside the vSphere infrastructure. Once the agent is running, the minimal configuration (as of version 5.x) is as follows: init_config : instances : - host : username : password : use_legacy_check_version : false empty_default_hostname : true host is the endpoint used to access the vSphere Client from a web browser. The host is either a FQDN or an IP, not an http url. username and password are the credentials to log in to vCenter. use_legacy_check_version is a backward compatibility flag. It should always be set to false and this flag will be removed in a future version of the integration. Setting it to true tells the agent to use an older and deprecated version of the vSphere integration. empty_default_hostname is a field used by the agent directly (and not the integration). By default, the agent does not allow submitting metrics without attaching an explicit host tag unless this flag is set to true. The vSphere integration uses that behavior for some metrics and service checks. For example, the vsphere.vm.count metric which gives a count of the VMs in the infra is not submitted with a host tag. This is particularly important if the agent runs inside a vSphere VM. If the vsphere.vm.count was submitted with a host tag, the Datadog backend would attach all the other host tags to the metric, for example vsphere_type:vm or vsphere_host: which makes the metric almost impossible to use. Concepts \u00b6 Collection level \u00b6 vSphere metrics are documented in their documentation page an each metric has a defined \"collection level\". That level determines the amount of data gathered by the integration and especially which metrics are available. More details here . By default, only the level 1 metrics are collected but this can be increased in the integration configuration file. Realtime vs historical \u00b6 Each ESXi host collects and stores data for each metric on himself and every VM it hosts every 20 seconds. Those data points are stored for up to one hour and are called realtime. Note: Each metric concerns always either a VM or an ESXi hosts. Metrics that concern datastore for example are not collected in the ESXi hosts. Additionally, the vCenter server collects data from all the ESXi hosts and stores the datapoint with some aggregation rollup into its own database. Those data points are called \"historical\". Finally, the vCenter server also collects metrics for other kinds of resources (like Datastore, ClusterComputeResource, Datacenter...) Those data points are necessarily \"historical\". The reason for such an important distinction is that historical metrics are much MUCH slower to collect than realtime metrics. The vSphere integration will always collect the \"realtime\" data for metrics that concern ESXi hosts and VMs. 
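As a concrete sketch of where this distinction ends up in practice (assuming the conf.yaml layout shown in the Setup section; the collection_type option is explained in the next paragraph, and the hostname and username values below are placeholders):
init_config:
instances:
  - host: vcenter.example.com
    username: datadog-readonly
    password:
    use_legacy_check_version: false
    empty_default_hostname: true
    collection_type: realtime
  - host: vcenter.example.com
    username: datadog-readonly
    password:
    use_legacy_check_version: false
    empty_default_hostname: true
    collection_type: historical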
But the integration also collects metrics for Datastores, ClusterComputeResources, Datacenters, and maybe others in the future. That's why, in the context of the Datadog vSphere integration, we usually simplify by considering that: VMs and ESXi hosts are \"realtime resources\". Metrics for such resources are quick and easy to get by querying vCenter that will in turn query all the ESXi hosts. Datastores, ClusterComputeResources, and Datacenters are \"historical resources\" and are much slower to collect. To collect all metrics (realtime and historical), it is advised to use two \"check instances\". One with collection_type: realtime and one with collection_type: historical . This way all metrics will be collected but because both check instances are on different schedules, the slowness of collecting historical metrics won't affect the rate at which realtime metrics are collected. vSphere tags and attributes \u00b6 Similarly to how Datadog allows you to add tags to your different hosts (thins like the os or the instance-type of your machines), vSphere has \"tags\" and \"attributes\". A lot of details can be found here: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-E8E854DD-AA97-4E0C-8419-CE84F93C4058.html#:~:text=Tags%20and%20attributes%20allow%20you,that%20tag%20to%20a%20category. But the overall idea is that both tags and attributes are additional information that you can attach to your vSphere resources and that \"tags\" are newer and more featureful than \"attributes\". Filtering \u00b6 A very flexible filtering system has been implemented with the vSphere integration. This allows fine-tuned configuration so that: You only pay for the host and VMs you really want to monitor. You reduce the load on your vCenter server by running just the queries that you need. You improve the check runtime which otherwise increases linearly with the size of their infrastructure and that was seen to take up to 10min in some large environments. We provide two types of filtering, one based on metrics, the other based on resources. The metric filter is fairly simple, for each resource type, you can provide some regexes. If a metric match any of the filter, it will be fetched and submitted. The configuration looks like this: metric_filters : vm : - cpu\\..* - mem\\..* host : - WHATEVER # Excludes everything datacenter : - .* The resource filter on the other hand, allows to exclude some vSphere resources (VM, ESXi host, etc.), based on an \"attribute\" of that resource. The possible attributes as of today are: - name , literally the name of the resource (as defined in vCenter) - inventory_path , a path-like string that represents the location of the resource in the inventory tree as each resource only ever has a single parent and recursively up to the root. For example: /my.datacenter.local/vm/staging/myservice/vm_name - tag , see the tags and attributes section. Used to filter resources based on the attached tags. - attribute , see the tags and attributes section. Used to filter resources based on the attached attributes. - hostname (only for VMs), the name of the ESXi host where the VM is running. - guest_hostname (only for VMs), the name of the OS as reported from within the machine. VMware tools have to be installed on the VM otherwise, vCenter is not able to fetch this information. 
A possible filtering configuration would look like this: resource_filters : - resource : vm property : name patterns : - - - resource : vm property : hostname patterns : - - resource : vm property : tag type : blacklist patterns : - '^env:staging$' - resource : vm property : tag type : whitelist # type defaults to whitelist patterns : - '^env:.*$' - resource : vm property : guest_hostname patterns : - - resource : host property : inventory_path patterns : - Instance tag \u00b6 In vSphere each metric is defined by three \"dimensions\". The resource on which the metric applies (for example the VM called \"abc1\") The name of the metric (for example cpu.usage). An additional available dimension that varies between metrics. (for example the cpu core id) This is similar to how Datadog represent metrics, except that the context cardinality is limited to two \"keys\", the name of the resource (usually the \"host\" tag), and there is space for one additional tag key. This available tag key is defined as the \"instance\" property, or \"instance tag\" in vSphere, and this dimension is not collected by default by the Datadog integration as it can have too big performance implications in large systems when compared to their added value from a monitoring perspective. Also when fetching metrics with the instance tag, vSphere only provides the value of the instance tag, it doesn't expose a human-readable \"key\" for that tag. In the cpu.usage metric with the core_id as the instance tag, the integration has to \"know\" that the meaning of the instance tag and that's why we rely on a hardcoded list in the integration. Because this instance tag can provide additional visibility, it is possible to enable it for some metrics from the configuration. For example, if we're really interested in getting the usage of the cpu per core, the setup can look like this: collect_per_instance_filters : vm : - cpu\\.usage\\..*","title":"vSphere"},{"location":"architecture/vsphere/#vsphere","text":"","title":"vSphere"},{"location":"architecture/vsphere/#high-level-information","text":"","title":"High-Level information"},{"location":"architecture/vsphere/#product-overview","text":"vSphere is a VMware product dedicated to managing a (usually) on-premise infrastructure. From physical machines running VMware ESXi that are called ESXi Hosts, users can spin up or migrate Virtual Machines from one host to another. vSphere is an integrated solution and provides an easy managing interface over concepts like data storage, or computing resource.","title":"Product overview"},{"location":"architecture/vsphere/#terminology","text":"This section details some of vSphere specific elements. This section does not intend to be an extensive list, but rather a place for those unfamiliar with the product to have the basics required to understand how the Datadog integration works. vSphere - The complete suite of tools and technologies detailed in this article. vCenter server - The main machine which controls ESXi hosts and provides both a web UI and an API to control the vSphere environment. vCSA (vCenter Server Appliance) - A specific kind of vCenter where the software runs in a dedicated Linux machine (more recent). By opposition, the legacy vCenter is typically installed on an existing Windows machine. ESXi host - The physical machine controlled by vCenter where the ESXi (bare-metal) virtualizer is installed. The host boots a minimal OS that can run Virtual Machines. 
VM - What anyone using vSphere really needs in the end, instances that can run applications and code. Note: Datadog monitors both ESXi hosts and VMs and it calls them both \"host\" (they are in the host map). Attributes/tags - It is possible to add attributes and tags to any vSphere resource, note that those two are now very similar with \"attributes\" being the deprecated thing to use. Datacenter - A set of resources grouped together. A single vCenter server can handle multiple datacenters. Datastore - A virtual vSphere concept to represent data storing capabilities. It can be an NFS server that ESXi hosts have read/write access to, it can be a mounted disk on the host and more. Datastores are often shared between multiple hosts. This allows Virtual Machines to be migrated from one host to another. Cluster - A logical grouping of computational resources, you can add multiple ESXi hosts in your cluster and then you can create VM in the cluster (and not on a specific host, vSphere will take care of placing your VM in one of the ESXi hosts and migrating it when needed). Photon OS - An open-source minimal Linux distribution and used by both ESXi and vCSA as a base.","title":"Terminology"},{"location":"architecture/vsphere/#the-integration","text":"","title":"The integration"},{"location":"architecture/vsphere/#setup","text":"The Datadog vSphere integration runs from a single agent and pulls all the information from a single vCenter endpoint. Because the agent cannot run directly on Photon OS, it is usually required that the agent runs within a dedicated VM inside the vSphere infrastructure. Once the agent is running, the minimal configuration (as of version 5.x) is as follows: init_config : instances : - host : username : password : use_legacy_check_version : false empty_default_hostname : true host is the endpoint used to access the vSphere Client from a web browser. The host is either a FQDN or an IP, not an http url. username and password are the credentials to log in to vCenter. use_legacy_check_version is a backward compatibility flag. It should always be set to false and this flag will be removed in a future version of the integration. Setting it to true tells the agent to use an older and deprecated version of the vSphere integration. empty_default_hostname is a field used by the agent directly (and not the integration). By default, the agent does not allow submitting metrics without attaching an explicit host tag unless this flag is set to true. The vSphere integration uses that behavior for some metrics and service checks. For example, the vsphere.vm.count metric which gives a count of the VMs in the infra is not submitted with a host tag. This is particularly important if the agent runs inside a vSphere VM. If the vsphere.vm.count was submitted with a host tag, the Datadog backend would attach all the other host tags to the metric, for example vsphere_type:vm or vsphere_host: which makes the metric almost impossible to use.","title":"Setup"},{"location":"architecture/vsphere/#concepts","text":"","title":"Concepts"},{"location":"architecture/vsphere/#collection-level","text":"vSphere metrics are documented in their documentation page an each metric has a defined \"collection level\". That level determines the amount of data gathered by the integration and especially which metrics are available. More details here . 
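For example, a minimal sketch (assuming the instance option is named collection_level, as in the integration's example configuration; in vSphere, a given level also includes the counters of all lower levels, and the host and username values are placeholders):
instances:
  - host: vcenter.example.com
    username: datadog-readonly
    password:
    collection_level: 2  # pull level 1 and level 2 counters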
By default, only the level 1 metrics are collected, but this can be increased in the integration configuration file.","title":"Collection level"},{"location":"architecture/vsphere/#realtime-vs-historical","text":"Each ESXi host collects and stores data for each metric on itself and on every VM it hosts every 20 seconds. Those data points are stored for up to one hour and are called realtime. Note: Each metric always concerns either a VM or an ESXi host. Metrics that concern a datastore, for example, are not collected on the ESXi hosts. Additionally, the vCenter server collects data from all the ESXi hosts and stores the data points, with some aggregation rollup, in its own database. Those data points are called \"historical\". Finally, the vCenter server also collects metrics for other kinds of resources (like Datastore, ClusterComputeResource, Datacenter...). Those data points are necessarily \"historical\". The reason for such an important distinction is that historical metrics are much MUCH slower to collect than realtime metrics. The vSphere integration will always collect the \"realtime\" data for metrics that concern ESXi hosts and VMs. But the integration also collects metrics for Datastores, ClusterComputeResources, Datacenters, and maybe others in the future. That's why, in the context of the Datadog vSphere integration, we usually simplify by considering that: VMs and ESXi hosts are \"realtime resources\". Metrics for such resources are quick and easy to get by querying vCenter, which will in turn query all the ESXi hosts. Datastores, ClusterComputeResources, and Datacenters are \"historical resources\" and are much slower to collect. To collect all metrics (realtime and historical), it is advised to use two \"check instances\": one with collection_type: realtime and one with collection_type: historical . This way, all metrics are collected, and because the two check instances run on different schedules, the slowness of collecting historical metrics won't affect the rate at which realtime metrics are collected.","title":"Realtime vs historical"},{"location":"architecture/vsphere/#vsphere-tags-and-attributes","text":"Similarly to how Datadog allows you to add tags to your different hosts (things like the OS or the instance type of your machines), vSphere has \"tags\" and \"attributes\". A lot of details can be found here: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vcenterhost.doc/GUID-E8E854DD-AA97-4E0C-8419-CE84F93C4058.html#:~:text=Tags%20and%20attributes%20allow%20you,that%20tag%20to%20a%20category. But the overall idea is that both tags and attributes are additional information that you can attach to your vSphere resources, and that \"tags\" are newer and more featureful than \"attributes\".","title":"vSphere tags and attributes"},{"location":"architecture/vsphere/#filtering","text":"A very flexible filtering system has been implemented with the vSphere integration. This allows fine-tuned configuration so that: You only pay for the hosts and VMs you really want to monitor. You reduce the load on your vCenter server by running just the queries that you need. You improve the check runtime, which otherwise increases linearly with the size of the infrastructure and has been seen to take up to 10 minutes in some large environments. We provide two types of filtering, one based on metrics, the other based on resources. The metric filter is fairly simple: for each resource type, you can provide some regexes. If a metric matches any of the filters, it is fetched and submitted. 
The configuration looks like this: metric_filters : vm : - cpu\\..* - mem\\..* host : - WHATEVER # Excludes everything datacenter : - .* The resource filter on the other hand, allows to exclude some vSphere resources (VM, ESXi host, etc.), based on an \"attribute\" of that resource. The possible attributes as of today are: - name , literally the name of the resource (as defined in vCenter) - inventory_path , a path-like string that represents the location of the resource in the inventory tree as each resource only ever has a single parent and recursively up to the root. For example: /my.datacenter.local/vm/staging/myservice/vm_name - tag , see the tags and attributes section. Used to filter resources based on the attached tags. - attribute , see the tags and attributes section. Used to filter resources based on the attached attributes. - hostname (only for VMs), the name of the ESXi host where the VM is running. - guest_hostname (only for VMs), the name of the OS as reported from within the machine. VMware tools have to be installed on the VM otherwise, vCenter is not able to fetch this information. A possible filtering configuration would look like this: resource_filters : - resource : vm property : name patterns : - - - resource : vm property : hostname patterns : - - resource : vm property : tag type : blacklist patterns : - '^env:staging$' - resource : vm property : tag type : whitelist # type defaults to whitelist patterns : - '^env:.*$' - resource : vm property : guest_hostname patterns : - - resource : host property : inventory_path patterns : - ","title":"Filtering"},{"location":"architecture/vsphere/#instance-tag","text":"In vSphere each metric is defined by three \"dimensions\". The resource on which the metric applies (for example the VM called \"abc1\") The name of the metric (for example cpu.usage). An additional available dimension that varies between metrics. (for example the cpu core id) This is similar to how Datadog represent metrics, except that the context cardinality is limited to two \"keys\", the name of the resource (usually the \"host\" tag), and there is space for one additional tag key. This available tag key is defined as the \"instance\" property, or \"instance tag\" in vSphere, and this dimension is not collected by default by the Datadog integration as it can have too big performance implications in large systems when compared to their added value from a monitoring perspective. Also when fetching metrics with the instance tag, vSphere only provides the value of the instance tag, it doesn't expose a human-readable \"key\" for that tag. In the cpu.usage metric with the core_id as the instance tag, the integration has to \"know\" that the meaning of the instance tag and that's why we rely on a hardcoded list in the integration. Because this instance tag can provide additional visibility, it is possible to enable it for some metrics from the configuration. For example, if we're really interested in getting the usage of the cpu per core, the setup can look like this: collect_per_instance_filters : vm : - cpu\\.usage\\..*","title":"Instance tag"},{"location":"architecture/win32_event_log/","text":"Windows Event Log \u00b6 Overview \u00b6 Users set a path with which to collect events from. It can be the name of a channel (like System , Application , etc.) or the full path to a log file. There are 3 ways to select filter criteria rather than collecting all events: query - A raw XPath or structured XML query used to filter events. This overrides any selected filters . 
filters - A mapping of properties to allowed values. Every filter (equivalent to the and operator) must match any value (equivalent to the or operator). This option is a convenience for a query that is relatively basic. Rather than collect all events and perform filtering within the check, the filters are converted to an XPath expression. This approach offloads all filtering to the kernel (like query ), which increases performance and reduces bandwidth usage when connecting to a remote machine. included_messages / excluded_messages - These are regular expression patterns used to filter by events' messages specifically (if a message is found), with the exclude list taking precedence. These may be used in place of or with query / filters , as there exists no query construct by which to select a message attribute. A pull subscription model is used. At every check run, the cached event log handle waits to be signaled for a configurable number of seconds. If signaled, the check then polls all available events in batches of a configurable size. At configurable intervals, the most recently encountered event is saved to the filesystem. This is useful for preventing duplicate events being sent as a consequence of Agent restarts, especially when the start option is set to oldest . Logs \u00b6 Events may alternatively be configured to be submitted as logs. The code for that resides here . Only a subset of the check's functionality is available. Namely, each log configuration will collect all events of the given channel without filtering, tagging, nor remote connection options. This implementation uses the push subscription model . There is a bit of C in charge of rendering the relevant data and registering the Go tailer callback that ultimately sends the log to the backend. Legacy mode \u00b6 Setting legacy_mode to true in the check will use WMI to collect events, which is significantly more resource intensive. This mode has entirely different configuration options and will be removed in a future release. Agent 6 can only use this mode as Python 2 does not support the new implementation.","title":"Windows Event Log"},{"location":"architecture/win32_event_log/#windows-event-log","text":"","title":"Windows Event Log"},{"location":"architecture/win32_event_log/#overview","text":"Users set a path with which to collect events from. It can be the name of a channel (like System , Application , etc.) or the full path to a log file. There are 3 ways to select filter criteria rather than collecting all events: query - A raw XPath or structured XML query used to filter events. This overrides any selected filters . filters - A mapping of properties to allowed values. Every filter (equivalent to the and operator) must match any value (equivalent to the or operator). This option is a convenience for a query that is relatively basic. Rather than collect all events and perform filtering within the check, the filters are converted to an XPath expression. This approach offloads all filtering to the kernel (like query ), which increases performance and reduces bandwidth usage when connecting to a remote machine. included_messages / excluded_messages - These are regular expression patterns used to filter by events' messages specifically (if a message is found), with the exclude list taking precedence. These may be used in place of or with query / filters , as there exists no query construct by which to select a message attribute. A pull subscription model is used. 
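To make the three filtering options above concrete, a sketch of an instance configuration (option names follow the check's conf.yaml; the channel, sources, and patterns are examples only):
instances:
  - path: Application
    filters:
      source:
        - MSSQLSERVER
      type:
        - Error
        - Warning
    included_messages:
      - deadlock
    excluded_messages:
      - '^Backup completed'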
At every check run, the cached event log handle waits to be signaled for a configurable number of seconds. If signaled, the check then polls all available events in batches of a configurable size. At configurable intervals, the most recently encountered event is saved to the filesystem. This is useful for preventing duplicate events being sent as a consequence of Agent restarts, especially when the start option is set to oldest .","title":"Overview"},{"location":"architecture/win32_event_log/#logs","text":"Events may alternatively be configured to be submitted as logs. The code for that resides here . Only a subset of the check's functionality is available. Namely, each log configuration will collect all events of the given channel without filtering, tagging, nor remote connection options. This implementation uses the push subscription model . There is a bit of C in charge of rendering the relevant data and registering the Go tailer callback that ultimately sends the log to the backend.","title":"Logs"},{"location":"architecture/win32_event_log/#legacy-mode","text":"Setting legacy_mode to true in the check will use WMI to collect events, which is significantly more resource intensive. This mode has entirely different configuration options and will be removed in a future release. Agent 6 can only use this mode as Python 2 does not support the new implementation.","title":"Legacy mode"},{"location":"base/about/","text":"About \u00b6 The Base package provides all the functionality and utilities necessary for writing Agent Integrations. Most importantly it provides the AgentCheck base class from which every Check must be inherited. You would use it like so: from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' def check ( self , instance ): self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ]) The check method is what the Datadog Agent will execute. In this example we created a Check and gave it a namespace of awesome . This means that by default, every submission's name will be prefixed with awesome. . We submitted a gauge metric named awesome.test with a value of 1.23 tagged by foo:bar . The magic hidden by the usability of the API is that this actually calls a C binding which communicates with the Agent (written in Go).","title":"About"},{"location":"base/about/#about","text":"The Base package provides all the functionality and utilities necessary for writing Agent Integrations. Most importantly it provides the AgentCheck base class from which every Check must be inherited. You would use it like so: from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' def check ( self , instance ): self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ]) The check method is what the Datadog Agent will execute. In this example we created a Check and gave it a namespace of awesome . This means that by default, every submission's name will be prefixed with awesome. . We submitted a gauge metric named awesome.test with a value of 1.23 tagged by foo:bar . The magic hidden by the usability of the API is that this actually calls a C binding which communicates with the Agent (written in Go).","title":"About"},{"location":"base/api/","text":"API \u00b6 datadog_checks.base.checks.base.AgentCheck \u00b6 The base class for any Agent based integration. 
In general, you don't need to and you should not override anything from the base class except the check method but sometimes it might be useful for a Check to have its own constructor. When overriding __init__ you have to remember that, depending on the configuration, the Agent might create several different Check instances and the method would be called as many times. Agent 6,7 signature: AgentCheck(name, init_config, instances) # instances contain only 1 instance AgentCheck.check(instance) Agent 8 signature: AgentCheck(name, init_config, instance) # one instance AgentCheck.check() # no more instance argument for check method Note when loading a Custom check, the Agent will inspect the module searching for a subclass of AgentCheck . If such a class exists but has been derived in turn, it'll be ignored - you should never derive from an existing Check . __init__ ( self , * args , ** kwargs ) special \u00b6 name ( str ) - the name of the check init_config ( dict ) - the init_config section of the configuration. instance ( List[dict] ) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). Source code in def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None \"\"\" - **name** (_str_) - the name of the check - **init_config** (_dict_) - the `init_config` section of the configuration. - **instance** (_List[dict]_) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). \"\"\" # NOTE: these variable assignments exist to ease type checking when eventually assigned as attributes. name = kwargs . get ( 'name' , '' ) init_config = kwargs . get ( 'init_config' , {}) agentConfig = kwargs . get ( 'agentConfig' , {}) instances = kwargs . get ( 'instances' , []) if len ( args ) > 0 : name = args [ 0 ] if len ( args ) > 1 : init_config = args [ 1 ] if len ( args ) > 2 : # agent pass instances as tuple but in test we are usually using list, so we are testing for both if len ( args ) > 3 or not isinstance ( args [ 2 ], ( list , tuple )) or 'instances' in kwargs : # old-style init: the 3rd argument is `agentConfig` agentConfig = args [ 2 ] if len ( args ) > 3 : instances = args [ 3 ] else : # new-style init: the 3rd argument is `instances` instances = args [ 2 ] # NOTE: Agent 6+ should pass exactly one instance... But we are not abiding by that rule on our side # everywhere just yet. It's complicated... See: https://github.com/DataDog/integrations-core/pull/5573 instance = instances [ 0 ] if instances else None self . check_id = '' self . name = name # type: str self . init_config = init_config # type: InitConfigType self . agentConfig = agentConfig # type: AgentConfigType self . instance = instance # type: InstanceType self . instances = instances # type: List[InstanceType] self . warnings = [] # type: List[str] # `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead self . hostname = datadog_agent . get_hostname () # type: str logger = logging . getLogger ( ' {} . {} ' . format ( __name__ , self . name )) self . log = CheckLoggingAdapter ( logger , self ) # TODO: Remove with Agent 5 # Set proxy settings self . proxies = self . _get_requests_proxy () if not self . init_config : self . _use_agent_proxy = True else : self . _use_agent_proxy = is_affirmative ( self . init_config . get ( 'use_agent_proxy' , True )) # TODO: Remove with Agent 5 self . 
default_integration_http_timeout = float ( self . agentConfig . get ( 'default_integration_http_timeout' , 9 )) self . _deprecations = { 'increment' : ( False , ( 'DEPRECATION NOTICE: `AgentCheck.increment`/`AgentCheck.decrement` are deprecated, please ' 'use `AgentCheck.gauge` or `AgentCheck.count` instead, with a different metric name' ), ), 'device_name' : ( False , ( 'DEPRECATION NOTICE: `device_name` is deprecated, please use a `device:` ' 'tag in the `tags` list instead' ), ), 'in_developer_mode' : ( False , 'DEPRECATION NOTICE: `in_developer_mode` is deprecated, please stop using it.' , ), 'no_proxy' : ( False , ( 'DEPRECATION NOTICE: The `no_proxy` config option has been renamed ' 'to `skip_proxy` and will be removed in a future release.' ), ), 'service_tag' : ( False , ( 'DEPRECATION NOTICE: The `service` tag is deprecated and has been renamed to ` %s `. ' 'Set `disable_legacy_service_tag` to `true` to disable this warning. ' 'The default will become `true` and cannot be changed in Agent version 8.' ), ), } # type: Dict[str, Tuple[bool, str]] # Setup metric limits self . metric_limiter = self . _get_metric_limiter ( self . name , instance = self . instance ) # Lazily load and validate config self . _config_model_instance = None # type: Any self . _config_model_shared = None # type: Any # Functions that will be called exactly once (if successful) before the first check run self . check_initializations = deque ([ self . send_config_metadata ]) # type: Deque[Callable[[], None]] if not PY2 : self . check_initializations . append ( self . load_configuration_models ) count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a raw count metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a raw count metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) event ( self , event ) \u00b6 Send an event. 
An event is a dictionary with the following keys and data types: { \"timestamp\" : int , # the epoch timestamp for the event \"event_type\" : str , # the event name \"api_key\" : str , # the api key for your account \"msg_title\" : str , # the title of the event \"msg_text\" : str , # the text body of the event \"aggregation_key\" : str , # a key to use for aggregating events \"alert_type\" : str , # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\" : str , # (optional) the source type name \"host\" : str , # (optional) the name of the host \"tags\" : list , # (optional) a list of tags to associate with this event \"priority\" : str , # (optional) specifies the priority of the event (\"normal\" or \"low\") } event ( dict ) - the event to be sent Source code in def event ( self , event ): # type: (Event) -> None \"\"\"Send an event. An event is a dictionary with the following keys and data types: ```python { \"timestamp\": int, # the epoch timestamp for the event \"event_type\": str, # the event name \"api_key\": str, # the api key for your account \"msg_title\": str, # the title of the event \"msg_text\": str, # the text body of the event \"aggregation_key\": str, # a key to use for aggregating events \"alert_type\": str, # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\": str, # (optional) the source type name \"host\": str, # (optional) the name of the host \"tags\": list, # (optional) a list of tags to associate with this event \"priority\": str, # (optional) specifies the priority of the event (\"normal\" or \"low\") } ``` - **event** (_dict_) - the event to be sent \"\"\" # Enforce types of some fields, considerably facilitates handling in go bindings downstream for key , value in iteritems ( event ): if not isinstance ( value , ( text_type , binary_type )): continue try : event [ key ] = to_native_string ( value ) # type: ignore # ^ Mypy complains about dynamic key assignment -- arguably for good reason. # Ideally we should convert this to a dict literal so that submitted events only include known keys. except UnicodeError : self . log . warning ( 'Encoding error with field ` %s `, cannot submit event' , key ) return if event . get ( 'tags' ): event [ 'tags' ] = self . _normalize_tags_type ( event [ 'tags' ]) if event . get ( 'timestamp' ): event [ 'timestamp' ] = int ( event [ 'timestamp' ]) if event . get ( 'aggregation_key' ): event [ 'aggregation_key' ] = to_native_string ( event [ 'aggregation_key' ]) if self . __NAMESPACE__ : event . setdefault ( 'source_type_name' , self . __NAMESPACE__ ) aggregator . submit_event ( self , self . check_id , event ) gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a gauge metric. Parameters: name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a gauge metric. 
**Parameters:** - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . GAUGE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a histogram metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTOGRAM , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a histogram based on rate metrics. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram based on rate metrics. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTORATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) metadata_entrypoint ( method ) classmethod \u00b6 Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: class MyCheck ( AgentCheck ): @AgentCheck . metadata_entrypoint def collect_metadata ( self ): ... 
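A slightly fuller sketch of the same pattern (the check and metric names are hypothetical; it pairs the decorator with set_metadata, which is documented further down this page):
from datadog_checks.base import AgentCheck

class MyCheck(AgentCheck):
    def check(self, instance):
        self.gauge('mycheck.up', 1)
        self.collect_metadata()

    @AgentCheck.metadata_entrypoint
    def collect_metadata(self):
        # Skipped entirely when metadata collection is disabled on the Agent.
        self.set_metadata('version', '1.2.3')  # placeholder version string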
Source code in @classmethod def metadata_entrypoint ( cls , method ): # type: (Callable[..., None]) -> Callable[..., None] \"\"\" Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: ```python class MyCheck(AgentCheck): @AgentCheck.metadata_entrypoint def collect_metadata(self): ... ``` \"\"\" @functools . wraps ( method ) def entrypoint ( self , * args , ** kwargs ): # type: (AgentCheck, *Any, **Any) -> None if not self . is_metadata_collection_enabled (): return # NOTE: error handling still at the discretion of the wrapped method. method ( self , * args , ** kwargs ) return entrypoint monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ) \u00b6 Sample an increasing counter metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix flush_first_value ( bool ) - whether to sample the first value Source code in def monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ): # type: (str, float, Sequence[str], str, str, bool, bool) -> None \"\"\"Sample an increasing counter metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix - **flush_first_value** (_bool_) - whether to sample the first value \"\"\" self . _submit_metric ( aggregator . MONOTONIC_COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw , flush_first_value = flush_first_value , ) rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ) \u00b6 Sample a point, with the rate calculated at the end of the check. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a point, with the rate calculated at the end of the check. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . 
RATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw ) service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ) \u00b6 Send the status of a service. name ( str ) - the name of the service check status ( int ) - a constant describing the service status. tags ( List[str] ) - a list of tags to associate with this service check message ( str ) - additional information or a description of why this status occurred. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ): # type: (str, ServiceCheckStatus, Sequence[str], str, str, bool) -> None \"\"\"Send the status of a service. - **name** (_str_) - the name of the service check - **status** (_int_) - a constant describing the service status. - **tags** (_List[str]_) - a list of tags to associate with this service check - **message** (_str_) - additional information or a description of why this status occurred. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" tags = self . _normalize_tags_type ( tags or []) if hostname is None : hostname = '' if message is None : message = '' else : message = to_native_string ( message ) message = self . sanitize ( message ) aggregator . submit_service_check ( self , self . check_id , self . _format_namespace ( name , raw ), status , tags , hostname , message ) set_metadata ( self , name , value , ** options ) \u00b6 Updates the cached metadata name with value , which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if name has no transformer defined then the raw value will be submitted and therefore it must be a str :param options: keyword arguments to pass to any defined transformer Source code in def set_metadata ( self , name , value , ** options ): # type: (str, Any, **Any) -> None \"\"\"Updates the cached metadata ``name`` with ``value``, which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if ``name`` has no transformer defined then the raw ``value`` will be submitted and therefore it must be a ``str`` :param options: keyword arguments to pass to any defined transformer \"\"\" self . metadata_manager . submit ( name , value , options ) Stubs \u00b6 datadog_checks.base.stubs.aggregator.AggregatorStub \u00b6 This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions. assert_all_metrics_covered ( self ) \u00b6 Source code in def assert_all_metrics_covered ( self ): # use `condition` to avoid building the `msg` if not needed condition = self . metrics_asserted_pct >= 100.0 msg = '' if not condition : prefix = ' \\n\\t - ' msg = 'Some metrics are missing:' msg += ' \\n Asserted Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . _asserted ))) msg += ' \\n Missing Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . not_asserted ()))) assert condition , msg assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ) \u00b6 Source code in def assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ): candidates = [] for e in self . 
events : if exact_match and msg_text != e [ 'msg_text' ] or msg_text not in e [ 'msg_text' ]: continue if tags and set ( tags ) != set ( e [ 'tags' ]): continue for name , value in iteritems ( kwargs ): if e [ name ] != value : break else : candidates . append ( e ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( msg_text , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ) \u00b6 Assert a metric was processed by this stub Source code in def assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ): \"\"\" Assert a metric was processed by this stub \"\"\" self . _asserted . add ( name ) expected_tags = normalize_tags ( tags , sort = True ) candidates = [] for metric in self . metrics ( name ): if value is not None and not self . is_aggregate ( metric . type ) and value != metric . value : continue if expected_tags and expected_tags != sorted ( metric . tags ): continue if hostname is not None and hostname != metric . hostname : continue if metric_type is not None and metric_type != metric . type : continue if device is not None and device != metric . device : continue candidates . append ( metric ) expected_metric = MetricStub ( name , metric_type , value , tags , hostname , device ) if value is not None and candidates and all ( self . is_aggregate ( m . type ) for m in candidates ): got = sum ( m . value for m in candidates ) msg = \"Expected count value for ' {} ': {} , got {} \" . format ( name , value , got ) condition = value == got elif count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition , msg = msg , expected_stub = expected_metric , submitted_elements = self . _metrics ) assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ) \u00b6 Assert a metric is tagged with tag Source code in def assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ): \"\"\" Assert a metric is tagged with tag \"\"\" self . _asserted . add ( metric_name ) candidates = [] for metric in self . metrics ( metric_name ): if tag in metric . tags : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ) \u00b6 Source code in def assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ): candidates = [] self . _asserted . add ( metric_name ) for metric in self . metrics ( metric_name ): tags = metric . tags gtags = [ t for t in tags if t . startswith ( tag_prefix )] if len ( gtags ) > 0 : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . 
format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg assert_no_duplicate_metrics ( self ) \u00b6 Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: metric name type (gauge, rate, etc) tags hostname Source code in def assert_no_duplicate_metrics ( self ): \"\"\" Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: - metric name - type (gauge, rate, etc) - tags - hostname \"\"\" # metric types that intended to be called multiple times are ignored ignored_types = [ self . COUNT , self . MONOTONIC_COUNT , self . COUNTER ] metric_stubs = [ m for metrics in self . _metrics . values () for m in metrics if m . type not in ignored_types ] def stub_to_key_fn ( stub ): return stub . name , stub . type , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'metric' , metric_stubs , stub_to_key_fn ) assert_no_duplicate_service_checks ( self ) \u00b6 Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname Source code in def assert_no_duplicate_service_checks ( self ): \"\"\" Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname \"\"\" service_check_stubs = [ m for metrics in self . _service_checks . values () for m in metrics ] def stub_to_key_fn ( stub ): return stub . name , stub . status , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'service_check' , service_check_stubs , stub_to_key_fn ) assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ) \u00b6 Assert a service check was processed by this stub Source code in def assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ): \"\"\" Assert a service check was processed by this stub \"\"\" tags = normalize_tags ( tags , sort = True ) candidates = [] for sc in self . service_checks ( name ): if status is not None and status != sc . status : continue if tags and tags != sorted ( sc . tags ): continue if hostname is not None and hostname != sc . hostname : continue if message is not None and message != sc . message : continue candidates . append ( sc ) expected_service_check = ServiceCheckStub ( None , name = name , status = status , tags = tags , hostname = hostname , message = message ) if count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition = condition , msg = msg , expected_stub = expected_service_check , submitted_elements = self . _service_checks ) reset ( self ) \u00b6 Set the stub to its initial state Source code in def reset ( self ): \"\"\" Set the stub to its initial state \"\"\" self . _metrics = defaultdict ( list ) self . _asserted = set () self . _service_checks = defaultdict ( list ) self . _events = [] self . 
_event_platform_events = defaultdict ( list ) datadog_checks.base.stubs.datadog_agent.DatadogAgentStub \u00b6 This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions. assert_metadata ( self , check_id , data ) \u00b6 Source code in def assert_metadata ( self , check_id , data ): actual = {} for name in data : key = ( check_id , name ) if key in self . _metadata : actual [ name ] = self . _metadata [ key ] assert data == actual assert_metadata_count ( self , count ) \u00b6 Source code in def assert_metadata_count ( self , count ): assert len ( self . _metadata ) == count reset ( self ) \u00b6 Source code in def reset ( self ): self . _metadata . clear () self . _cache . clear () self . _config = self . get_default_config ()","title":"API"},{"location":"base/api/#api","text":"","title":"API"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck","text":"The base class for any Agent based integration. In general, you don't need to and you should not override anything from the base class except the check method but sometimes it might be useful for a Check to have its own constructor. When overriding __init__ you have to remember that, depending on the configuration, the Agent might create several different Check instances and the method would be called as many times. Agent 6,7 signature: AgentCheck(name, init_config, instances) # instances contain only 1 instance AgentCheck.check(instance) Agent 8 signature: AgentCheck(name, init_config, instance) # one instance AgentCheck.check() # no more instance argument for check method Note when loading a Custom check, the Agent will inspect the module searching for a subclass of AgentCheck . If such a class exists but has been derived in turn, it'll be ignored - you should never derive from an existing Check .","title":"AgentCheck"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.__init__","text":"name ( str ) - the name of the check init_config ( dict ) - the init_config section of the configuration. instance ( List[dict] ) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). Source code in def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None \"\"\" - **name** (_str_) - the name of the check - **init_config** (_dict_) - the `init_config` section of the configuration. - **instance** (_List[dict]_) - a one-element list containing the instance options from the configuration file (a list is used to keep backward compatibility with older versions of the Agent). \"\"\" # NOTE: these variable assignments exist to ease type checking when eventually assigned as attributes. name = kwargs . get ( 'name' , '' ) init_config = kwargs . get ( 'init_config' , {}) agentConfig = kwargs . get ( 'agentConfig' , {}) instances = kwargs . get ( 'instances' , []) if len ( args ) > 0 : name = args [ 0 ] if len ( args ) > 1 : init_config = args [ 1 ] if len ( args ) > 2 : # agent pass instances as tuple but in test we are usually using list, so we are testing for both if len ( args ) > 3 or not isinstance ( args [ 2 ], ( list , tuple )) or 'instances' in kwargs : # old-style init: the 3rd argument is `agentConfig` agentConfig = args [ 2 ] if len ( args ) > 3 : instances = args [ 3 ] else : # new-style init: the 3rd argument is `instances` instances = args [ 2 ] # NOTE: Agent 6+ should pass exactly one instance... 
But we are not abiding by that rule on our side # everywhere just yet. It's complicated... See: https://github.com/DataDog/integrations-core/pull/5573 instance = instances [ 0 ] if instances else None self . check_id = '' self . name = name # type: str self . init_config = init_config # type: InitConfigType self . agentConfig = agentConfig # type: AgentConfigType self . instance = instance # type: InstanceType self . instances = instances # type: List[InstanceType] self . warnings = [] # type: List[str] # `self.hostname` is deprecated, use `datadog_agent.get_hostname()` instead self . hostname = datadog_agent . get_hostname () # type: str logger = logging . getLogger ( ' {} . {} ' . format ( __name__ , self . name )) self . log = CheckLoggingAdapter ( logger , self ) # TODO: Remove with Agent 5 # Set proxy settings self . proxies = self . _get_requests_proxy () if not self . init_config : self . _use_agent_proxy = True else : self . _use_agent_proxy = is_affirmative ( self . init_config . get ( 'use_agent_proxy' , True )) # TODO: Remove with Agent 5 self . default_integration_http_timeout = float ( self . agentConfig . get ( 'default_integration_http_timeout' , 9 )) self . _deprecations = { 'increment' : ( False , ( 'DEPRECATION NOTICE: `AgentCheck.increment`/`AgentCheck.decrement` are deprecated, please ' 'use `AgentCheck.gauge` or `AgentCheck.count` instead, with a different metric name' ), ), 'device_name' : ( False , ( 'DEPRECATION NOTICE: `device_name` is deprecated, please use a `device:` ' 'tag in the `tags` list instead' ), ), 'in_developer_mode' : ( False , 'DEPRECATION NOTICE: `in_developer_mode` is deprecated, please stop using it.' , ), 'no_proxy' : ( False , ( 'DEPRECATION NOTICE: The `no_proxy` config option has been renamed ' 'to `skip_proxy` and will be removed in a future release.' ), ), 'service_tag' : ( False , ( 'DEPRECATION NOTICE: The `service` tag is deprecated and has been renamed to ` %s `. ' 'Set `disable_legacy_service_tag` to `true` to disable this warning. ' 'The default will become `true` and cannot be changed in Agent version 8.' ), ), } # type: Dict[str, Tuple[bool, str]] # Setup metric limits self . metric_limiter = self . _get_metric_limiter ( self . name , instance = self . instance ) # Lazily load and validate config self . _config_model_instance = None # type: Any self . _config_model_shared = None # type: Any # Functions that will be called exactly once (if successful) before the first check run self . check_initializations = deque ([ self . send_config_metadata ]) # type: Deque[Callable[[], None]] if not PY2 : self . check_initializations . append ( self . load_configuration_models )","title":"__init__()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.count","text":"Sample a raw count metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a raw count metric. 
- **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"count()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.event","text":"Send an event. An event is a dictionary with the following keys and data types: { \"timestamp\" : int , # the epoch timestamp for the event \"event_type\" : str , # the event name \"api_key\" : str , # the api key for your account \"msg_title\" : str , # the title of the event \"msg_text\" : str , # the text body of the event \"aggregation_key\" : str , # a key to use for aggregating events \"alert_type\" : str , # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\" : str , # (optional) the source type name \"host\" : str , # (optional) the name of the host \"tags\" : list , # (optional) a list of tags to associate with this event \"priority\" : str , # (optional) specifies the priority of the event (\"normal\" or \"low\") } event ( dict ) - the event to be sent Source code in def event ( self , event ): # type: (Event) -> None \"\"\"Send an event. An event is a dictionary with the following keys and data types: ```python { \"timestamp\": int, # the epoch timestamp for the event \"event_type\": str, # the event name \"api_key\": str, # the api key for your account \"msg_title\": str, # the title of the event \"msg_text\": str, # the text body of the event \"aggregation_key\": str, # a key to use for aggregating events \"alert_type\": str, # (optional) one of ('error', 'warning', 'success', 'info'), defaults to 'info' \"source_type_name\": str, # (optional) the source type name \"host\": str, # (optional) the name of the host \"tags\": list, # (optional) a list of tags to associate with this event \"priority\": str, # (optional) specifies the priority of the event (\"normal\" or \"low\") } ``` - **event** (_dict_) - the event to be sent \"\"\" # Enforce types of some fields, considerably facilitates handling in go bindings downstream for key , value in iteritems ( event ): if not isinstance ( value , ( text_type , binary_type )): continue try : event [ key ] = to_native_string ( value ) # type: ignore # ^ Mypy complains about dynamic key assignment -- arguably for good reason. # Ideally we should convert this to a dict literal so that submitted events only include known keys. except UnicodeError : self . log . warning ( 'Encoding error with field ` %s `, cannot submit event' , key ) return if event . get ( 'tags' ): event [ 'tags' ] = self . _normalize_tags_type ( event [ 'tags' ]) if event . get ( 'timestamp' ): event [ 'timestamp' ] = int ( event [ 'timestamp' ]) if event . get ( 'aggregation_key' ): event [ 'aggregation_key' ] = to_native_string ( event [ 'aggregation_key' ]) if self . __NAMESPACE__ : event . setdefault ( 'source_type_name' , self . __NAMESPACE__ ) aggregator . submit_event ( self , self . check_id , event )","title":"event()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.gauge","text":"Sample a gauge metric. 
Parameters: name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def gauge ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a gauge metric. **Parameters:** - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . GAUGE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"gauge()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.histogram","text":"Sample a histogram metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def histogram ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTOGRAM , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"histogram()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.historate","text":"Sample a histogram based on rate metrics. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def historate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a histogram based on rate metrics. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. 
- **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . HISTORATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"historate()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.metadata_entrypoint","text":"Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: class MyCheck ( AgentCheck ): @AgentCheck . metadata_entrypoint def collect_metadata ( self ): ... Source code in @classmethod def metadata_entrypoint ( cls , method ): # type: (Callable[..., None]) -> Callable[..., None] \"\"\" Skip execution of the decorated method if metadata collection is disabled on the Agent. Usage: ```python class MyCheck(AgentCheck): @AgentCheck.metadata_entrypoint def collect_metadata(self): ... ``` \"\"\" @functools . wraps ( method ) def entrypoint ( self , * args , ** kwargs ): # type: (AgentCheck, *Any, **Any) -> None if not self . is_metadata_collection_enabled (): return # NOTE: error handling still at the discretion of the wrapped method. method ( self , * args , ** kwargs ) return entrypoint","title":"metadata_entrypoint()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.monotonic_count","text":"Sample an increasing counter metric. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. raw ( bool ) - whether to ignore any defined namespace prefix flush_first_value ( bool ) - whether to sample the first value Source code in def monotonic_count ( self , name , value , tags = None , hostname = None , device_name = None , raw = False , flush_first_value = False ): # type: (str, float, Sequence[str], str, str, bool, bool) -> None \"\"\"Sample an increasing counter metric. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix - **flush_first_value** (_bool_) - whether to sample the first value \"\"\" self . _submit_metric ( aggregator . MONOTONIC_COUNT , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw , flush_first_value = flush_first_value , )","title":"monotonic_count()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.rate","text":"Sample a point, with the rate calculated at the end of the check. name ( str ) - the name of the metric value ( float ) - the value for the metric tags ( List[str] ) - a list of tags to associate with this metric hostname ( str ) - a hostname to associate with this metric. Defaults to the current host. device_name ( str ) - deprecated add a tag in the form device: to the tags list instead. 
raw ( bool ) - whether to ignore any defined namespace prefix Source code in def rate ( self , name , value , tags = None , hostname = None , device_name = None , raw = False ): # type: (str, float, Sequence[str], str, str, bool) -> None \"\"\"Sample a point, with the rate calculated at the end of the check. - **name** (_str_) - the name of the metric - **value** (_float_) - the value for the metric - **tags** (_List[str]_) - a list of tags to associate with this metric - **hostname** (_str_) - a hostname to associate with this metric. Defaults to the current host. - **device_name** (_str_) - **deprecated** add a tag in the form `device:` to the `tags` list instead. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" self . _submit_metric ( aggregator . RATE , name , value , tags = tags , hostname = hostname , device_name = device_name , raw = raw )","title":"rate()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.service_check","text":"Send the status of a service. name ( str ) - the name of the service check status ( int ) - a constant describing the service status. tags ( List[str] ) - a list of tags to associate with this service check message ( str ) - additional information or a description of why this status occurred. raw ( bool ) - whether to ignore any defined namespace prefix Source code in def service_check ( self , name , status , tags = None , hostname = None , message = None , raw = False ): # type: (str, ServiceCheckStatus, Sequence[str], str, str, bool) -> None \"\"\"Send the status of a service. - **name** (_str_) - the name of the service check - **status** (_int_) - a constant describing the service status. - **tags** (_List[str]_) - a list of tags to associate with this service check - **message** (_str_) - additional information or a description of why this status occurred. - **raw** (_bool_) - whether to ignore any defined namespace prefix \"\"\" tags = self . _normalize_tags_type ( tags or []) if hostname is None : hostname = '' if message is None : message = '' else : message = to_native_string ( message ) message = self . sanitize ( message ) aggregator . submit_service_check ( self , self . check_id , self . _format_namespace ( name , raw ), status , tags , hostname , message )","title":"service_check()"},{"location":"base/api/#datadog_checks.base.checks.base.AgentCheck.set_metadata","text":"Updates the cached metadata name with value , which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if name has no transformer defined then the raw value will be submitted and therefore it must be a str :param options: keyword arguments to pass to any defined transformer Source code in def set_metadata ( self , name , value , ** options ): # type: (str, Any, **Any) -> None \"\"\"Updates the cached metadata ``name`` with ``value``, which is then sent by the Agent at regular intervals. :param str name: the name of the metadata :param object value: the value for the metadata. if ``name`` has no transformer defined then the raw ``value`` will be submitted and therefore it must be a ``str`` :param options: keyword arguments to pass to any defined transformer \"\"\" self . metadata_manager . 
submit ( name , value , options )","title":"set_metadata()"},{"location":"base/api/#stubs","text":"","title":"Stubs"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub","text":"This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions.","title":"AggregatorStub"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_all_metrics_covered","text":"Source code in def assert_all_metrics_covered ( self ): # use `condition` to avoid building the `msg` if not needed condition = self . metrics_asserted_pct >= 100.0 msg = '' if not condition : prefix = ' \\n\\t - ' msg = 'Some metrics are missing:' msg += ' \\n Asserted Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . _asserted ))) msg += ' \\n Missing Metrics: {}{} ' . format ( prefix , prefix . join ( sorted ( self . not_asserted ()))) assert condition , msg","title":"assert_all_metrics_covered()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_event","text":"Source code in def assert_event ( self , msg_text , count = None , at_least = 1 , exact_match = True , tags = None , ** kwargs ): candidates = [] for e in self . events : if exact_match and msg_text != e [ 'msg_text' ] or msg_text not in e [ 'msg_text' ]: continue if tags and set ( tags ) != set ( e [ 'tags' ]): continue for name , value in iteritems ( kwargs ): if e [ name ] != value : break else : candidates . append ( e ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( msg_text , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_event()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric","text":"Assert a metric was processed by this stub Source code in def assert_metric ( self , name , value = None , tags = None , count = None , at_least = 1 , hostname = None , metric_type = None , device = None ): \"\"\" Assert a metric was processed by this stub \"\"\" self . _asserted . add ( name ) expected_tags = normalize_tags ( tags , sort = True ) candidates = [] for metric in self . metrics ( name ): if value is not None and not self . is_aggregate ( metric . type ) and value != metric . value : continue if expected_tags and expected_tags != sorted ( metric . tags ): continue if hostname is not None and hostname != metric . hostname : continue if metric_type is not None and metric_type != metric . type : continue if device is not None and device != metric . device : continue candidates . append ( metric ) expected_metric = MetricStub ( name , metric_type , value , tags , hostname , device ) if value is not None and candidates and all ( self . is_aggregate ( m . type ) for m in candidates ): got = sum ( m . value for m in candidates ) msg = \"Expected count value for ' {} ': {} , got {} \" . format ( name , value , got ) condition = value == got elif count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition , msg = msg , expected_stub = expected_metric , submitted_elements = self . 
_metrics )","title":"assert_metric()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric_has_tag","text":"Assert a metric is tagged with tag Source code in def assert_metric_has_tag ( self , metric_name , tag , count = None , at_least = 1 ): \"\"\" Assert a metric is tagged with tag \"\"\" self . _asserted . add ( metric_name ) candidates = [] for metric in self . metrics ( metric_name ): if tag in metric . tags : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_metric_has_tag()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_metric_has_tag_prefix","text":"Source code in def assert_metric_has_tag_prefix ( self , metric_name , tag_prefix , count = None , at_least = 1 ): candidates = [] self . _asserted . add ( metric_name ) for metric in self . metrics ( metric_name ): tags = metric . tags gtags = [ t for t in tags if t . startswith ( tag_prefix )] if len ( gtags ) > 0 : candidates . append ( metric ) msg = \"Candidates size assertion for ` {} `, count: {} , at_least: {} ) failed\" . format ( metric_name , count , at_least ) if count is not None : assert len ( candidates ) == count , msg else : assert len ( candidates ) >= at_least , msg","title":"assert_metric_has_tag_prefix()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_no_duplicate_metrics","text":"Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: metric name type (gauge, rate, etc) tags hostname Source code in def assert_no_duplicate_metrics ( self ): \"\"\" Assert no duplicate metrics have been submitted. Metrics are considered duplicate when all following fields match: - metric name - type (gauge, rate, etc) - tags - hostname \"\"\" # metric types that intended to be called multiple times are ignored ignored_types = [ self . COUNT , self . MONOTONIC_COUNT , self . COUNTER ] metric_stubs = [ m for metrics in self . _metrics . values () for m in metrics if m . type not in ignored_types ] def stub_to_key_fn ( stub ): return stub . name , stub . type , str ( sorted ( stub . tags )), stub . hostname self . _assert_no_duplicate_stub ( 'metric' , metric_stubs , stub_to_key_fn )","title":"assert_no_duplicate_metrics()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_no_duplicate_service_checks","text":"Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname Source code in def assert_no_duplicate_service_checks ( self ): \"\"\" Assert no duplicate service checks have been submitted. Service checks are considered duplicate when all following fields match: - metric name - status - tags - hostname \"\"\" service_check_stubs = [ m for metrics in self . _service_checks . values () for m in metrics ] def stub_to_key_fn ( stub ): return stub . name , stub . status , str ( sorted ( stub . tags )), stub . hostname self . 
_assert_no_duplicate_stub ( 'service_check' , service_check_stubs , stub_to_key_fn )","title":"assert_no_duplicate_service_checks()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.assert_service_check","text":"Assert a service check was processed by this stub Source code in def assert_service_check ( self , name , status = None , tags = None , count = None , at_least = 1 , hostname = None , message = None ): \"\"\" Assert a service check was processed by this stub \"\"\" tags = normalize_tags ( tags , sort = True ) candidates = [] for sc in self . service_checks ( name ): if status is not None and status != sc . status : continue if tags and tags != sorted ( sc . tags ): continue if hostname is not None and hostname != sc . hostname : continue if message is not None and message != sc . message : continue candidates . append ( sc ) expected_service_check = ServiceCheckStub ( None , name = name , status = status , tags = tags , hostname = hostname , message = message ) if count is not None : msg = \"Needed exactly {} candidates for ' {} ', got {} \" . format ( count , name , len ( candidates )) condition = len ( candidates ) == count else : msg = \"Needed at least {} candidates for ' {} ', got {} \" . format ( at_least , name , len ( candidates )) condition = len ( candidates ) >= at_least self . _assert ( condition = condition , msg = msg , expected_stub = expected_service_check , submitted_elements = self . _service_checks )","title":"assert_service_check()"},{"location":"base/api/#datadog_checks.base.stubs.aggregator.AggregatorStub.reset","text":"Set the stub to its initial state Source code in def reset ( self ): \"\"\" Set the stub to its initial state \"\"\" self . _metrics = defaultdict ( list ) self . _asserted = set () self . _service_checks = defaultdict ( list ) self . _events = [] self . _event_platform_events = defaultdict ( list )","title":"reset()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub","text":"This implements the methods defined by the Agent's C bindings which in turn call the Go backend . It also provides utility methods for test assertions.","title":"DatadogAgentStub"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.assert_metadata","text":"Source code in def assert_metadata ( self , check_id , data ): actual = {} for name in data : key = ( check_id , name ) if key in self . _metadata : actual [ name ] = self . _metadata [ key ] assert data == actual","title":"assert_metadata()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.assert_metadata_count","text":"Source code in def assert_metadata_count ( self , count ): assert len ( self . _metadata ) == count","title":"assert_metadata_count()"},{"location":"base/api/#datadog_checks.base.stubs.datadog_agent.DatadogAgentStub.reset","text":"Source code in def reset ( self ): self . _metadata . clear () self . _cache . clear () self . _config = self . get_default_config ()","title":"reset()"},{"location":"base/basics/","text":"Basics \u00b6 The AgentCheck base class contains the logic that all Checks inherit. In addition to the integrations inheriting from AgentCheck, other classes that inherit from AgentCheck include: PDHBaseCheck OpenMetricsBaseCheck KubeLeaderElectionBaseCheck Getting Started \u00b6 The Datadog Agent looks for __version__ and a subclass of AgentCheck at the root of every Check package. 
Below is an example of the __init__.py file for a hypothetical Awesome Check: from .__about__ import __version__ from .check import AwesomeCheck __all__ = [ '__version__' , 'AwesomeCheck' ] The version is used in the Agent's status output (if no __version__ is found, it will default to 0.0.0 ): ========= Collector ========= Running Checks ============== AwesomeCheck (0.0.1) ------------------- Instance ID: 1234 [OK] Configuration Source: file:/etc/datadog-agent/conf.d/awesomecheck.d/awesomecheck.yaml Total Runs: 12 Metric Samples: Last Run: 242, Total: 2,904 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 0, Total: 0 Average Execution Time : 49ms Last Execution Date : 2020-10-26 19:09:22.000000 UTC Last Successful Execution Date : 2020-10-26 19:09:22.000000 UTC ... Checks \u00b6 AgentCheck contains functions that you use to execute Checks and submit data to Datadog. Metrics \u00b6 This list enumerates what is collected from your system by each integration. For more information on metrics, see the Metric Types documentation. You can find the metrics for each integration in that integration's metadata.csv file. You can also set up custom metrics , so if the integration doesn\u2019t offer a metric out of the box, you can usually add it. Gauge \u00b6 The gauge metric submission type represents a snapshot of events in one time interval. This representative snapshot value is the last value submitted to the Agent during a time interval. A gauge can be used to take a measure of something reporting continuously\u2014like the available disk space or memory used. For more information, see the API documentation Count \u00b6 The count metric submission type represents the total number of event occurrences in one time interval. A count can be used to track the total number of connections made to a database or the total number of requests to an endpoint. This number of events can increase or decrease over time\u2014it is not monotonically increasing. For more information, see the API documentation . Monotonic Count \u00b6 Similar to Count, Monotonic Count represents the total number of event occurrences in one time interval. However, this value can ONLY increment. For more information, see the API documentation . Rate \u00b6 The rate metric submission type represents the total number of event occurrences per second in one time interval. A rate can be used to track how often something is happening\u2014like the frequency of connections made to a database or the flow of requests made to an endpoint. For more information, see the API documentation . Histogram \u00b6 The histogram metric submission type represents the statistical distribution of a set of values calculated Agent-side in one time interval. Datadog\u2019s histogram metric type is an extension of the StatsD timing metric type: the Agent aggregates the values that are sent in a defined time interval and produces different metrics which represent the set of values. For more information, see the API documentation . Historate \u00b6 Similar to the histogram metric, the historate represents statistical distribution over one time interval, although this is based on rate metrics. For more information, see the API documentation . Service Checks \u00b6 Service checks are a type of monitor used to track the uptime status of the service. For more information, see the Service checks guide. For more information, see the API documentation . 
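Tying these submission types together, here is a minimal sketch of a single check run; the AwesomeCheck class, the metric names, and the values below are illustrative assumptions rather than part of any shipped integration:

```python
from datadog_checks.base import AgentCheck


class AwesomeCheck(AgentCheck):
    __NAMESPACE__ = 'awesome'

    def check(self, instance):
        # Snapshot-style value: the last sample in the interval is what counts.
        self.gauge('connections.active', 42, tags=['service:awesome'])

        # Ever-increasing counter: only increments are meaningful.
        self.monotonic_count('requests.total', 1337, tags=['service:awesome'])

        # Report whether the monitored service is reachable.
        self.service_check('can_connect', AgentCheck.OK, tags=['service:awesome'])
```

Because `__NAMESPACE__` is set, the metrics above would be reported as `awesome.connections.active` and `awesome.requests.total`, and the service check as `awesome.can_connect` (see the Namespacing section below).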
Events \u00b6 Events are informational messages about your system that are consumed by the events stream so that you can build monitors on them. For more information, see the API documentation . Namespacing \u00b6 Within every integration, you can specify the value of __NAMESPACE__ : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' ... This is an optional addition, but it makes submissions easier since it prefixes every metric with the __NAMESPACE__ automatically. In this case it would append awesome. to each metric submitted to Datadog. If you wish to ignore the namespace for any reason, you can append an optional Boolean raw=True to each submission: self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ], raw = True ) ... You submitted a gauge metric named test with a value of 1.23 tagged by foo:bar ignoring the namespace. Check Initializations \u00b6 In the AgentCheck class, there is a useful property called check_initializations , which you can use to execute functions that are called once before the first check run. You can fill up check_initializations with instructions in the __init__ function of an integration. For example, you could use it to parse configuration information before running a check. Listed below is an example with Airflow: class AirflowCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AirflowCheck , self ) . __init__ ( name , init_config , instances ) self . _url = self . instance . get ( 'url' , '' ) self . _tags = self . instance . get ( 'tags' , []) # The Agent only makes one attempt to instantiate each AgentCheck so any errors occurring # in `__init__` are logged just once, making it difficult to spot. Therefore, # potential configuration errors are emitted as part of the check run phase. # The configuration is only parsed once if it succeed, otherwise it's retried. self . check_initializations . append ( self . _parse_config ) ...","title":"Basics"},{"location":"base/basics/#basics","text":"The AgentCheck base class contains the logic that all Checks inherit. In addition to the integrations inheriting from AgentCheck, other classes that inherit from AgentCheck include: PDHBaseCheck OpenMetricsBaseCheck KubeLeaderElectionBaseCheck","title":"Basics"},{"location":"base/basics/#getting-started","text":"The Datadog Agent looks for __version__ and a subclass of AgentCheck at the root of every Check package. Below is an example of the __init__.py file for a hypothetical Awesome Check: from .__about__ import __version__ from .check import AwesomeCheck __all__ = [ '__version__' , 'AwesomeCheck' ] The version is used in the Agent's status output (if no __version__ is found, it will default to 0.0.0 ): ========= Collector ========= Running Checks ============== AwesomeCheck (0.0.1) ------------------- Instance ID: 1234 [OK] Configuration Source: file:/etc/datadog-agent/conf.d/awesomecheck.d/awesomecheck.yaml Total Runs: 12 Metric Samples: Last Run: 242, Total: 2,904 Events: Last Run: 0, Total: 0 Service Checks: Last Run: 0, Total: 0 Average Execution Time : 49ms Last Execution Date : 2020-10-26 19:09:22.000000 UTC Last Successful Execution Date : 2020-10-26 19:09:22.000000 UTC ...","title":"Getting Started"},{"location":"base/basics/#checks","text":"AgentCheck contains functions that you use to execute Checks and submit data to Datadog.","title":"Checks"},{"location":"base/basics/#metrics","text":"This list enumerates what is collected from your system by each integration. 
For more information on metrics, see the Metric Types documentation. You can find the metrics for each integration in that integration's metadata.csv file. You can also set up custom metrics , so if the integration doesn\u2019t offer a metric out of the box, you can usually add it.","title":"Metrics"},{"location":"base/basics/#gauge","text":"The gauge metric submission type represents a snapshot of events in one time interval. This representative snapshot value is the last value submitted to the Agent during a time interval. A gauge can be used to take a measure of something reporting continuously\u2014like the available disk space or memory used. For more information, see the API documentation","title":"Gauge"},{"location":"base/basics/#count","text":"The count metric submission type represents the total number of event occurrences in one time interval. A count can be used to track the total number of connections made to a database or the total number of requests to an endpoint. This number of events can increase or decrease over time\u2014it is not monotonically increasing. For more information, see the API documentation .","title":"Count"},{"location":"base/basics/#monotonic-count","text":"Similar to Count, Monotonic Count represents the total number of event occurrences in one time interval. However, this value can ONLY increment. For more information, see the API documentation .","title":"Monotonic Count"},{"location":"base/basics/#rate","text":"The rate metric submission type represents the total number of event occurrences per second in one time interval. A rate can be used to track how often something is happening\u2014like the frequency of connections made to a database or the flow of requests made to an endpoint. For more information, see the API documentation .","title":"Rate"},{"location":"base/basics/#histogram","text":"The histogram metric submission type represents the statistical distribution of a set of values calculated Agent-side in one time interval. Datadog\u2019s histogram metric type is an extension of the StatsD timing metric type: the Agent aggregates the values that are sent in a defined time interval and produces different metrics which represent the set of values. For more information, see the API documentation .","title":"Histogram"},{"location":"base/basics/#historate","text":"Similar to the histogram metric, the historate represents statistical distribution over one time interval, although this is based on rate metrics. For more information, see the API documentation .","title":"Historate"},{"location":"base/basics/#service-checks","text":"Service checks are a type of monitor used to track the uptime status of the service. For more information, see the Service checks guide. For more information, see the API documentation .","title":"Service Checks"},{"location":"base/basics/#events","text":"Events are informational messages about your system that are consumed by the events stream so that you can build monitors on them. For more information, see the API documentation .","title":"Events"},{"location":"base/basics/#namespacing","text":"Within every integration, you can specify the value of __NAMESPACE__ : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): __NAMESPACE__ = 'awesome' ... This is an optional addition, but it makes submissions easier since it prefixes every metric with the __NAMESPACE__ automatically. In this case it would append awesome. to each metric submitted to Datadog. 
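For instance (a small sketch; the metric name and value are made up for illustration), a plain submission from such a check:

```python
# Inside a check whose class sets __NAMESPACE__ = 'awesome'
self.gauge('connections', 2, tags=['env:dev'])
# The metric arrives in Datadog as `awesome.connections`.
```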
If you wish to ignore the namespace for any reason, you can append an optional Boolean raw=True to each submission: self . gauge ( 'test' , 1.23 , tags = [ 'foo:bar' ], raw = True ) ... You submitted a gauge metric named test with a value of 1.23 tagged by foo:bar ignoring the namespace.","title":"Namespacing"},{"location":"base/basics/#check-initializations","text":"In the AgentCheck class, there is a useful property called check_initializations , which you can use to execute functions that are called once before the first check run. You can fill up check_initializations with instructions in the __init__ function of an integration. For example, you could use it to parse configuration information before running a check. Listed below is an example with Airflow: class AirflowCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AirflowCheck , self ) . __init__ ( name , init_config , instances ) self . _url = self . instance . get ( 'url' , '' ) self . _tags = self . instance . get ( 'tags' , []) # The Agent only makes one attempt to instantiate each AgentCheck so any errors occurring # in `__init__` are logged just once, making it difficult to spot. Therefore, # potential configuration errors are emitted as part of the check run phase. # The configuration is only parsed once if it succeed, otherwise it's retried. self . check_initializations . append ( self . _parse_config ) ...","title":"Check Initializations"},{"location":"base/databases/","text":"Databases \u00b6 No matter the database you wish to monitor, the base package provides a standard way to define and collect data from arbitrary queries. The core premise is that you define a function that accepts a query (usually a str ) and it returns a sequence of equal length results. Interface \u00b6 All the functionality is exposed by the Query and QueryManager classes. datadog_checks.base.utils.db.query.Query \u00b6 This class accepts a single dict argument which is necessary to run the query. The representation is based on our custom_queries format originally designed and implemented in !1528 . It is now part of all our database integrations and other products have since adopted this format. __init__ ( self , query_data ) special \u00b6 Source code in def __init__ ( self , query_data ): # type: (Dict[str, Any]) -> Query self . query_data = deepcopy ( query_data or {}) # type: Dict[str, Any] self . name = None # type: str self . query = None # type: str self . columns = None # type: List[str] self . extras = None # type: List[Dict[str, str]] self . tags = None # type: List[str] compile ( self , column_transformers , extra_transformers ) \u00b6 This idempotent method will be called by QueryManager.compile_queries so you should never need to call it directly. Source code in def compile ( self , column_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] extra_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] ): # type: (...) -> None \"\"\" This idempotent method will be called by `QueryManager.compile_queries` so you should never need to call it directly. \"\"\" # Check for previous compilation if self . name is not None : return query_name = self . query_data . get ( 'name' ) if not query_name : raise ValueError ( 'query field `name` is required' ) elif not isinstance ( query_name , str ): raise ValueError ( 'query field `name` must be a string' ) query = self . query_data . 
get ( 'query' ) if not query : raise ValueError ( 'field `query` for {} is required' . format ( query_name )) elif not isinstance ( query , str ): raise ValueError ( 'field `query` for {} must be a string' . format ( query_name )) columns = self . query_data . get ( 'columns' ) if not columns : raise ValueError ( 'field `columns` for {} is required' . format ( query_name )) elif not isinstance ( columns , list ): raise ValueError ( 'field `columns` for {} must be a list' . format ( query_name )) tags = self . query_data . get ( 'tags' , []) if tags is not None and not isinstance ( tags , list ): raise ValueError ( 'field `tags` for {} must be a list' . format ( query_name )) # Keep track of all defined names sources = {} column_data = [] for i , column in enumerate ( columns , 1 ): # Columns can be ignored via configuration. if not column : column_data . append (( None , None )) continue elif not isinstance ( column , dict ): raise ValueError ( 'column # {} of {} is not a mapping' . format ( i , query_name )) column_name = column . get ( 'name' ) if not column_name : raise ValueError ( 'field `name` for column # {} of {} is required' . format ( i , query_name )) elif not isinstance ( column_name , str ): raise ValueError ( 'field `name` for column # {} of {} must be a string' . format ( i , query_name )) elif column_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( column_name , query_name , sources [ column_name ][ 'type' ], sources [ column_name ][ 'index' ] ) ) sources [ column_name ] = { 'type' : 'column' , 'index' : i } column_type = column . get ( 'type' ) if not column_type : raise ValueError ( 'field `type` for column {} of {} is required' . format ( column_name , query_name )) elif not isinstance ( column_type , str ): raise ValueError ( 'field `type` for column {} of {} must be a string' . format ( column_name , query_name )) elif column_type == 'source' : column_data . append (( column_name , ( None , None ))) continue elif column_type not in column_transformers : raise ValueError ( 'unknown type ` {} ` for column {} of {} ' . format ( column_type , column_name , query_name )) modifiers = { key : value for key , value in column . items () if key not in ( 'name' , 'type' )} try : transformer = column_transformers [ column_type ]( column_transformers , column_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for column {} of {} : {} ' . format ( column_type , column_name , query_name , e ) # Prepend helpful error text. # # When an exception is raised in the context of another one, both will be printed. To avoid # this we set the context to None. https://www.python.org/dev/peps/pep-0409/ raise_from ( type ( e )( error ), None ) else : if column_type in ( 'tag' , 'tag_list' ): column_data . append (( column_name , ( column_type , transformer ))) else : # All these would actually submit data. As that is the default case, we represent it as # a reference to None since if we use e.g. `value` it would never be checked anyway. column_data . append (( column_name , ( None , transformer ))) submission_transformers = column_transformers . copy () submission_transformers . pop ( 'tag' ) submission_transformers . pop ( 'tag_list' ) extras = self . query_data . get ( 'extras' , []) if not isinstance ( extras , list ): raise ValueError ( 'field `extras` for {} must be a list' . 
format ( query_name )) extra_data = [] for i , extra in enumerate ( extras , 1 ): if not isinstance ( extra , dict ): raise ValueError ( 'extra # {} of {} is not a mapping' . format ( i , query_name )) extra_name = extra . get ( 'name' ) if not extra_name : raise ValueError ( 'field `name` for extra # {} of {} is required' . format ( i , query_name )) elif not isinstance ( extra_name , str ): raise ValueError ( 'field `name` for extra # {} of {} must be a string' . format ( i , query_name )) elif extra_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( extra_name , query_name , sources [ extra_name ][ 'type' ], sources [ extra_name ][ 'index' ] ) ) sources [ extra_name ] = { 'type' : 'extra' , 'index' : i } extra_type = extra . get ( 'type' ) if not extra_type : if 'expression' in extra : extra_type = 'expression' else : raise ValueError ( 'field `type` for extra {} of {} is required' . format ( extra_name , query_name )) elif not isinstance ( extra_type , str ): raise ValueError ( 'field `type` for extra {} of {} must be a string' . format ( extra_name , query_name )) elif extra_type not in extra_transformers and extra_type not in submission_transformers : raise ValueError ( 'unknown type ` {} ` for extra {} of {} ' . format ( extra_type , extra_name , query_name )) transformer_factory = extra_transformers . get ( extra_type , submission_transformers . get ( extra_type )) extra_source = extra . get ( 'source' ) if extra_type in submission_transformers : if not extra_source : raise ValueError ( 'field `source` for extra {} of {} is required' . format ( extra_name , query_name )) modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' , 'source' )} else : modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' )} modifiers [ 'sources' ] = sources try : transformer = transformer_factory ( submission_transformers , extra_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for extra {} of {} : {} ' . format ( extra_type , extra_name , query_name , e ) raise_from ( type ( e )( error ), None ) else : if extra_type in submission_transformers : transformer = create_extra_transformer ( transformer , extra_source ) extra_data . append (( extra_name , transformer )) self . name = query_name self . query = query self . columns = tuple ( column_data ) self . extras = tuple ( extra_data ) self . tags = tags del self . query_data datadog_checks.base.utils.db.core.QueryManager \u00b6 This class is in charge of running any number of Query instances for a single Check instance. You will most often see it created during Check initialization like this: self . _query_manager = QueryManager ( self , self . execute_query , queries = [ queries . SomeQuery1 , queries . SomeQuery2 , queries . SomeQuery3 , queries . SomeQuery4 , queries . SomeQuery5 , ], tags = self . instance . get ( 'tags' , []), error_handler = self . _error_sanitizer , ) self . check_initializations . append ( self . _query_manager . 
compile_queries ) __init__ ( self , check , executor , queries = None , tags = None , error_handler = None , hostname = None ) special \u00b6 check ( AgentCheck ) - an instance of a Check executor ( callable ) - a callable accepting a str query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set queries ( List[Query] ) - a list of Query instances tags ( List[str] ) - a list of tags to associate with every submission error_handler ( callable ) - a callable accepting a str error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit Source code in def __init__ ( self , check , # type: AgentCheck executor , # type: Callable[[str], Union[Sequence, Iterable]] queries = None , # type: List[str] tags = None , # type: List[str] error_handler = None , # type: Callable[[str], str] hostname = None , # type: str ): # type: (...) -> QueryManager \"\"\" - **check** (_AgentCheck_) - an instance of a Check - **executor** (_callable_) - a callable accepting a `str` query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set - **queries** (_List[Query]_) - a list of `Query` instances - **tags** (_List[str]_) - a list of tags to associate with every submission - **error_handler** (_callable_) - a callable accepting a `str` error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit \"\"\" self . check = check # type: AgentCheck self . executor = executor # type: Callable[[str], Union[Sequence, Iterable]] self . tags = tags or [] self . error_handler = error_handler self . queries = [ Query ( payload ) for payload in queries or []] # type: List[Query] self . hostname = hostname # type: str custom_queries = list ( self . check . instance . get ( 'custom_queries' , [])) # type: List[str] use_global_custom_queries = self . check . instance . get ( 'use_global_custom_queries' , True ) # type: str # Handle overrides if use_global_custom_queries == 'extend' : custom_queries . extend ( self . check . init_config . get ( 'global_custom_queries' , [])) elif ( not custom_queries and 'global_custom_queries' in self . check . init_config and is_affirmative ( use_global_custom_queries ) ): custom_queries = self . check . init_config . get ( 'global_custom_queries' , []) # Deduplicate for i , custom_query in enumerate ( iter_unique ( custom_queries ), 1 ): query = Query ( custom_query ) query . query_data . setdefault ( 'name' , 'custom query # {} ' . format ( i )) self . queries . append ( query ) compile_queries ( self ) \u00b6 This method compiles every Query object. Source code in def compile_queries ( self ): \"\"\"This method compiles every `Query` object.\"\"\" column_transformers = COLUMN_TRANSFORMERS . copy () for submission_method , transformer_name in SUBMISSION_METHODS . items (): method = getattr ( self . check , submission_method ) # Save each method in the initializer -> callable format column_transformers [ transformer_name ] = create_submission_transformer ( method ) for query in self . queries : query . compile ( column_transformers , EXTRA_TRANSFORMERS . copy ()) execute ( self , extra_tags = None ) \u00b6 This method is what you call every check run. Source code in def execute ( self , extra_tags = None ): \"\"\"This method is what you call every check run.\"\"\" logger = self . check . 
log if extra_tags : global_tags = list ( extra_tags ) global_tags . extend ( self . tags ) else : global_tags = self . tags for query in self . queries : query_name = query . name query_columns = query . columns query_extras = query . extras query_tags = query . tags num_columns = len ( query_columns ) try : rows = self . execute_query ( query . query ) except Exception as e : if self . error_handler : logger . error ( 'Error querying %s : %s ' , query_name , self . error_handler ( str ( e ))) else : logger . error ( 'Error querying %s : %s ' , query_name , e ) continue for row in rows : if not row : logger . debug ( 'Query %s returned an empty result' , query_name ) continue if num_columns != len ( row ): logger . error ( 'Query %s expected %d column %s , got %d ' , query_name , num_columns , 's' if num_columns > 1 else '' , len ( row ), ) continue sources = {} submission_queue = [] tags = list ( global_tags ) tags . extend ( query_tags ) for ( column_name , transformer ), value in zip ( query_columns , row ): # Columns can be ignored via configuration if not column_name : continue sources [ column_name ] = value column_type , transformer = transformer # The transformer can be None for `source` types. Those such columns do not submit # anything but are collected into the row values for other columns to reference. if transformer is None : continue elif column_type == 'tag' : tags . append ( transformer ( None , value )) elif column_type == 'tag_list' : tags . extend ( transformer ( None , value )) else : submission_queue . append (( transformer , value )) for transformer , value in submission_queue : transformer ( sources , value , tags = tags , hostname = self . hostname ) for name , transformer in query_extras : try : result = transformer ( sources , tags = tags , hostname = self . hostname ) except Exception as e : logger . error ( 'Error transforming %s : %s ' , name , e ) continue else : if result is not None : sources [ name ] = result execute_query ( self , query ) \u00b6 Called by execute , this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. Source code in def execute_query ( self , query ): \"\"\" Called by `execute`, this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. \"\"\" rows = self . executor ( query ) if rows is None : return iter ([]) else : rows = iter ( rows ) # Ensure we trigger query execution try : first_row = next ( rows ) except StopIteration : return iter ([]) return chain (( first_row ,), rows ) Transformers \u00b6 datadog_checks.base.utils.db.transform.ColumnTransformers \u00b6 match ( transformers , column_name , ** modifiers ) \u00b6 This is used for querying unstructured data. For example, say you want to collect the fields named foo and bar . Typically, they would be stored like: foo bar 4 2 and would be queried like: SELECT foo , bar FROM ... Often, you will instead find data stored in the following format: metric value foo 4 bar 2 and would be queried like: SELECT metric , value FROM ... In this case, the metric column stores the name with which to match on and its value is stored in a separate column. The required items modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the columns top level field. 
You must also define a source modifier either for this transformer itself or in the values of items (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: query : SELECT source1, source2, metric FROM TABLE columns : - name : value1 type : source - name : value2 type : source - name : metric_name type : match source : value1 items : foo : name : test.foo type : gauge source : value2 bar : name : test.bar type : monotonic_gauge and the result set is: source1 source2 metric 1 2 foo 3 4 baz 5 6 bar Here's what would be submitted: foo - test.foo as a gauge with a value of 2 bar - test.bar.total as a gauge and test.bar.count as a monotonic_count , both with a value of 5 baz - nothing since it was not defined as a match item Source code in def get_match ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" This is used for querying unstructured data. For example, say you want to collect the fields named `foo` and `bar`. Typically, they would be stored like: | foo | bar | | --- | --- | | 4 | 2 | and would be queried like: ```sql SELECT foo, bar FROM ... ``` Often, you will instead find data stored in the following format: | metric | value | | ------ | ----- | | foo | 4 | | bar | 2 | and would be queried like: ```sql SELECT metric, value FROM ... ``` In this case, the `metric` column stores the name with which to match on and its `value` is stored in a separate column. The required `items` modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the `columns` top level field. You must also define a `source` modifier either for this transformer itself or in the values of `items` (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: ```yaml query: SELECT source1, source2, metric FROM TABLE columns: - name: value1 type: source - name: value2 type: source - name: metric_name type: match source: value1 items: foo: name: test.foo type: gauge source: value2 bar: name: test.bar type: monotonic_gauge ``` and the result set is: | source1 | source2 | metric | | ------- | ------- | ------ | | 1 | 2 | foo | | 3 | 4 | baz | | 5 | 6 | bar | Here's what would be submitted: - `foo` - `test.foo` as a `gauge` with a value of `2` - `bar` - `test.bar.total` as a `gauge` and `test.bar.count` as a `monotonic_count`, both with a value of `5` - `baz` - nothing since it was not defined as a match item \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables compiled_items = _compile_match_items ( transformers , modifiers ) def match ( sources , value , ** kwargs ): if value in compiled_items : source , transformer = compiled_items [ value ] transformer ( sources , sources [ source ], ** kwargs ) return match monotonic_gauge ( transformers , column_name , ** modifiers ) \u00b6 Send the result as both a gauge suffixed by .total and a monotonic_count suffixed by .count . Source code in def get_monotonic_gauge ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as both a `gauge` suffixed by `.total` and a `monotonic_count` suffixed by `.count`. \"\"\" gauge = transformers [ 'gauge' ]( transformers , ' {} .total' . format ( column_name ), ** modifiers ) monotonic_count = transformers [ 'monotonic_count' ]( transformers , ' {} .count' . 
format ( column_name ), ** modifiers ) def monotonic_gauge ( _ , value , ** kwargs ): gauge ( _ , value , ** kwargs ) monotonic_count ( _ , value , ** kwargs ) return monotonic_gauge service_check ( transformers , column_name , ** modifiers ) \u00b6 Submit a service check. The required modifier status_map is a mapping of values to statuses. Valid statuses include: OK WARNING CRITICAL UNKNOWN Any encountered values that are not defined will be sent as UNKNOWN . Source code in def get_service_check ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Submit a service check. The required modifier `status_map` is a mapping of values to statuses. Valid statuses include: - `OK` - `WARNING` - `CRITICAL` - `UNKNOWN` Any encountered values that are not defined will be sent as `UNKNOWN`. \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables status_map = _compile_service_check_statuses ( modifiers ) service_check_method = transformers [ '__service_check' ]( transformers , column_name , ** modifiers ) def service_check ( _ , value , ** kwargs ): service_check_method ( _ , status_map . get ( value , ServiceCheck . UNKNOWN ), ** kwargs ) return service_check tag ( transformers , column_name , ** modifiers ) \u00b6 Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column env and the column returned the value prod1 , all submissions from that row will be tagged by env:prod1 . This also accepts an optional modifier called boolean that when set to true will transform the result to the string true or false . So for example if you named the column alive and the result was the number 0 the tag will be alive:false . Source code in def get_tag ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> str \"\"\" Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column `env` and the column returned the value `prod1`, all submissions from that row will be tagged by `env:prod1`. This also accepts an optional modifier called `boolean` that when set to `true` will transform the result to the string `true` or `false`. So for example if you named the column `alive` and the result was the number `0` the tag will be `alive:false`. \"\"\" template = ' {} :{{}}' . format ( column_name ) boolean = is_affirmative ( modifiers . pop ( 'boolean' , None )) def tag ( _ , value , ** kwargs ): if boolean : value = str ( is_affirmative ( value )) . lower () return template . format ( value ) return tag tag_list ( transformers , column_name , ** modifiers ) \u00b6 Convert a column to a list of tags that will be used in every submission. Tag name is determined by column_name . The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named server_tag and the column returned the value 'us,primary' , then all submissions for that row will be tagged by server_tag:us and server_tag:primary . Source code in def get_tag_list ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], List[str]] \"\"\" Convert a column to a list of tags that will be used in every submission. Tag name is determined by `column_name`. The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. 
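As a concrete illustration of the `service_check` transformer described above, a column definition might look like the sketch below. The metric name and the query values are hypothetical; in practice this mapping usually lives under `columns` in YAML configuration rather than in Python.

```python
# Hypothetical column entry: the keys of `status_map` are the raw values the query
# can return, each mapped to one of OK, WARNING, CRITICAL, or UNKNOWN.
service_check_column = {
    'name': 'replication.status',
    'type': 'service_check',
    'status_map': {
        'streaming': 'OK',
        'catchup': 'WARNING',
        'down': 'CRITICAL',
    },
}
```

Any value returned by the query that is not listed in `status_map` would be submitted as `UNKNOWN`.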
For example, if the column is named `server_tag` and the column returned the value `'us,primary'`, then all submissions for that row will be tagged by `server_tag:us` and `server_tag:primary`. \"\"\" template = ' %s : {} ' % column_name def tag_list ( _ , value , ** kwargs ): if isinstance ( value , str ): value = [ v . strip () for v in value . split ( ',' )] return [ template . format ( v ) for v in value ] return tag_list temporal_percent ( transformers , column_name , ** modifiers ) \u00b6 Send the result as percentage of time since the last check run as a rate . For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called scale that indicates what unit of time the result should be considered. Valid values are: second millisecond microsecond nanosecond You may also define the unit as an integer number of parts compared to seconds e.g. millisecond is equivalent to 1000 . Source code in def get_temporal_percent ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as percentage of time since the last check run as a `rate`. For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called `scale` that indicates what unit of time the result should be considered. Valid values are: - `second` - `millisecond` - `microsecond` - `nanosecond` You may also define the unit as an integer number of parts compared to seconds e.g. `millisecond` is equivalent to `1000`. \"\"\" scale = modifiers . pop ( 'scale' , None ) if scale is None : raise ValueError ( 'the `scale` parameter is required' ) if isinstance ( scale , str ): scale = constants . TIME_UNITS . get ( scale . lower ()) if scale is None : raise ValueError ( 'the `scale` parameter must be one of: {} ' . format ( ' | ' . join ( sorted ( constants . TIME_UNITS ))) ) elif not isinstance ( scale , int ): raise ValueError ( 'the `scale` parameter must be an integer representing parts of a second e.g. 1000 for millisecond' ) rate = transformers [ 'rate' ]( transformers , column_name , ** modifiers ) def temporal_percent ( _ , value , ** kwargs ): rate ( _ , total_time_to_temporal_percent ( float ( value ), scale = scale ), ** kwargs ) return temporal_percent time_elapsed ( transformers , column_name , ** modifiers ) \u00b6 Send the number of seconds elapsed from a time in the past as a gauge . For example, if the result is an instance of datetime.datetime representing 5 seconds ago, then this would submit with a value of 5 . The optional modifier format indicates what format the result is in. By default it is native , assuming the underlying library provides timestamps as datetime objects. If the value is a UNIX timestamp you can set the format modifier to unix_time . If the value is a string representation of a date, you must provide the expected timestamp format using the supported codes . 
Examples: columns : - name : time_since_x type : time_elapsed format : native # default value and can be omitted - name : time_since_y type : time_elapsed format : unix_time - name : time_since_z type : time_elapsed format : \"%d/%m/%Y %H:%M:%S\" Note The code %z (lower case) is not supported on Windows. Source code in def get_time_elapsed ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the number of seconds elapsed from a time in the past as a `gauge`. For example, if the result is an instance of [datetime.datetime](https://docs.python.org/3/library/datetime.html#datetime.datetime) representing 5 seconds ago, then this would submit with a value of `5`. The optional modifier `format` indicates what format the result is in. By default it is `native`, assuming the underlying library provides timestamps as `datetime` objects. If the value is a UNIX timestamp you can set the `format` modifier to `unix_time`. If the value is a string representation of a date, you must provide the expected timestamp format using the [supported codes](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes). Example: ```yaml columns: - name: time_since_x type: time_elapsed format: native # default value and can be omitted - name: time_since_y type: time_elapsed format: unix_time - name: time_since_z type: time_elapsed format: \"%d/%m/%Y %H:%M:%S\" ``` !!! note The code `%z` (lower case) is not supported on Windows. \"\"\" time_format = modifiers . pop ( 'format' , 'native' ) if not isinstance ( time_format , str ): raise ValueError ( 'the `format` parameter must be a string' ) gauge = transformers [ 'gauge' ]( transformers , column_name , ** modifiers ) if time_format == 'native' : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( value ) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) elif time_format == 'unix_time' : def time_elapsed ( _ , value , ** kwargs ): gauge ( _ , time . time () - value , ** kwargs ) else : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( datetime . strptime ( value , time_format )) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) return time_elapsed datadog_checks.base.utils.db.transform.ExtraTransformers \u00b6 Every column transformer (except tag ) is supported at this level, the only difference being one must set a source to retrieve the desired value. So for example here: columns : - name : foo.bar type : rate extras : - name : foo.current type : gauge source : foo.bar the metric foo.current will be sent as a gauge will the value of foo.bar . expression ( transformers , name , ** modifiers ) \u00b6 This allows the evaluation of a limited subset of Python syntax and built-in functions. columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free expression : disk.total - disk.used submit_type : gauge For brevity, if the expression attribute exists and type does not then it is assumed the type is expression . The submit_type can be any transformer and any extra options are passed down to it. 
The result of every expression is stored, so in lieu of a submit_type the above example could also be written as: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : free expression : disk.total - disk.used - name : disk.free type : gauge source : free The order matters though, so for example the following will fail: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free type : gauge source : free - name : free expression : disk.total - disk.used since the source free does not yet exist. Source code in def get_expression ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], Any] \"\"\" This allows the evaluation of a limited subset of Python syntax and built-in functions. ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free expression: disk.total - disk.used submit_type: gauge ``` For brevity, if the `expression` attribute exists and `type` does not then it is assumed the type is `expression`. The `submit_type` can be any transformer and any extra options are passed down to it. The result of every expression is stored, so in lieu of a `submit_type` the above example could also be written as: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: free expression: disk.total - disk.used - name: disk.free type: gauge source: free ``` The order matters though, so for example the following will fail: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free type: gauge source: free - name: free expression: disk.total - disk.used ``` since the source `free` does not yet exist. \"\"\" available_sources = modifiers . pop ( 'sources' ) expression = modifiers . pop ( 'expression' , None ) if expression is None : raise ValueError ( 'the `expression` parameter is required' ) elif not isinstance ( expression , str ): raise ValueError ( 'the `expression` parameter must be a string' ) elif not expression : raise ValueError ( 'the `expression` parameter must not be empty' ) if not modifiers . pop ( 'verbose' , False ): # Sort the sources in reverse order of length to prevent greedy matching available_sources = sorted ( available_sources , key = lambda s : - len ( s )) # Escape special characters, mostly for the possible dots in metric names available_sources = list ( map ( re . escape , available_sources )) # Finally, utilize the order by relying on the guarantees provided by the alternation operator available_sources = '|' . join ( available_sources ) expression = re . sub ( SOURCE_PATTERN . format ( available_sources ), # Replace by the particular source that matched lambda match_obj : 'SOURCES[\" {} \"]' . format ( match_obj . group ( 1 )), expression , ) expression = compile ( expression , filename = name , mode = 'eval' ) del available_sources if 'submit_type' in modifiers : if modifiers [ 'submit_type' ] not in transformers : raise ValueError ( 'unknown submit_type ` {} `' . format ( modifiers [ 'submit_type' ])) submit_method = transformers [ modifiers . 
pop ( 'submit_type' )]( transformers , name , ** modifiers ) submit_method = create_extra_transformer ( submit_method ) def execute_expression ( sources , ** kwargs ): result = eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) submit_method ( sources , result , ** kwargs ) return result else : def execute_expression ( sources , ** kwargs ): return eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) return execute_expression percent ( transformers , name , ** modifiers ) \u00b6 Send a percentage based on 2 sources as a gauge . The required modifiers are part and total . For example, if you have this configuration: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.utilized type : percent part : disk.used total : disk.total then the extra metric disk.utilized would be sent as a gauge calculated as disk.used / disk.total * 100 . If the source of total is 0 , then the submitted value will always be sent as 0 too. Source code in def get_percent ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send a percentage based on 2 sources as a `gauge`. The required modifiers are `part` and `total`. For example, if you have this configuration: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.utilized type: percent part: disk.used total: disk.total ``` then the extra metric `disk.utilized` would be sent as a `gauge` calculated as `disk.used / disk.total * 100`. If the source of `total` is `0`, then the submitted value will always be sent as `0` too. \"\"\" available_sources = modifiers . pop ( 'sources' ) part = modifiers . pop ( 'part' , None ) if part is None : raise ValueError ( 'the `part` parameter is required' ) elif not isinstance ( part , str ): raise ValueError ( 'the `part` parameter must be a string' ) elif part not in available_sources : raise ValueError ( 'the `part` parameter ` {} ` is not an available source' . format ( part )) total = modifiers . pop ( 'total' , None ) if total is None : raise ValueError ( 'the `total` parameter is required' ) elif not isinstance ( total , str ): raise ValueError ( 'the `total` parameter must be a string' ) elif total not in available_sources : raise ValueError ( 'the `total` parameter ` {} ` is not an available source' . format ( total )) del available_sources gauge = transformers [ 'gauge' ]( transformers , name , ** modifiers ) gauge = create_extra_transformer ( gauge ) def percent ( sources , ** kwargs ): gauge ( sources , compute_percent ( sources [ part ], sources [ total ]), ** kwargs ) return percent","title":"Databases"},{"location":"base/databases/#databases","text":"No matter the database you wish to monitor, the base package provides a standard way to define and collect data from arbitrary queries. The core premise is that you define a function that accepts a query (usually a str ) and it returns a sequence of equal length results.","title":"Databases"},{"location":"base/databases/#interface","text":"All the functionality is exposed by the Query and QueryManager classes.","title":"Interface"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query","text":"This class accepts a single dict argument which is necessary to run the query. The representation is based on our custom_queries format originally designed and implemented in !1528 . 
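To give a sense of the shape of that dict, below is a hypothetical payload built only from the fields that `compile` validates (`name`, `query`, `columns`, plus the optional `tags` and `extras`); the metric and table names are invented for illustration, and the import path follows the module location shown in these docs.

```python
from datadog_checks.base.utils.db.query import Query

# Hypothetical custom_queries-style payload.
query = Query(
    {
        'name': 'host_count',
        'query': 'SELECT role, COUNT(*) FROM hosts GROUP BY role',
        'columns': [
            {'name': 'role', 'type': 'tag'},
            {'name': 'hosts.count', 'type': 'gauge'},
        ],
        'tags': ['source:inventory'],
    }
)
```

The same mapping is what a user would place under `custom_queries` in YAML.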
It is now part of all our database integrations and other products have since adopted this format.","title":"Query"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query.__init__","text":"Source code in def __init__ ( self , query_data ): # type: (Dict[str, Any]) -> Query self . query_data = deepcopy ( query_data or {}) # type: Dict[str, Any] self . name = None # type: str self . query = None # type: str self . columns = None # type: List[str] self . extras = None # type: List[Dict[str, str]] self . tags = None # type: List[str]","title":"__init__()"},{"location":"base/databases/#datadog_checks.base.utils.db.query.Query.compile","text":"This idempotent method will be called by QueryManager.compile_queries so you should never need to call it directly. Source code in def compile ( self , column_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] extra_transformers , # type: Dict[str, Callable[[Dict[str, Callable], str, Any], Any]] ): # type: (...) -> None \"\"\" This idempotent method will be called by `QueryManager.compile_queries` so you should never need to call it directly. \"\"\" # Check for previous compilation if self . name is not None : return query_name = self . query_data . get ( 'name' ) if not query_name : raise ValueError ( 'query field `name` is required' ) elif not isinstance ( query_name , str ): raise ValueError ( 'query field `name` must be a string' ) query = self . query_data . get ( 'query' ) if not query : raise ValueError ( 'field `query` for {} is required' . format ( query_name )) elif not isinstance ( query , str ): raise ValueError ( 'field `query` for {} must be a string' . format ( query_name )) columns = self . query_data . get ( 'columns' ) if not columns : raise ValueError ( 'field `columns` for {} is required' . format ( query_name )) elif not isinstance ( columns , list ): raise ValueError ( 'field `columns` for {} must be a list' . format ( query_name )) tags = self . query_data . get ( 'tags' , []) if tags is not None and not isinstance ( tags , list ): raise ValueError ( 'field `tags` for {} must be a list' . format ( query_name )) # Keep track of all defined names sources = {} column_data = [] for i , column in enumerate ( columns , 1 ): # Columns can be ignored via configuration. if not column : column_data . append (( None , None )) continue elif not isinstance ( column , dict ): raise ValueError ( 'column # {} of {} is not a mapping' . format ( i , query_name )) column_name = column . get ( 'name' ) if not column_name : raise ValueError ( 'field `name` for column # {} of {} is required' . format ( i , query_name )) elif not isinstance ( column_name , str ): raise ValueError ( 'field `name` for column # {} of {} must be a string' . format ( i , query_name )) elif column_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( column_name , query_name , sources [ column_name ][ 'type' ], sources [ column_name ][ 'index' ] ) ) sources [ column_name ] = { 'type' : 'column' , 'index' : i } column_type = column . get ( 'type' ) if not column_type : raise ValueError ( 'field `type` for column {} of {} is required' . format ( column_name , query_name )) elif not isinstance ( column_type , str ): raise ValueError ( 'field `type` for column {} of {} must be a string' . format ( column_name , query_name )) elif column_type == 'source' : column_data . 
append (( column_name , ( None , None ))) continue elif column_type not in column_transformers : raise ValueError ( 'unknown type ` {} ` for column {} of {} ' . format ( column_type , column_name , query_name )) modifiers = { key : value for key , value in column . items () if key not in ( 'name' , 'type' )} try : transformer = column_transformers [ column_type ]( column_transformers , column_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for column {} of {} : {} ' . format ( column_type , column_name , query_name , e ) # Prepend helpful error text. # # When an exception is raised in the context of another one, both will be printed. To avoid # this we set the context to None. https://www.python.org/dev/peps/pep-0409/ raise_from ( type ( e )( error ), None ) else : if column_type in ( 'tag' , 'tag_list' ): column_data . append (( column_name , ( column_type , transformer ))) else : # All these would actually submit data. As that is the default case, we represent it as # a reference to None since if we use e.g. `value` it would never be checked anyway. column_data . append (( column_name , ( None , transformer ))) submission_transformers = column_transformers . copy () submission_transformers . pop ( 'tag' ) submission_transformers . pop ( 'tag_list' ) extras = self . query_data . get ( 'extras' , []) if not isinstance ( extras , list ): raise ValueError ( 'field `extras` for {} must be a list' . format ( query_name )) extra_data = [] for i , extra in enumerate ( extras , 1 ): if not isinstance ( extra , dict ): raise ValueError ( 'extra # {} of {} is not a mapping' . format ( i , query_name )) extra_name = extra . get ( 'name' ) if not extra_name : raise ValueError ( 'field `name` for extra # {} of {} is required' . format ( i , query_name )) elif not isinstance ( extra_name , str ): raise ValueError ( 'field `name` for extra # {} of {} must be a string' . format ( i , query_name )) elif extra_name in sources : raise ValueError ( 'the name {} of {} was already defined in {} # {} ' . format ( extra_name , query_name , sources [ extra_name ][ 'type' ], sources [ extra_name ][ 'index' ] ) ) sources [ extra_name ] = { 'type' : 'extra' , 'index' : i } extra_type = extra . get ( 'type' ) if not extra_type : if 'expression' in extra : extra_type = 'expression' else : raise ValueError ( 'field `type` for extra {} of {} is required' . format ( extra_name , query_name )) elif not isinstance ( extra_type , str ): raise ValueError ( 'field `type` for extra {} of {} must be a string' . format ( extra_name , query_name )) elif extra_type not in extra_transformers and extra_type not in submission_transformers : raise ValueError ( 'unknown type ` {} ` for extra {} of {} ' . format ( extra_type , extra_name , query_name )) transformer_factory = extra_transformers . get ( extra_type , submission_transformers . get ( extra_type )) extra_source = extra . get ( 'source' ) if extra_type in submission_transformers : if not extra_source : raise ValueError ( 'field `source` for extra {} of {} is required' . format ( extra_name , query_name )) modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' , 'source' )} else : modifiers = { key : value for key , value in extra . items () if key not in ( 'name' , 'type' )} modifiers [ 'sources' ] = sources try : transformer = transformer_factory ( submission_transformers , extra_name , ** modifiers ) except Exception as e : error = 'error compiling type ` {} ` for extra {} of {} : {} ' . 
format ( extra_type , extra_name , query_name , e ) raise_from ( type ( e )( error ), None ) else : if extra_type in submission_transformers : transformer = create_extra_transformer ( transformer , extra_source ) extra_data . append (( extra_name , transformer )) self . name = query_name self . query = query self . columns = tuple ( column_data ) self . extras = tuple ( extra_data ) self . tags = tags del self . query_data","title":"compile()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager","text":"This class is in charge of running any number of Query instances for a single Check instance. You will most often see it created during Check initialization like this: self . _query_manager = QueryManager ( self , self . execute_query , queries = [ queries . SomeQuery1 , queries . SomeQuery2 , queries . SomeQuery3 , queries . SomeQuery4 , queries . SomeQuery5 , ], tags = self . instance . get ( 'tags' , []), error_handler = self . _error_sanitizer , ) self . check_initializations . append ( self . _query_manager . compile_queries )","title":"QueryManager"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.__init__","text":"check ( AgentCheck ) - an instance of a Check executor ( callable ) - a callable accepting a str query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set queries ( List[Query] ) - a list of Query instances tags ( List[str] ) - a list of tags to associate with every submission error_handler ( callable ) - a callable accepting a str error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit Source code in def __init__ ( self , check , # type: AgentCheck executor , # type: Callable[[str], Union[Sequence, Iterable]] queries = None , # type: List[str] tags = None , # type: List[str] error_handler = None , # type: Callable[[str], str] hostname = None , # type: str ): # type: (...) -> QueryManager \"\"\" - **check** (_AgentCheck_) - an instance of a Check - **executor** (_callable_) - a callable accepting a `str` query as its sole argument and returning a sequence representing either the full result set or an iterator over the result set - **queries** (_List[Query]_) - a list of `Query` instances - **tags** (_List[str]_) - a list of tags to associate with every submission - **error_handler** (_callable_) - a callable accepting a `str` error as its sole argument and returning a sanitized string, useful for scrubbing potentially sensitive information libraries emit \"\"\" self . check = check # type: AgentCheck self . executor = executor # type: Callable[[str], Union[Sequence, Iterable]] self . tags = tags or [] self . error_handler = error_handler self . queries = [ Query ( payload ) for payload in queries or []] # type: List[Query] self . hostname = hostname # type: str custom_queries = list ( self . check . instance . get ( 'custom_queries' , [])) # type: List[str] use_global_custom_queries = self . check . instance . get ( 'use_global_custom_queries' , True ) # type: str # Handle overrides if use_global_custom_queries == 'extend' : custom_queries . extend ( self . check . init_config . get ( 'global_custom_queries' , [])) elif ( not custom_queries and 'global_custom_queries' in self . check . init_config and is_affirmative ( use_global_custom_queries ) ): custom_queries = self . check . init_config . 
get ( 'global_custom_queries' , []) # Deduplicate for i , custom_query in enumerate ( iter_unique ( custom_queries ), 1 ): query = Query ( custom_query ) query . query_data . setdefault ( 'name' , 'custom query # {} ' . format ( i )) self . queries . append ( query )","title":"__init__()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.compile_queries","text":"This method compiles every Query object. Source code in def compile_queries ( self ): \"\"\"This method compiles every `Query` object.\"\"\" column_transformers = COLUMN_TRANSFORMERS . copy () for submission_method , transformer_name in SUBMISSION_METHODS . items (): method = getattr ( self . check , submission_method ) # Save each method in the initializer -> callable format column_transformers [ transformer_name ] = create_submission_transformer ( method ) for query in self . queries : query . compile ( column_transformers , EXTRA_TRANSFORMERS . copy ())","title":"compile_queries()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.execute","text":"This method is what you call every check run. Source code in def execute ( self , extra_tags = None ): \"\"\"This method is what you call every check run.\"\"\" logger = self . check . log if extra_tags : global_tags = list ( extra_tags ) global_tags . extend ( self . tags ) else : global_tags = self . tags for query in self . queries : query_name = query . name query_columns = query . columns query_extras = query . extras query_tags = query . tags num_columns = len ( query_columns ) try : rows = self . execute_query ( query . query ) except Exception as e : if self . error_handler : logger . error ( 'Error querying %s : %s ' , query_name , self . error_handler ( str ( e ))) else : logger . error ( 'Error querying %s : %s ' , query_name , e ) continue for row in rows : if not row : logger . debug ( 'Query %s returned an empty result' , query_name ) continue if num_columns != len ( row ): logger . error ( 'Query %s expected %d column %s , got %d ' , query_name , num_columns , 's' if num_columns > 1 else '' , len ( row ), ) continue sources = {} submission_queue = [] tags = list ( global_tags ) tags . extend ( query_tags ) for ( column_name , transformer ), value in zip ( query_columns , row ): # Columns can be ignored via configuration if not column_name : continue sources [ column_name ] = value column_type , transformer = transformer # The transformer can be None for `source` types. Those such columns do not submit # anything but are collected into the row values for other columns to reference. if transformer is None : continue elif column_type == 'tag' : tags . append ( transformer ( None , value )) elif column_type == 'tag_list' : tags . extend ( transformer ( None , value )) else : submission_queue . append (( transformer , value )) for transformer , value in submission_queue : transformer ( sources , value , tags = tags , hostname = self . hostname ) for name , transformer in query_extras : try : result = transformer ( sources , tags = tags , hostname = self . hostname ) except Exception as e : logger . error ( 'Error transforming %s : %s ' , name , e ) continue else : if result is not None : sources [ name ] = result","title":"execute()"},{"location":"base/databases/#datadog_checks.base.utils.db.core.QueryManager.execute_query","text":"Called by execute , this triggers query execution to check for errors immediately in a way that is compatible with any library. 
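Putting the pieces above together, a check that owns a `QueryManager` typically looks something like the following sketch. The query payload, the connection handling, and the metric names are hypothetical, and a real check would add error handling and configuration around the driver.

```python
from datadog_checks.base import AgentCheck
from datadog_checks.base.utils.db.core import QueryManager

# Hypothetical query payload following the custom_queries format.
HOST_COUNT = {
    'name': 'host_count',
    'query': 'SELECT role, COUNT(*) FROM hosts GROUP BY role',
    'columns': [
        {'name': 'role', 'type': 'tag'},
        {'name': 'hosts.count', 'type': 'gauge'},
    ],
}

class MyDatabaseCheck(AgentCheck):
    def __init__(self, name, init_config, instances):
        super(MyDatabaseCheck, self).__init__(name, init_config, instances)
        self._query_manager = QueryManager(
            self,
            self.execute_query,
            queries=[HOST_COUNT],
            tags=self.instance.get('tags', []),
        )
        self.check_initializations.append(self._query_manager.compile_queries)

    def execute_query(self, query):
        # Return the full result set, or an iterator over it, for the given query string.
        cursor = self._connection.cursor()  # hypothetical driver connection
        cursor.execute(query)
        return cursor.fetchall()

    def check(self, _):
        self._query_manager.execute()
```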
If there are no errors, this is guaranteed to return an iterator over the result set. Source code in def execute_query ( self , query ): \"\"\" Called by `execute`, this triggers query execution to check for errors immediately in a way that is compatible with any library. If there are no errors, this is guaranteed to return an iterator over the result set. \"\"\" rows = self . executor ( query ) if rows is None : return iter ([]) else : rows = iter ( rows ) # Ensure we trigger query execution try : first_row = next ( rows ) except StopIteration : return iter ([]) return chain (( first_row ,), rows )","title":"execute_query()"},{"location":"base/databases/#transformers","text":"","title":"Transformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers","text":"","title":"ColumnTransformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.match","text":"This is used for querying unstructured data. For example, say you want to collect the fields named foo and bar . Typically, they would be stored like: foo bar 4 2 and would be queried like: SELECT foo , bar FROM ... Often, you will instead find data stored in the following format: metric value foo 4 bar 2 and would be queried like: SELECT metric , value FROM ... In this case, the metric column stores the name with which to match on and its value is stored in a separate column. The required items modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the columns top level field. You must also define a source modifier either for this transformer itself or in the values of items (which will take precedence). The source will be treated as the value of the match. Say this is your configuration: query : SELECT source1, source2, metric FROM TABLE columns : - name : value1 type : source - name : value2 type : source - name : metric_name type : match source : value1 items : foo : name : test.foo type : gauge source : value2 bar : name : test.bar type : monotonic_gauge and the result set is: source1 source2 metric 1 2 foo 3 4 baz 5 6 bar Here's what would be submitted: foo - test.foo as a gauge with a value of 2 bar - test.bar.total as a gauge and test.bar.count as a monotonic_count , both with a value of 5 baz - nothing since it was not defined as a match item Source code in def get_match ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" This is used for querying unstructured data. For example, say you want to collect the fields named `foo` and `bar`. Typically, they would be stored like: | foo | bar | | --- | --- | | 4 | 2 | and would be queried like: ```sql SELECT foo, bar FROM ... ``` Often, you will instead find data stored in the following format: | metric | value | | ------ | ----- | | foo | 4 | | bar | 2 | and would be queried like: ```sql SELECT metric, value FROM ... ``` In this case, the `metric` column stores the name with which to match on and its `value` is stored in a separate column. The required `items` modifier is a mapping of matched names to column data values. Consider the values to be exactly the same as the entries in the `columns` top level field. You must also define a `source` modifier either for this transformer itself or in the values of `items` (which will take precedence). The source will be treated as the value of the match. 
Say this is your configuration: ```yaml query: SELECT source1, source2, metric FROM TABLE columns: - name: value1 type: source - name: value2 type: source - name: metric_name type: match source: value1 items: foo: name: test.foo type: gauge source: value2 bar: name: test.bar type: monotonic_gauge ``` and the result set is: | source1 | source2 | metric | | ------- | ------- | ------ | | 1 | 2 | foo | | 3 | 4 | baz | | 5 | 6 | bar | Here's what would be submitted: - `foo` - `test.foo` as a `gauge` with a value of `2` - `bar` - `test.bar.total` as a `gauge` and `test.bar.count` as a `monotonic_count`, both with a value of `5` - `baz` - nothing since it was not defined as a match item \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables compiled_items = _compile_match_items ( transformers , modifiers ) def match ( sources , value , ** kwargs ): if value in compiled_items : source , transformer = compiled_items [ value ] transformer ( sources , sources [ source ], ** kwargs ) return match","title":"match()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.monotonic_gauge","text":"Send the result as both a gauge suffixed by .total and a monotonic_count suffixed by .count . Source code in def get_monotonic_gauge ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as both a `gauge` suffixed by `.total` and a `monotonic_count` suffixed by `.count`. \"\"\" gauge = transformers [ 'gauge' ]( transformers , ' {} .total' . format ( column_name ), ** modifiers ) monotonic_count = transformers [ 'monotonic_count' ]( transformers , ' {} .count' . format ( column_name ), ** modifiers ) def monotonic_gauge ( _ , value , ** kwargs ): gauge ( _ , value , ** kwargs ) monotonic_count ( _ , value , ** kwargs ) return monotonic_gauge","title":"monotonic_gauge()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.service_check","text":"Submit a service check. The required modifier status_map is a mapping of values to statuses. Valid statuses include: OK WARNING CRITICAL UNKNOWN Any encountered values that are not defined will be sent as UNKNOWN . Source code in def get_service_check ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Submit a service check. The required modifier `status_map` is a mapping of values to statuses. Valid statuses include: - `OK` - `WARNING` - `CRITICAL` - `UNKNOWN` Any encountered values that are not defined will be sent as `UNKNOWN`. \"\"\" # Do work in a separate function to avoid having to `del` a bunch of variables status_map = _compile_service_check_statuses ( modifiers ) service_check_method = transformers [ '__service_check' ]( transformers , column_name , ** modifiers ) def service_check ( _ , value , ** kwargs ): service_check_method ( _ , status_map . get ( value , ServiceCheck . UNKNOWN ), ** kwargs ) return service_check","title":"service_check()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.tag","text":"Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column env and the column returned the value prod1 , all submissions from that row will be tagged by env:prod1 . This also accepts an optional modifier called boolean that when set to true will transform the result to the string true or false . 
So for example if you named the column alive and the result was the number 0 the tag will be alive:false . Source code in def get_tag ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> str \"\"\" Convert a column to a tag that will be used in every subsequent submission. For example, if you named the column `env` and the column returned the value `prod1`, all submissions from that row will be tagged by `env:prod1`. This also accepts an optional modifier called `boolean` that when set to `true` will transform the result to the string `true` or `false`. So for example if you named the column `alive` and the result was the number `0` the tag will be `alive:false`. \"\"\" template = ' {} :{{}}' . format ( column_name ) boolean = is_affirmative ( modifiers . pop ( 'boolean' , None )) def tag ( _ , value , ** kwargs ): if boolean : value = str ( is_affirmative ( value )) . lower () return template . format ( value ) return tag","title":"tag()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.tag_list","text":"Convert a column to a list of tags that will be used in every submission. Tag name is determined by column_name . The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named server_tag and the column returned the value 'us,primary' , then all submissions for that row will be tagged by server_tag:us and server_tag:primary . Source code in def get_tag_list ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], List[str]] \"\"\" Convert a column to a list of tags that will be used in every submission. Tag name is determined by `column_name`. The column value represents a list of values. It is expected to be either a list of strings, or a comma-separated string. For example, if the column is named `server_tag` and the column returned the value `'us,primary'`, then all submissions for that row will be tagged by `server_tag:us` and `server_tag:primary`. \"\"\" template = ' %s : {} ' % column_name def tag_list ( _ , value , ** kwargs ): if isinstance ( value , str ): value = [ v . strip () for v in value . split ( ',' )] return [ template . format ( v ) for v in value ] return tag_list","title":"tag_list()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.temporal_percent","text":"Send the result as percentage of time since the last check run as a rate . For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called scale that indicates what unit of time the result should be considered. Valid values are: second millisecond microsecond nanosecond You may also define the unit as an integer number of parts compared to seconds e.g. millisecond is equivalent to 1000 . Source code in def get_temporal_percent ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the result as percentage of time since the last check run as a `rate`. For example, say the result is a forever increasing counter representing the total time spent pausing for garbage collection since start up. 
That number by itself is quite useless, but as a percentage of time spent pausing since the previous collection interval it becomes a useful metric. There is one required parameter called `scale` that indicates what unit of time the result should be considered. Valid values are: - `second` - `millisecond` - `microsecond` - `nanosecond` You may also define the unit as an integer number of parts compared to seconds e.g. `millisecond` is equivalent to `1000`. \"\"\" scale = modifiers . pop ( 'scale' , None ) if scale is None : raise ValueError ( 'the `scale` parameter is required' ) if isinstance ( scale , str ): scale = constants . TIME_UNITS . get ( scale . lower ()) if scale is None : raise ValueError ( 'the `scale` parameter must be one of: {} ' . format ( ' | ' . join ( sorted ( constants . TIME_UNITS ))) ) elif not isinstance ( scale , int ): raise ValueError ( 'the `scale` parameter must be an integer representing parts of a second e.g. 1000 for millisecond' ) rate = transformers [ 'rate' ]( transformers , column_name , ** modifiers ) def temporal_percent ( _ , value , ** kwargs ): rate ( _ , total_time_to_temporal_percent ( float ( value ), scale = scale ), ** kwargs ) return temporal_percent","title":"temporal_percent()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ColumnTransformers.time_elapsed","text":"Send the number of seconds elapsed from a time in the past as a gauge . For example, if the result is an instance of datetime.datetime representing 5 seconds ago, then this would submit with a value of 5 . The optional modifier format indicates what format the result is in. By default it is native , assuming the underlying library provides timestamps as datetime objects. If the value is a UNIX timestamp you can set the format modifier to unix_time . If the value is a string representation of a date, you must provide the expected timestamp format using the supported codes . Examples: columns : - name : time_since_x type : time_elapsed format : native # default value and can be omitted - name : time_since_y type : time_elapsed format : unix_time - name : time_since_z type : time_elapsed format : \"%d/%m/%Y %H:%M:%S\" Note The code %z (lower case) is not supported on Windows. Source code in def get_time_elapsed ( transformers , column_name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send the number of seconds elapsed from a time in the past as a `gauge`. For example, if the result is an instance of [datetime.datetime](https://docs.python.org/3/library/datetime.html#datetime.datetime) representing 5 seconds ago, then this would submit with a value of `5`. The optional modifier `format` indicates what format the result is in. By default it is `native`, assuming the underlying library provides timestamps as `datetime` objects. If the value is a UNIX timestamp you can set the `format` modifier to `unix_time`. If the value is a string representation of a date, you must provide the expected timestamp format using the [supported codes](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes). Example: ```yaml columns: - name: time_since_x type: time_elapsed format: native # default value and can be omitted - name: time_since_y type: time_elapsed format: unix_time - name: time_since_z type: time_elapsed format: \"%d/%m/%Y %H:%M:%S\" ``` !!! note The code `%z` (lower case) is not supported on Windows. \"\"\" time_format = modifiers . 
pop ( 'format' , 'native' ) if not isinstance ( time_format , str ): raise ValueError ( 'the `format` parameter must be a string' ) gauge = transformers [ 'gauge' ]( transformers , column_name , ** modifiers ) if time_format == 'native' : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( value ) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) elif time_format == 'unix_time' : def time_elapsed ( _ , value , ** kwargs ): gauge ( _ , time . time () - value , ** kwargs ) else : def time_elapsed ( _ , value , ** kwargs ): value = ensure_aware_datetime ( datetime . strptime ( value , time_format )) gauge ( _ , ( datetime . now ( value . tzinfo ) - value ) . total_seconds (), ** kwargs ) return time_elapsed","title":"time_elapsed()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers","text":"Every column transformer (except tag ) is supported at this level, the only difference being one must set a source to retrieve the desired value. So for example here: columns : - name : foo.bar type : rate extras : - name : foo.current type : gauge source : foo.bar the metric foo.current will be sent as a gauge will the value of foo.bar .","title":"ExtraTransformers"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers.expression","text":"This allows the evaluation of a limited subset of Python syntax and built-in functions. columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free expression : disk.total - disk.used submit_type : gauge For brevity, if the expression attribute exists and type does not then it is assumed the type is expression . The submit_type can be any transformer and any extra options are passed down to it. The result of every expression is stored, so in lieu of a submit_type the above example could also be written as: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : free expression : disk.total - disk.used - name : disk.free type : gauge source : free The order matters though, so for example the following will fail: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.free type : gauge source : free - name : free expression : disk.total - disk.used since the source free does not yet exist. Source code in def get_expression ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], Any] \"\"\" This allows the evaluation of a limited subset of Python syntax and built-in functions. ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free expression: disk.total - disk.used submit_type: gauge ``` For brevity, if the `expression` attribute exists and `type` does not then it is assumed the type is `expression`. The `submit_type` can be any transformer and any extra options are passed down to it. 
The result of every expression is stored, so in lieu of a `submit_type` the above example could also be written as: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: free expression: disk.total - disk.used - name: disk.free type: gauge source: free ``` The order matters though, so for example the following will fail: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.free type: gauge source: free - name: free expression: disk.total - disk.used ``` since the source `free` does not yet exist. \"\"\" available_sources = modifiers . pop ( 'sources' ) expression = modifiers . pop ( 'expression' , None ) if expression is None : raise ValueError ( 'the `expression` parameter is required' ) elif not isinstance ( expression , str ): raise ValueError ( 'the `expression` parameter must be a string' ) elif not expression : raise ValueError ( 'the `expression` parameter must not be empty' ) if not modifiers . pop ( 'verbose' , False ): # Sort the sources in reverse order of length to prevent greedy matching available_sources = sorted ( available_sources , key = lambda s : - len ( s )) # Escape special characters, mostly for the possible dots in metric names available_sources = list ( map ( re . escape , available_sources )) # Finally, utilize the order by relying on the guarantees provided by the alternation operator available_sources = '|' . join ( available_sources ) expression = re . sub ( SOURCE_PATTERN . format ( available_sources ), # Replace by the particular source that matched lambda match_obj : 'SOURCES[\" {} \"]' . format ( match_obj . group ( 1 )), expression , ) expression = compile ( expression , filename = name , mode = 'eval' ) del available_sources if 'submit_type' in modifiers : if modifiers [ 'submit_type' ] not in transformers : raise ValueError ( 'unknown submit_type ` {} `' . format ( modifiers [ 'submit_type' ])) submit_method = transformers [ modifiers . pop ( 'submit_type' )]( transformers , name , ** modifiers ) submit_method = create_extra_transformer ( submit_method ) def execute_expression ( sources , ** kwargs ): result = eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) submit_method ( sources , result , ** kwargs ) return result else : def execute_expression ( sources , ** kwargs ): return eval ( expression , ALLOWED_GLOBALS , { 'SOURCES' : sources }) return execute_expression","title":"expression()"},{"location":"base/databases/#datadog_checks.base.utils.db.transform.ExtraTransformers.percent","text":"Send a percentage based on 2 sources as a gauge . The required modifiers are part and total . For example, if you have this configuration: columns : - name : disk.total type : gauge - name : disk.used type : gauge extras : - name : disk.utilized type : percent part : disk.used total : disk.total then the extra metric disk.utilized would be sent as a gauge calculated as disk.used / disk.total * 100 . If the source of total is 0 , then the submitted value will always be sent as 0 too. Source code in def get_percent ( transformers , name , ** modifiers ): # type: (Dict[str, Callable], str, Any) -> Callable[[Any, Any, Any], None] \"\"\" Send a percentage based on 2 sources as a `gauge`. The required modifiers are `part` and `total`. 
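Before moving on, here is a small self-contained re-creation of what the `expression` transformer's compilation step above boils down to: source names in the expression string are rewritten to lookups in a `SOURCES` mapping, compiled once, and evaluated against the row's collected values. The pattern handling is simplified relative to the real `SOURCE_PATTERN`, and the source names and values are hypothetical.

```python
import re

sources = {'disk.total': 100.0, 'disk.used': 25.0}
expression = 'disk.total - disk.used'

# Longest names first to avoid partial matches, with dots escaped for the regex.
pattern = '|'.join(re.escape(name) for name in sorted(sources, key=len, reverse=True))
rewritten = re.sub(pattern, lambda m: 'SOURCES["{}"]'.format(m.group(0)), expression)
code = compile(rewritten, filename='disk.free', mode='eval')

print(eval(code, {'__builtins__': {}}, {'SOURCES': sources}))  # 75.0
```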
For example, if you have this configuration: ```yaml columns: - name: disk.total type: gauge - name: disk.used type: gauge extras: - name: disk.utilized type: percent part: disk.used total: disk.total ``` then the extra metric `disk.utilized` would be sent as a `gauge` calculated as `disk.used / disk.total * 100`. If the source of `total` is `0`, then the submitted value will always be sent as `0` too. \"\"\" available_sources = modifiers . pop ( 'sources' ) part = modifiers . pop ( 'part' , None ) if part is None : raise ValueError ( 'the `part` parameter is required' ) elif not isinstance ( part , str ): raise ValueError ( 'the `part` parameter must be a string' ) elif part not in available_sources : raise ValueError ( 'the `part` parameter ` {} ` is not an available source' . format ( part )) total = modifiers . pop ( 'total' , None ) if total is None : raise ValueError ( 'the `total` parameter is required' ) elif not isinstance ( total , str ): raise ValueError ( 'the `total` parameter must be a string' ) elif total not in available_sources : raise ValueError ( 'the `total` parameter ` {} ` is not an available source' . format ( total )) del available_sources gauge = transformers [ 'gauge' ]( transformers , name , ** modifiers ) gauge = create_extra_transformer ( gauge ) def percent ( sources , ** kwargs ): gauge ( sources , compute_percent ( sources [ part ], sources [ total ]), ** kwargs ) return percent","title":"percent()"},{"location":"base/http/","text":"HTTP \u00b6 Whenever you need to make HTTP requests, the base class provides a convenience member that has the same interface as the popular requests library and ensures consistent behavior across all integrations. The wrapper automatically parses and uses configuration from the instance , init_config , and Agent config. Also, this is only done once during initialization and cached to reduce the overhead of every call. For example, to make a GET request you would use: response = self . http . get ( url ) and the wrapper will pass the right things to requests . All methods accept optional keyword arguments like stream , etc. Any method-level option will override configuration. So for example if tls_verify was set to false and you do self.http.get(url, verify=True) , then SSL certificates will be verified on that particular request. You can use the keyword argument persist to override persist_connections . There is also support for non-standard or legacy configurations with the HTTP_CONFIG_REMAPPER class attribute. For example: class MyCheck ( AgentCheck ): HTTP_CONFIG_REMAPPER = { 'disable_ssl_validation' : { 'name' : 'tls_verify' , 'default' : False , 'invert' : True , }, ... } ... Support for Unix socket is provided via requests-unixsocket and allows making UDS requests on the unix:// scheme (not supported on Windows until Python adds support for AF_UNIX , see ticket ): url = 'unix:///var/run/docker.sock' response = self . http . get ( url ) Options \u00b6 Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. 
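As a quick recap of the wrapper behavior described above, and before the full option list below, per-call keyword arguments always take precedence over parsed configuration. The instance key `url` and the check name are hypothetical.

```python
from datadog_checks.base import AgentCheck

class MyCheck(AgentCheck):
    def check(self, instance):
        # `verify` and `persist` override tls_verify and persist_connections for this call only.
        response = self.http.get(instance['url'], verify=True, persist=True)
        response.raise_for_status()
```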
auth_token auth_type aws_host aws_region aws_service connect_timeout extra_headers headers kerberos_auth kerberos_cache kerberos_delegate kerberos_force_initiate kerberos_hostname kerberos_keytab kerberos_principal log_requests ntlm_domain password persist_connections proxy read_timeout skip_proxy tls_ca_cert tls_cert tls_use_host_header tls_ignore_warning tls_private_key tls_verify timeout use_legacy_auth_encoding username Future \u00b6 Support for configuring cookies! Since they can be set globally, per-domain, and even per-path, the configuration may be complex if not thought out adequately. We'll discuss options for what that might look like. Only our spark and cisco_aci checks currently set cookies, and that is based on code logic, not configuration.","title":"HTTP"},{"location":"base/http/#http","text":"Whenever you need to make HTTP requests, the base class provides a convenience member that has the same interface as the popular requests library and ensures consistent behavior across all integrations. The wrapper automatically parses and uses configuration from the instance , init_config , and Agent config. Also, this is only done once during initialization and cached to reduce the overhead of every call. For example, to make a GET request you would use: response = self . http . get ( url ) and the wrapper will pass the right things to requests . All methods accept optional keyword arguments like stream , etc. Any method-level option will override configuration. So for example if tls_verify was set to false and you do self.http.get(url, verify=True) , then SSL certificates will be verified on that particular request. You can use the keyword argument persist to override persist_connections . There is also support for non-standard or legacy configurations with the HTTP_CONFIG_REMAPPER class attribute. For example: class MyCheck ( AgentCheck ): HTTP_CONFIG_REMAPPER = { 'disable_ssl_validation' : { 'name' : 'tls_verify' , 'default' : False , 'invert' : True , }, ... } ... Support for Unix socket is provided via requests-unixsocket and allows making UDS requests on the unix:// scheme (not supported on Windows until Python adds support for AF_UNIX , see ticket ): url = 'unix:///var/run/docker.sock' response = self . http . get ( url )","title":"HTTP"},{"location":"base/http/#options","text":"Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. auth_token auth_type aws_host aws_region aws_service connect_timeout extra_headers headers kerberos_auth kerberos_cache kerberos_delegate kerberos_force_initiate kerberos_hostname kerberos_keytab kerberos_principal log_requests ntlm_domain password persist_connections proxy read_timeout skip_proxy tls_ca_cert tls_cert tls_use_host_header tls_ignore_warning tls_private_key tls_verify timeout use_legacy_auth_encoding username","title":"Options"},{"location":"base/http/#future","text":"Support for configuring cookies! Since they can be set globally, per-domain, and even per-path, the configuration may be complex if not thought out adequately. We'll discuss options for what that might look like. 
Only our spark and cisco_aci checks currently set cookies, and that is based on code logic, not configuration.","title":"Future"},{"location":"base/metadata/","text":"Metadata \u00b6 Often, you will want to collect mostly unstructured data that doesn't map well to tags, like fine-grained product version information. The base class provides a method that handles such cases. The collected data is captured by flares , displayed on the Agent's status page , and will eventually be queryable in-app . Interface \u00b6 The set_metadata method of the base class updates cached metadata values, which are then sent by the Agent at regular intervals. It requires 2 arguments: name - The name of the metadata. value - The value for the metadata. If name has no transformer defined then the raw value will be submitted and therefore it must be a str . The method also accepts arbitrary keyword arguments that are forwarded to any defined transformers. Transformers \u00b6 Custom transformers may be defined via a class level attribute METADATA_TRANSFORMERS . This is a mapping of metadata names to functions. When you call self . set_metadata ( name , value , ** options ) , if name is in this mapping then the corresponding function will be called with the value , and the return value(s) will be collected instead. Transformer functions must satisfy the following signature: def transform_ < NAME > ( value : Any , options : dict ) -> Union [ str , Dict [ str , str ]]: If the return type is str , then it will be sent as the value for name . If the return type is a mapping type, then each key will be considered a name and will be sent with its ( str ) value. For example, the following would collect an entity named square with a value of '25' : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): METADATA_TRANSFORMERS = { 'square' : lambda value , options : str ( int ( value ) ** 2 ) } def check ( self , instance ): self . set_metadata ( 'square' , '5' ) There are a few default transformers, which can be overridden by custom transformers. transform_config ( self , config , options ) \u00b6 Note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: METADATA_DEFAULT_CONFIG_INSTANCE METADATA_DEFAULT_CONFIG_INIT_CONFIG This transforms a dict of arbitrary user configuration. A section must be defined indicating what the configuration represents e.g. init_config . The metadata name submitted will become config.
. The value will be a JSON str with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: is_set - a boolean indicating whether or not the field exists value - the value of the field. this is only set if the field exists and the value is a primitive type ( None | bool | float | int | str ) The allowed fields are derived from the optional whitelist and blacklist . By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set metadata_whitelist and/or metadata_blacklist which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. Source code in def transform_config ( self , config , options ): \"\"\" !!! note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: - `METADATA_DEFAULT_CONFIG_INSTANCE` - `METADATA_DEFAULT_CONFIG_INIT_CONFIG` This transforms a `dict` of arbitrary user configuration. A `section` must be defined indicating what the configuration represents e.g. `init_config`. The metadata name submitted will become `config.
`. The value will be a JSON `str` with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: 1. `is_set` - a boolean indicating whether or not the field exists 2. `value` - the value of the field. this is only set if the field exists and the value is a primitive type (`None` | `bool` | `float` | `int` | `str`) The allowed fields are derived from the optional `whitelist` and `blacklist`. By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set `metadata_whitelist` and/or `metadata_blacklist` which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. \"\"\" section = options . get ( 'section' ) if section is None : raise ValueError ( 'The `section` option is required' ) # Although we define the default fields to send in code i.e. the default whitelist, there # may be cases where a subclass (for example of OpenMetricsBaseCheck) would want to ignore # just a few fields, hence for convenience we have the ability to also pass a blacklist. whitelist = config . get ( 'metadata_whitelist' , options . get ( 'whitelist' )) or () blacklist = config . get ( 'metadata_blacklist' , options . get ( 'blacklist' , DEFAULT_BLACKLIST )) or () blacklist = re . compile ( '|' . join ( blacklist ), re . IGNORECASE ) transformed_data = {} data = [] for field in whitelist : if blacklist . search ( field ): self . logger . debug ( 'Skipping metadata submission of blacklisted field ` %s ` in section ` %s `' , field , section ) continue field_data = {} if field in config : field_data [ 'is_set' ] = True value = config [ field ] if is_primitive ( value ): field_data [ 'value' ] = value else : self . logger . debug ( 'Skipping metadata submission of non-primitive type ` %s ` for field ` %s ` in section ` %s `' , type ( value ) . __name__ , field , section , ) else : field_data [ 'is_set' ] = False data . append ( field_data ) if data : # To avoid the backend having to parse a potentially unbounded number of unique keys, we # send `config.` rather than `config..` since # the number of sections is finite (currently only `instance` and `init_config`). transformed_data [ 'config. {} ' . format ( section )] = json . dumps ( data ) return transformed_data transform_version ( self , version , options ) \u00b6 Transforms a version like 1.2.3-rc.4+5 to its constituent parts. In all cases, the metadata names version.raw and version.scheme will be collected. If a scheme is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to semver . The supported schemes are: regex - A pattern must also be defined. The pattern must be a str or a pre-compiled re.Pattern . Any matching named subgroups will then be sent as version. . In this case, the check name will be used as the value of version.scheme unless final_scheme is also set, which will take precedence. parts - A part_map must also be defined. Each key in this mapping will be considered a name and will be sent with its ( str ) value. semver - This is essentially the same as regex with the pattern set to the standard regular expression for semantic versioning. Taking the example above, calling self . 
set_metadata ( 'version' , '1.2.3-rc.4+5' ) would produce: name value version.raw 1.2.3-rc.4+5 version.scheme semver version.major 1 version.minor 2 version.patch 3 version.release rc.4 version.build 5 Source code in def transform_version ( self , version , options ): \"\"\" Transforms a version like `1.2.3-rc.4+5` to its constituent parts. In all cases, the metadata names `version.raw` and `version.scheme` will be collected. If a `scheme` is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to `semver`. The supported schemes are: - `regex` - A `pattern` must also be defined. The pattern must be a `str` or a pre-compiled `re.Pattern`. Any matching named subgroups will then be sent as `version.`. In this case, the check name will be used as the value of `version.scheme` unless `final_scheme` is also set, which will take precedence. - `parts` - A `part_map` must also be defined. Each key in this mapping will be considered a `name` and will be sent with its (`str`) value. - `semver` - This is essentially the same as `regex` with the `pattern` set to the standard regular expression for semantic versioning. Taking the example above, calling `#!python self.set_metadata('version', '1.2.3-rc.4+5')` would produce: | name | value | | --- | --- | | `version.raw` | `1.2.3-rc.4+5` | | `version.scheme` | `semver` | | `version.major` | `1` | | `version.minor` | `2` | | `version.patch` | `3` | | `version.release` | `rc.4` | | `version.build` | `5` | \"\"\" scheme , version_parts = parse_version ( version , options ) if scheme == 'regex' or scheme == 'parts' : scheme = options . get ( 'final_scheme' , self . check_name ) data = { 'version. {} ' . format ( part_name ): part_value for part_name , part_value in iteritems ( version_parts )} data [ 'version.raw' ] = version data [ 'version.scheme' ] = scheme return data","title":"Metadata"},{"location":"base/metadata/#metadata","text":"Often, you will want to collect mostly unstructured data that doesn't map well to tags, like fine-grained product version information. The base class provides a method that handles such cases. The collected data is captured by flares , displayed on the Agent's status page , and will eventually be queryable in-app .","title":"Metadata"},{"location":"base/metadata/#interface","text":"The set_metadata method of the base class updates cached metadata values, which are then sent by the Agent at regular intervals. It requires 2 arguments: name - The name of the metadata. value - The value for the metadata. If name has no transformer defined then the raw value will be submitted and therefore it must be a str . The method also accepts arbitrary keyword arguments that are forwarded to any defined transformers.","title":"Interface"},{"location":"base/metadata/#transformers","text":"Custom transformers may be defined via a class level attribute METADATA_TRANSFORMERS . This is a mapping of metadata names to functions. When you call self . set_metadata ( name , value , ** options ) , if name is in this mapping then the corresponding function will be called with the value , and the return value(s) will be collected instead. Transformer functions must satisfy the following signature: def transform_ < NAME > ( value : Any , options : dict ) -> Union [ str , Dict [ str , str ]]: If the return type is str , then it will be sent as the value for name . If the return type is a mapping type, then each key will be considered a name and will be sent with its ( str ) value. 
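To illustrate the mapping return type, a transformer can fan one submitted value out into several metadata names, as in this minimal sketch; the check name, the `endpoint_info` metadata name, and the parsing logic are hypothetical:

```python
from datadog_checks.base import AgentCheck


class MetadataExampleCheck(AgentCheck):
    # Hypothetical check used only to demonstrate a mapping-returning transformer.
    METADATA_TRANSFORMERS = {
        # Returning a dict submits one entry per key instead of a single value.
        'endpoint_info': lambda value, options: {
            'endpoint_info.scheme': value.split('://')[0],
            'endpoint_info.address': value.split('://')[1],
        }
    }

    def check(self, instance):
        # Collects `endpoint_info.scheme` = 'https' and
        # `endpoint_info.address` = 'app.example.com'.
        self.set_metadata('endpoint_info', 'https://app.example.com')
```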
For example, the following would collect an entity named square with a value of '25' : from datadog_checks.base import AgentCheck class AwesomeCheck ( AgentCheck ): METADATA_TRANSFORMERS = { 'square' : lambda value , options : str ( int ( value ) ** 2 ) } def check ( self , instance ): self . set_metadata ( 'square' , '5' ) There are a few default transformers, which can be overridden by custom transformers.","title":"Transformers"},{"location":"base/metadata/#datadog_checks.base.utils.metadata.core.MetadataManager.transform_config","text":"Note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: METADATA_DEFAULT_CONFIG_INSTANCE METADATA_DEFAULT_CONFIG_INIT_CONFIG This transforms a dict of arbitrary user configuration. A section must be defined indicating what the configuration represents e.g. init_config . The metadata name submitted will become config.
. The value will be a JSON str with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: is_set - a boolean indicating whether or not the field exists value - the value of the field. this is only set if the field exists and the value is a primitive type ( None | bool | float | int | str ) The allowed fields are derived from the optional whitelist and blacklist . By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set metadata_whitelist and/or metadata_blacklist which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. Source code in def transform_config ( self , config , options ): \"\"\" !!! note You should never need to collect configuration data directly, but instead define 2 class level attributes that will be used as whitelists of fields to allow: - `METADATA_DEFAULT_CONFIG_INSTANCE` - `METADATA_DEFAULT_CONFIG_INIT_CONFIG` This transforms a `dict` of arbitrary user configuration. A `section` must be defined indicating what the configuration represents e.g. `init_config`. The metadata name submitted will become `config.
`. The value will be a JSON `str` with the root being an array. There will be one map element for every allowed field. Every map may have 2 entries: 1. `is_set` - a boolean indicating whether or not the field exists 2. `value` - the value of the field. this is only set if the field exists and the value is a primitive type (`None` | `bool` | `float` | `int` | `str`) The allowed fields are derived from the optional `whitelist` and `blacklist`. By default, nothing will be sent. User configuration can override defaults allowing complete, granular control of metadata submissions. In any section, one may set `metadata_whitelist` and/or `metadata_blacklist` which will override their keyword argument counterparts. In following our standard, blacklists take precedence over whitelists. Blacklists are special in that each item is considered a regular expression. \"\"\" section = options . get ( 'section' ) if section is None : raise ValueError ( 'The `section` option is required' ) # Although we define the default fields to send in code i.e. the default whitelist, there # may be cases where a subclass (for example of OpenMetricsBaseCheck) would want to ignore # just a few fields, hence for convenience we have the ability to also pass a blacklist. whitelist = config . get ( 'metadata_whitelist' , options . get ( 'whitelist' )) or () blacklist = config . get ( 'metadata_blacklist' , options . get ( 'blacklist' , DEFAULT_BLACKLIST )) or () blacklist = re . compile ( '|' . join ( blacklist ), re . IGNORECASE ) transformed_data = {} data = [] for field in whitelist : if blacklist . search ( field ): self . logger . debug ( 'Skipping metadata submission of blacklisted field ` %s ` in section ` %s `' , field , section ) continue field_data = {} if field in config : field_data [ 'is_set' ] = True value = config [ field ] if is_primitive ( value ): field_data [ 'value' ] = value else : self . logger . debug ( 'Skipping metadata submission of non-primitive type ` %s ` for field ` %s ` in section ` %s `' , type ( value ) . __name__ , field , section , ) else : field_data [ 'is_set' ] = False data . append ( field_data ) if data : # To avoid the backend having to parse a potentially unbounded number of unique keys, we # send `config.` rather than `config..` since # the number of sections is finite (currently only `instance` and `init_config`). transformed_data [ 'config. {} ' . format ( section )] = json . dumps ( data ) return transformed_data","title":"transform_config()"},{"location":"base/metadata/#datadog_checks.base.utils.metadata.core.MetadataManager.transform_version","text":"Transforms a version like 1.2.3-rc.4+5 to its constituent parts. In all cases, the metadata names version.raw and version.scheme will be collected. If a scheme is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to semver . The supported schemes are: regex - A pattern must also be defined. The pattern must be a str or a pre-compiled re.Pattern . Any matching named subgroups will then be sent as version. . In this case, the check name will be used as the value of version.scheme unless final_scheme is also set, which will take precedence. parts - A part_map must also be defined. Each key in this mapping will be considered a name and will be sent with its ( str ) value. semver - This is essentially the same as regex with the pattern set to the standard regular expression for semantic versioning. Taking the example above, calling self . 
set_metadata ( 'version' , '1.2.3-rc.4+5' ) would produce: name value version.raw 1.2.3-rc.4+5 version.scheme semver version.major 1 version.minor 2 version.patch 3 version.release rc.4 version.build 5 Source code in def transform_version ( self , version , options ): \"\"\" Transforms a version like `1.2.3-rc.4+5` to its constituent parts. In all cases, the metadata names `version.raw` and `version.scheme` will be collected. If a `scheme` is defined then it will be looked up from our known schemes. If no scheme is defined then it will default to `semver`. The supported schemes are: - `regex` - A `pattern` must also be defined. The pattern must be a `str` or a pre-compiled `re.Pattern`. Any matching named subgroups will then be sent as `version.`. In this case, the check name will be used as the value of `version.scheme` unless `final_scheme` is also set, which will take precedence. - `parts` - A `part_map` must also be defined. Each key in this mapping will be considered a `name` and will be sent with its (`str`) value. - `semver` - This is essentially the same as `regex` with the `pattern` set to the standard regular expression for semantic versioning. Taking the example above, calling `#!python self.set_metadata('version', '1.2.3-rc.4+5')` would produce: | name | value | | --- | --- | | `version.raw` | `1.2.3-rc.4+5` | | `version.scheme` | `semver` | | `version.major` | `1` | | `version.minor` | `2` | | `version.patch` | `3` | | `version.release` | `rc.4` | | `version.build` | `5` | \"\"\" scheme , version_parts = parse_version ( version , options ) if scheme == 'regex' or scheme == 'parts' : scheme = options . get ( 'final_scheme' , self . check_name ) data = { 'version. {} ' . format ( part_name ): part_value for part_name , part_value in iteritems ( version_parts )} data [ 'version.raw' ] = version data [ 'version.scheme' ] = scheme return data","title":"transform_version()"},{"location":"base/prometheus/","text":"Prometheus \u00b6 Prometheus is an open source monitoring system for timeseries metric data. Many Datadog integrations collect metrics based on Prometheus exported data sets. Prometheus-based integrations use the OpenMetrics exposition format to collect metrics. Interface \u00b6 All functionality is exposed by the OpenMetricsBaseCheck and OpenMetricsScraperMixin classes. datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck \u00b6 OpenMetricsBaseCheck is a class that helps scrape endpoints that emit Prometheus metrics only with YAML configurations. Minimal example configuration: instances: - prometheus_url: http://example.com/endpoint namespace: \"foobar\" metrics: - bar - foo Agent 6 signature: OpenMetricsBaseCheck(name, init_config, instances, default_instances=None, default_namespace=None) __init__ ( self , * args , ** kwargs ) special \u00b6 The base class for any Prometheus-based integration. Source code in def __init__ ( self , * args , ** kwargs ): \"\"\" The base class for any Prometheus-based integration. \"\"\" args = list ( args ) default_instances = kwargs . pop ( 'default_instances' , None ) or {} default_namespace = kwargs . pop ( 'default_namespace' , None ) legacy_kwargs_in_args = args [ 4 :] del args [ 4 :] if len ( legacy_kwargs_in_args ) > 0 : default_instances = legacy_kwargs_in_args [ 0 ] or {} if len ( legacy_kwargs_in_args ) > 1 : default_namespace = legacy_kwargs_in_args [ 1 ] super ( OpenMetricsBaseCheck , self ) . __init__ ( * args , ** kwargs ) self . config_map = {} self . _http_handlers = {} self . 
default_instances = default_instances self . default_namespace = default_namespace # pre-generate the scraper configurations if 'instances' in kwargs : instances = kwargs [ 'instances' ] elif len ( args ) == 4 : # instances from agent 5 signature instances = args [ 3 ] elif isinstance ( args [ 2 ], ( tuple , list )): # instances from agent 6 signature instances = args [ 2 ] else : instances = None if instances is not None : for instance in instances : self . get_scraper_config ( instance ) check ( self , instance ) \u00b6 Source code in def check ( self , instance ): # Get the configuration for this specific instance scraper_config = self . get_scraper_config ( instance ) # We should be specifying metrics for checks that are vanilla OpenMetricsBaseCheck-based if not scraper_config [ 'metrics_mapper' ]: raise CheckException ( \"You have to collect at least one metric from the endpoint: {} \" . format ( scraper_config [ 'prometheus_url' ]) ) self . process ( scraper_config ) get_scraper_config ( self , instance ) \u00b6 Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. Source code in def get_scraper_config ( self , instance ): \"\"\" Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. \"\"\" endpoint = instance . get ( 'prometheus_url' ) if endpoint is None : raise CheckException ( \"Unable to find prometheus URL in config file.\" ) # If we've already created the corresponding scraper configuration, return it if endpoint in self . config_map : return self . config_map [ endpoint ] # Otherwise, we create the scraper configuration config = self . create_scraper_configuration ( instance ) # Add this configuration to the config_map self . config_map [ endpoint ] = config return config datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin \u00b6 create_scraper_configuration ( self , instance = None ) \u00b6 Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the init_config . Otherwise, the default_instance value will be used. A default mixin configuration will be returned if there is no instance. Source code in def create_scraper_configuration ( self , instance = None ): \"\"\" Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the `init_config`. Otherwise, the `default_instance` value will be used. A default mixin configuration will be returned if there is no instance. \"\"\" if 'openmetrics_endpoint' in instance : raise CheckException ( 'The setting `openmetrics_endpoint` is only available for Agent version 7 or later' ) # We can choose to create a default mixin configuration for an empty instance if instance is None : instance = {} # Supports new configuration options config = copy . deepcopy ( instance ) # Set the endpoint endpoint = instance . get ( 'prometheus_url' ) if instance and endpoint is None : raise CheckException ( \"You have to define a prometheus_url for each prometheus instance\" ) config [ 'prometheus_url' ] = endpoint # `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the # child check class. namespace = instance . get ( 'namespace' ) # Check if we have a namespace if instance and namespace is None : if self . 
default_namespace is None : raise CheckException ( \"You have to define a namespace for each prometheus check\" ) namespace = self . default_namespace config [ 'namespace' ] = namespace # Retrieve potential default instance settings for the namespace default_instance = self . default_instances . get ( namespace , {}) # `metrics_mapper` is a dictionary where the keys are the metrics to capture # and the values are the corresponding metrics names to have in datadog. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. # Metrics are preprocessed if no mapping metrics_mapper = {} # We merge list and dictionaries from optional defaults & instance settings metrics = default_instance . get ( 'metrics' , []) + instance . get ( 'metrics' , []) for metric in metrics : if isinstance ( metric , string_types ): metrics_mapper [ metric ] = metric else : metrics_mapper . update ( metric ) config [ 'metrics_mapper' ] = metrics_mapper # `_wildcards_re` is a Pattern object used to match metric wildcards config [ '_wildcards_re' ] = None wildcards = set () for metric in config [ 'metrics_mapper' ]: if \"*\" in metric : wildcards . add ( translate ( metric )) if wildcards : config [ '_wildcards_re' ] = compile ( '|' . join ( wildcards )) # `prometheus_metrics_prefix` allows to specify a prefix that all # prometheus metrics should have. This can be used when the prometheus # endpoint we are scrapping allows to add a custom prefix to it's # metrics. config [ 'prometheus_metrics_prefix' ] = instance . get ( 'prometheus_metrics_prefix' , default_instance . get ( 'prometheus_metrics_prefix' , '' ) ) # `label_joins` holds the configuration for extracting 1:1 labels from # a target metric to all metric matching the label, example: # self.label_joins = { # 'kube_pod_info': { # 'labels_to_match': ['pod'], # 'labels_to_get': ['node', 'host_ip'] # } # } config [ 'label_joins' ] = default_instance . get ( 'label_joins' , {}) config [ 'label_joins' ] . update ( instance . get ( 'label_joins' , {})) # `_label_mapping` holds the additionals label info to add for a specific # label value, example: # self._label_mapping = { # 'pod': { # 'dd-agent-9s1l1': { # \"node\": \"yolo\", # \"host_ip\": \"yey\" # } # } # } config [ '_label_mapping' ] = {} # `_active_label_mapping` holds a dictionary of label values found during the run # to cleanup the label_mapping of unused values, example: # self._active_label_mapping = { # 'pod': { # 'dd-agent-9s1l1': True # } # } config [ '_active_label_mapping' ] = {} # `_watched_labels` holds the sets of labels to watch for enrichment config [ '_watched_labels' ] = {} config [ '_dry_run' ] = True # Some metrics are ignored because they are duplicates or introduce a # very high cardinality. Metrics included in this list will be silently # skipped without a 'Unable to handle metric' debug line in the logs config [ 'ignore_metrics' ] = instance . get ( 'ignore_metrics' , default_instance . get ( 'ignore_metrics' , [])) config [ '_ignored_metrics' ] = set () # `_ignored_re` is a Pattern object used to match ignored metric patterns config [ '_ignored_re' ] = None ignored_patterns = set () # Separate ignored metric names and ignored patterns in different sets for faster lookup later for metric in config [ 'ignore_metrics' ]: if '*' in metric : ignored_patterns . add ( translate ( metric )) else : config [ '_ignored_metrics' ] . add ( metric ) if ignored_patterns : config [ '_ignored_re' ] = compile ( '|' . 
join ( ignored_patterns )) # Ignore metrics based on label keys or specific label values config [ 'ignore_metrics_by_labels' ] = instance . get ( 'ignore_metrics_by_labels' , default_instance . get ( 'ignore_metrics_by_labels' , {}) ) # If you want to send the buckets as tagged values when dealing with histograms, # set send_histograms_buckets to True, set to False otherwise. config [ 'send_histograms_buckets' ] = is_affirmative ( instance . get ( 'send_histograms_buckets' , default_instance . get ( 'send_histograms_buckets' , True )) ) # If you want the bucket to be non cumulative and to come with upper/lower bound tags # set non_cumulative_buckets to True, enabled when distribution metrics are enabled. config [ 'non_cumulative_buckets' ] = is_affirmative ( instance . get ( 'non_cumulative_buckets' , default_instance . get ( 'non_cumulative_buckets' , False )) ) # Send histograms as datadog distribution metrics config [ 'send_distribution_buckets' ] = is_affirmative ( instance . get ( 'send_distribution_buckets' , default_instance . get ( 'send_distribution_buckets' , False )) ) # Non cumulative buckets are mandatory for distribution metrics if config [ 'send_distribution_buckets' ] is True : config [ 'non_cumulative_buckets' ] = True # If you want to send `counter` metrics as monotonic counts, set this value to True. # Set to False if you want to instead send those metrics as `gauge`. config [ 'send_monotonic_counter' ] = is_affirmative ( instance . get ( 'send_monotonic_counter' , default_instance . get ( 'send_monotonic_counter' , True )) ) # If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True. config [ 'send_monotonic_with_gauge' ] = is_affirmative ( instance . get ( 'send_monotonic_with_gauge' , default_instance . get ( 'send_monotonic_with_gauge' , False )) ) config [ 'send_distribution_counts_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_counts_as_monotonic' , default_instance . get ( 'send_distribution_counts_as_monotonic' , False ), ) ) config [ 'send_distribution_sums_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_sums_as_monotonic' , default_instance . get ( 'send_distribution_sums_as_monotonic' , False ), ) ) # If the `labels_mapper` dictionary is provided, the metrics labels names # in the `labels_mapper` will use the corresponding value as tag name # when sending the gauges. config [ 'labels_mapper' ] = default_instance . get ( 'labels_mapper' , {}) config [ 'labels_mapper' ] . update ( instance . get ( 'labels_mapper' , {})) # Rename bucket \"le\" label to \"upper_bound\" config [ 'labels_mapper' ][ 'le' ] = 'upper_bound' # `exclude_labels` is an array of labels names to exclude. Those labels # will just not be added as tags when submitting the metric. config [ 'exclude_labels' ] = default_instance . get ( 'exclude_labels' , []) + instance . get ( 'exclude_labels' , []) # `type_overrides` is a dictionary where the keys are prometheus metric names # and the values are a metric type (name as string) to use instead of the one # listed in the payload. It can be used to force a type on untyped metrics. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. config [ 'type_overrides' ] = default_instance . get ( 'type_overrides' , {}) config [ 'type_overrides' ] . update ( instance . 
get ( 'type_overrides' , {})) # `_type_override_patterns` is a dictionary where we store Pattern objects # that match metric names as keys, and their corresponding metric type overrrides as values. config [ '_type_override_patterns' ] = {} with_wildcards = set () for metric , type in iteritems ( config [ 'type_overrides' ]): if '*' in metric : config [ '_type_override_patterns' ][ compile ( translate ( metric ))] = type with_wildcards . add ( metric ) # cleanup metric names with wildcards from the 'type_overrides' dict for metric in with_wildcards : del config [ 'type_overrides' ][ metric ] # Some metrics are retrieved from differents hosts and often # a label can hold this information, this transfers it to the hostname config [ 'label_to_hostname' ] = instance . get ( 'label_to_hostname' , default_instance . get ( 'label_to_hostname' , None )) # In combination to label_as_hostname, allows to add a common suffix to the hostnames # submitted. This can be used for instance to discriminate hosts between clusters. config [ 'label_to_hostname_suffix' ] = instance . get ( 'label_to_hostname_suffix' , default_instance . get ( 'label_to_hostname_suffix' , None ) ) # Add a 'health' service check for the prometheus endpoint config [ 'health_service_check' ] = is_affirmative ( instance . get ( 'health_service_check' , default_instance . get ( 'health_service_check' , True )) ) # Can either be only the path to the certificate and thus you should specify the private key # or it can be the path to a file containing both the certificate & the private key config [ 'ssl_cert' ] = instance . get ( 'ssl_cert' , default_instance . get ( 'ssl_cert' , None )) # Needed if the certificate does not include the private key # # /!\\ The private key to your local certificate must be unencrypted. # Currently, Requests does not support using encrypted keys. config [ 'ssl_private_key' ] = instance . get ( 'ssl_private_key' , default_instance . get ( 'ssl_private_key' , None )) # The path to the trusted CA used for generating custom certificates config [ 'ssl_ca_cert' ] = instance . get ( 'ssl_ca_cert' , default_instance . get ( 'ssl_ca_cert' , None )) # Whether or not to validate SSL certificates config [ 'ssl_verify' ] = is_affirmative ( instance . get ( 'ssl_verify' , default_instance . get ( 'ssl_verify' , True ))) # Extra http headers to be sent when polling endpoint config [ 'extra_headers' ] = default_instance . get ( 'extra_headers' , {}) config [ 'extra_headers' ] . update ( instance . get ( 'extra_headers' , {})) # Timeout used during the network request config [ 'prometheus_timeout' ] = instance . get ( 'prometheus_timeout' , default_instance . get ( 'prometheus_timeout' , 10 ) ) # Authentication used when polling endpoint config [ 'username' ] = instance . get ( 'username' , default_instance . get ( 'username' , None )) config [ 'password' ] = instance . get ( 'password' , default_instance . get ( 'password' , None )) # Custom tags that will be sent with each metric config [ 'custom_tags' ] = instance . get ( 'tags' , []) # Some tags can be ignored to reduce the cardinality. # This can be useful for cost optimization in containerized environments # when the openmetrics check is configured to collect custom metrics. # Even when the Agent's Tagger is configured to add low-cardinality tags only, # some tags can still generate unwanted metric contexts (e.g pod annotations as tags). ignore_tags = instance . get ( 'ignore_tags' , default_instance . 
get ( 'ignore_tags' , [])) if ignore_tags : ignored_tags_re = compile ( '|' . join ( set ( ignore_tags ))) config [ 'custom_tags' ] = [ tag for tag in config [ 'custom_tags' ] if not ignored_tags_re . search ( tag )] # Additional tags to be sent with each metric config [ '_metric_tags' ] = [] # List of strings to filter the input text payload on. If any line contains # one of these strings, it will be filtered out before being parsed. # INTERNAL FEATURE, might be removed in future versions config [ '_text_filter_blacklist' ] = [] # Whether or not to use the service account bearer token for authentication # if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token # as a default path to get the token. config [ 'bearer_token_auth' ] = is_affirmative ( instance . get ( 'bearer_token_auth' , default_instance . get ( 'bearer_token_auth' , False )) ) # Can be used to get a service account bearer token from files # other than /var/run/secrets/kubernetes.io/serviceaccount/token # 'bearer_token_auth' should be enabled. config [ 'bearer_token_path' ] = instance . get ( 'bearer_token_path' , default_instance . get ( 'bearer_token_path' , None )) # The service account bearer token to be used for authentication config [ '_bearer_token' ] = self . _get_bearer_token ( config [ 'bearer_token_auth' ], config [ 'bearer_token_path' ]) config [ 'telemetry' ] = is_affirmative ( instance . get ( 'telemetry' , default_instance . get ( 'telemetry' , False ))) # The metric name services use to indicate build information config [ 'metadata_metric_name' ] = instance . get ( 'metadata_metric_name' , default_instance . get ( 'metadata_metric_name' ) ) # Map of metadata key names to label names config [ 'metadata_label_map' ] = instance . get ( 'metadata_label_map' , default_instance . get ( 'metadata_label_map' , {}) ) config [ '_default_metric_transformers' ] = {} if config [ 'metadata_metric_name' ] and config [ 'metadata_label_map' ]: config [ '_default_metric_transformers' ][ config [ 'metadata_metric_name' ]] = self . transform_metadata # Whether or not to enable flushing of the first value of monotonic counts config [ '_successfully_executed' ] = False return config parse_metric_family ( self , response , scraper_config ) \u00b6 Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object. The text format uses iter_lines() generator. Source code in def parse_metric_family ( self , response , scraper_config ): \"\"\" Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object. The text format uses iter_lines() generator. \"\"\" if response . encoding is None : response . encoding = 'utf-8' input_gen = response . iter_lines ( chunk_size = self . REQUESTS_CHUNK_SIZE , decode_unicode = True ) if scraper_config [ '_text_filter_blacklist' ]: input_gen = self . _text_filter_input ( input_gen , scraper_config ) for metric in text_fd_to_metric_families ( input_gen ): self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_INPUT_COUNT , len ( metric . samples ), scraper_config ) type_override = scraper_config [ 'type_overrides' ] . get ( metric . name ) if type_override : metric . type = type_override elif scraper_config [ '_type_override_patterns' ]: for pattern , new_type in iteritems ( scraper_config [ '_type_override_patterns' ]): if pattern . search ( metric . name ): metric . type = new_type break if metric . type not in self . METRIC_TYPES : continue metric . name = self . 
_remove_metric_prefix ( metric . name , scraper_config ) yield metric poll ( self , scraper_config , headers = None ) \u00b6 Returns a valid requests.Response , otherwise raise requests.HTTPError if the status code of the response isn't valid - see response.raise_for_status() The caller needs to close the requests.Response. Custom headers can be added to the default headers. Source code in def poll ( self , scraper_config , headers = None ): \"\"\" Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the response isn't valid - see `response.raise_for_status()` The caller needs to close the requests.Response. Custom headers can be added to the default headers. \"\"\" endpoint = scraper_config . get ( 'prometheus_url' ) # Should we send a service check for when we make a request health_service_check = scraper_config [ 'health_service_check' ] service_check_name = self . _metric_name_with_namespace ( 'prometheus.health' , scraper_config ) service_check_tags = [ 'endpoint: {} ' . format ( endpoint )] service_check_tags . extend ( scraper_config [ 'custom_tags' ]) try : response = self . send_request ( endpoint , scraper_config , headers ) except requests . exceptions . SSLError : self . log . error ( \"Invalid SSL settings for requesting %s endpoint\" , endpoint ) raise except IOError : if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise try : response . raise_for_status () if health_service_check : self . service_check ( service_check_name , AgentCheck . OK , tags = service_check_tags ) return response except requests . HTTPError : response . close () if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise process ( self , scraper_config , metric_transformers = None ) \u00b6 Polls the data from Prometheus and submits them as Datadog metrics. endpoint is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a tags attribute, it will be pushed automatically as additional custom tags and added to the metrics Source code in def process ( self , scraper_config , metric_transformers = None ): \"\"\" Polls the data from Prometheus and submits them as Datadog metrics. `endpoint` is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a `tags` attribute, it will be pushed automatically as additional custom tags and added to the metrics \"\"\" transformers = scraper_config [ '_default_metric_transformers' ] . copy () if metric_transformers : transformers . update ( metric_transformers ) for metric in self . scrape_metrics ( scraper_config ): self . 
process_metric ( metric , scraper_config , metric_transformers = transformers ) scraper_config [ '_successfully_executed' ] = True process_metric ( self , metric , scraper_config , metric_transformers = None ) \u00b6 Handle a Prometheus metric according to the following flow: - search scraper_config['metrics_mapper'] for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked metric_transformers is a dict of : Source code in def process_metric ( self , metric , scraper_config , metric_transformers = None ): \"\"\" Handle a Prometheus metric according to the following flow: - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked `metric_transformers` is a dict of `:` \"\"\" # If targeted metric, store labels self . _store_labels ( metric , scraper_config ) if scraper_config [ 'ignore_metrics' ]: if metric . name in scraper_config [ '_ignored_metrics' ]: self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric if scraper_config [ '_ignored_re' ] and scraper_config [ '_ignored_re' ] . search ( metric . name ): # Metric must be ignored scraper_config [ '_ignored_metrics' ] . add ( metric . name ) self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_PROCESS_COUNT , len ( metric . samples ), scraper_config ) if self . _filter_metric ( metric , scraper_config ): return # Ignore the metric # Filter metric to see if we can enrich with joined labels self . _join_labels ( metric , scraper_config ) if scraper_config [ '_dry_run' ]: return try : self . submit_openmetric ( scraper_config [ 'metrics_mapper' ][ metric . name ], metric , scraper_config ) except KeyError : if metric_transformers is not None and metric . name in metric_transformers : try : # Get the transformer function for this specific metric transformer = metric_transformers [ metric . name ] transformer ( metric , scraper_config ) except Exception as err : self . log . warning ( 'Error handling metric: %s - error: %s ' , metric . name , err ) return # check for wilcards in transformers for transformer_name , transformer in iteritems ( metric_transformers ): if transformer_name . endswith ( '*' ) and metric . name . startswith ( transformer_name [: - 1 ]): transformer ( metric , scraper_config , transformer_name ) # try matching wildcards if scraper_config [ '_wildcards_re' ] and scraper_config [ '_wildcards_re' ] . search ( metric . name ): self . submit_openmetric ( metric . name , metric , scraper_config ) return self . log . debug ( 'Skipping metric ` %s ` as it is not defined in the metrics mapper, ' 'has no transformer function, nor does it match any wildcards.' , metric . name , ) scrape_metrics ( self , scraper_config ) \u00b6 Poll the data from Prometheus and return the metrics as a generator. Source code in def scrape_metrics ( self , scraper_config ): \"\"\" Poll the data from Prometheus and return the metrics as a generator. \"\"\" response = self . poll ( scraper_config ) if scraper_config [ 'telemetry' ]: if 'content-length' in response . headers : content_len = int ( response . headers [ 'content-length' ]) else : content_len = len ( response . content ) self . 
_send_telemetry_gauge ( self . TELEMETRY_GAUGE_MESSAGE_SIZE , content_len , scraper_config ) try : # no dry run if no label joins if not scraper_config [ 'label_joins' ]: scraper_config [ '_dry_run' ] = False elif not scraper_config [ '_watched_labels' ]: watched = scraper_config [ '_watched_labels' ] watched [ 'sets' ] = {} watched [ 'keys' ] = {} watched [ 'singles' ] = set () for key , val in iteritems ( scraper_config [ 'label_joins' ]): labels = [] if 'labels_to_match' in val : labels = val [ 'labels_to_match' ] elif 'label_to_match' in val : self . log . warning ( \"`label_to_match` is being deprecated, please use `labels_to_match`\" ) if isinstance ( val [ 'label_to_match' ], list ): labels = val [ 'label_to_match' ] else : labels = [ val [ 'label_to_match' ]] if labels : s = frozenset ( labels ) watched [ 'sets' ][ key ] = s watched [ 'keys' ][ key ] = ',' . join ( s ) if len ( labels ) == 1 : watched [ 'singles' ] . add ( labels [ 0 ]) for metric in self . parse_metric_family ( response , scraper_config ): yield metric # Set dry run off scraper_config [ '_dry_run' ] = False # Garbage collect unused mapping and reset active labels for metric , mapping in list ( iteritems ( scraper_config [ '_label_mapping' ])): for key in list ( mapping ): if ( metric in scraper_config [ '_active_label_mapping' ] and key not in scraper_config [ '_active_label_mapping' ][ metric ] ): del scraper_config [ '_label_mapping' ][ metric ][ key ] scraper_config [ '_active_label_mapping' ] = {} finally : response . close () submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ) \u00b6 For each sample in the metric, report it as a gauge with all labels as tags except if a labels dict is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. send_histograms_buckets is used to specify if you want to send the buckets as tagged values when dealing with histograms. custom_tags is an array of tag:value that will be added to the metric when sending the gauge to Datadog. Source code in def submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ): \"\"\" For each sample in the metric, report it as a gauge with all labels as tags except if a labels `dict` is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. `send_histograms_buckets` is used to specify if you want to send the buckets as tagged values when dealing with histograms. `custom_tags` is an array of `tag:value` that will be added to the metric when sending the gauge to Datadog. \"\"\" if metric . type in [ \"gauge\" , \"counter\" , \"rate\" ]: metric_name_with_namespace = self . _metric_name_with_namespace ( metric_name , scraper_config ) for sample in metric . samples : if self . _ignore_metrics_by_label ( scraper_config , metric_name , sample ): continue val = sample [ self . SAMPLE_VALUE ] if not self . _is_value_valid ( val ): self . log . debug ( \"Metric value is not supported for metric %s \" , sample [ self . SAMPLE_NAME ]) continue custom_hostname = self . _get_hostname ( hostname , sample , scraper_config ) # Determine the tags to send tags = self . _metric_tags ( metric_name , val , sample , scraper_config , hostname = custom_hostname ) if metric . 
type == \"counter\" and scraper_config [ 'send_monotonic_counter' ]: self . monotonic_count ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"rate\" : self . rate ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) else : self . gauge ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) # Metric is a \"counter\" but legacy behavior has \"send_as_monotonic\" defaulted to False # Submit metric as monotonic_count with appended name if metric . type == \"counter\" and scraper_config [ 'send_monotonic_with_gauge' ]: self . monotonic_count ( metric_name_with_namespace + '.total' , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"histogram\" : self . _submit_gauges_from_histogram ( metric_name , metric , scraper_config ) elif metric . type == \"summary\" : self . _submit_gauges_from_summary ( metric_name , metric , scraper_config ) else : self . log . error ( \"Metric type %s unsupported for metric %s .\" , metric . type , metric_name ) Options \u00b6 Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. All HTTP options are also supported. prometheus_url namespace metrics prometheus_metrics_prefix health_service_check label_to_hostname label_joins labels_mapper type_overrides send_histograms_buckets send_distribution_buckets send_monotonic_counter send_monotonic_with_gauge send_distribution_counts_as_monotonic send_distribution_sums_as_monotonic exclude_labels bearer_token_auth bearer_token_path ignore_metrics Prometheus to Datadog metric types \u00b6 The Openmetrics Base Check supports various configurations for submitting Prometheus metrics to Datadog. We currently support Prometheus gauge , counter , histogram , and summary metric types. Gauge \u00b6 A gauge metric represents a single numerical value that can arbitrarily go up or down. Prometheus gauge metrics are submitted as Datadog gauge metrics. Counter \u00b6 A Prometheus counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. Config Option Value Datadog Metric Submitted send_monotonic_counter true (default) monotonic_count false gauge Histogram \u00b6 A Prometheus histogram samples observations and counts them in configurable buckets along with a sum of all observed values. Histogram metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. _bucket represent the cumulative counters for the observation buckets. Note that buckets are only submitted if send_histogram_buckets is enabled. Subtype Config Option Value Datadog Metric Submitted send_distribution_buckets true The entire histogram can be submitted as a single distribution metric . If the option is enabled, none of the subtype metrics will be submitted. 
_sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _bucket non_cumulative_buckets false (default) gauge true monotonic_count under .count metric name if send_distribution_counts_as_monotonic is enabled. Otherwise, gauge . Summary \u00b6 Prometheus summary metrics are similar to histograms but allow configurable quantiles. Summary metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. metrics with labels like {quantile=\"<\u03c6>\"} represent the streaming quantiles of observed events. Subtype Config Option Value Datadog Metric Submitted _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _quantile gauge","title":"Prometheus"},{"location":"base/prometheus/#prometheus","text":"Prometheus is an open source monitoring system for timeseries metric data. Many Datadog integrations collect metrics based on Prometheus exported data sets. Prometheus-based integrations use the OpenMetrics exposition format to collect metrics.","title":"Prometheus"},{"location":"base/prometheus/#interface","text":"All functionality is exposed by the OpenMetricsBaseCheck and OpenMetricsScraperMixin classes.","title":"Interface"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck","text":"OpenMetricsBaseCheck is a class that helps scrape endpoints that emit Prometheus metrics only with YAML configurations. Minimal example configuration: instances: - prometheus_url: http://example.com/endpoint namespace: \"foobar\" metrics: - bar - foo Agent 6 signature: OpenMetricsBaseCheck(name, init_config, instances, default_instances=None, default_namespace=None)","title":"OpenMetricsBaseCheck"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.__init__","text":"The base class for any Prometheus-based integration. Source code in def __init__ ( self , * args , ** kwargs ): \"\"\" The base class for any Prometheus-based integration. \"\"\" args = list ( args ) default_instances = kwargs . pop ( 'default_instances' , None ) or {} default_namespace = kwargs . pop ( 'default_namespace' , None ) legacy_kwargs_in_args = args [ 4 :] del args [ 4 :] if len ( legacy_kwargs_in_args ) > 0 : default_instances = legacy_kwargs_in_args [ 0 ] or {} if len ( legacy_kwargs_in_args ) > 1 : default_namespace = legacy_kwargs_in_args [ 1 ] super ( OpenMetricsBaseCheck , self ) . __init__ ( * args , ** kwargs ) self . config_map = {} self . _http_handlers = {} self . default_instances = default_instances self . default_namespace = default_namespace # pre-generate the scraper configurations if 'instances' in kwargs : instances = kwargs [ 'instances' ] elif len ( args ) == 4 : # instances from agent 5 signature instances = args [ 3 ] elif isinstance ( args [ 2 ], ( tuple , list )): # instances from agent 6 signature instances = args [ 2 ] else : instances = None if instances is not None : for instance in instances : self . 
get_scraper_config ( instance )","title":"__init__()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.check","text":"Source code in def check ( self , instance ): # Get the configuration for this specific instance scraper_config = self . get_scraper_config ( instance ) # We should be specifying metrics for checks that are vanilla OpenMetricsBaseCheck-based if not scraper_config [ 'metrics_mapper' ]: raise CheckException ( \"You have to collect at least one metric from the endpoint: {} \" . format ( scraper_config [ 'prometheus_url' ]) ) self . process ( scraper_config )","title":"check()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.base_check.OpenMetricsBaseCheck.get_scraper_config","text":"Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. Source code in def get_scraper_config ( self , instance ): \"\"\" Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. \"\"\" endpoint = instance . get ( 'prometheus_url' ) if endpoint is None : raise CheckException ( \"Unable to find prometheus URL in config file.\" ) # If we've already created the corresponding scraper configuration, return it if endpoint in self . config_map : return self . config_map [ endpoint ] # Otherwise, we create the scraper configuration config = self . create_scraper_configuration ( instance ) # Add this configuration to the config_map self . config_map [ endpoint ] = config return config","title":"get_scraper_config()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin","text":"","title":"OpenMetricsScraperMixin"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.create_scraper_configuration","text":"Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the init_config . Otherwise, the default_instance value will be used. A default mixin configuration will be returned if there is no instance. Source code in def create_scraper_configuration ( self , instance = None ): \"\"\" Creates a scraper configuration. If instance does not specify a value for a configuration option, the value will default to the `init_config`. Otherwise, the `default_instance` value will be used. A default mixin configuration will be returned if there is no instance. \"\"\" if 'openmetrics_endpoint' in instance : raise CheckException ( 'The setting `openmetrics_endpoint` is only available for Agent version 7 or later' ) # We can choose to create a default mixin configuration for an empty instance if instance is None : instance = {} # Supports new configuration options config = copy . deepcopy ( instance ) # Set the endpoint endpoint = instance . get ( 'prometheus_url' ) if instance and endpoint is None : raise CheckException ( \"You have to define a prometheus_url for each prometheus instance\" ) config [ 'prometheus_url' ] = endpoint # `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the # child check class. namespace = instance . get ( 'namespace' ) # Check if we have a namespace if instance and namespace is None : if self . 
default_namespace is None : raise CheckException ( \"You have to define a namespace for each prometheus check\" ) namespace = self . default_namespace config [ 'namespace' ] = namespace # Retrieve potential default instance settings for the namespace default_instance = self . default_instances . get ( namespace , {}) # `metrics_mapper` is a dictionary where the keys are the metrics to capture # and the values are the corresponding metrics names to have in datadog. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. # Metrics are preprocessed if no mapping metrics_mapper = {} # We merge list and dictionaries from optional defaults & instance settings metrics = default_instance . get ( 'metrics' , []) + instance . get ( 'metrics' , []) for metric in metrics : if isinstance ( metric , string_types ): metrics_mapper [ metric ] = metric else : metrics_mapper . update ( metric ) config [ 'metrics_mapper' ] = metrics_mapper # `_wildcards_re` is a Pattern object used to match metric wildcards config [ '_wildcards_re' ] = None wildcards = set () for metric in config [ 'metrics_mapper' ]: if \"*\" in metric : wildcards . add ( translate ( metric )) if wildcards : config [ '_wildcards_re' ] = compile ( '|' . join ( wildcards )) # `prometheus_metrics_prefix` allows to specify a prefix that all # prometheus metrics should have. This can be used when the prometheus # endpoint we are scrapping allows to add a custom prefix to it's # metrics. config [ 'prometheus_metrics_prefix' ] = instance . get ( 'prometheus_metrics_prefix' , default_instance . get ( 'prometheus_metrics_prefix' , '' ) ) # `label_joins` holds the configuration for extracting 1:1 labels from # a target metric to all metric matching the label, example: # self.label_joins = { # 'kube_pod_info': { # 'labels_to_match': ['pod'], # 'labels_to_get': ['node', 'host_ip'] # } # } config [ 'label_joins' ] = default_instance . get ( 'label_joins' , {}) config [ 'label_joins' ] . update ( instance . get ( 'label_joins' , {})) # `_label_mapping` holds the additionals label info to add for a specific # label value, example: # self._label_mapping = { # 'pod': { # 'dd-agent-9s1l1': { # \"node\": \"yolo\", # \"host_ip\": \"yey\" # } # } # } config [ '_label_mapping' ] = {} # `_active_label_mapping` holds a dictionary of label values found during the run # to cleanup the label_mapping of unused values, example: # self._active_label_mapping = { # 'pod': { # 'dd-agent-9s1l1': True # } # } config [ '_active_label_mapping' ] = {} # `_watched_labels` holds the sets of labels to watch for enrichment config [ '_watched_labels' ] = {} config [ '_dry_run' ] = True # Some metrics are ignored because they are duplicates or introduce a # very high cardinality. Metrics included in this list will be silently # skipped without a 'Unable to handle metric' debug line in the logs config [ 'ignore_metrics' ] = instance . get ( 'ignore_metrics' , default_instance . get ( 'ignore_metrics' , [])) config [ '_ignored_metrics' ] = set () # `_ignored_re` is a Pattern object used to match ignored metric patterns config [ '_ignored_re' ] = None ignored_patterns = set () # Separate ignored metric names and ignored patterns in different sets for faster lookup later for metric in config [ 'ignore_metrics' ]: if '*' in metric : ignored_patterns . add ( translate ( metric )) else : config [ '_ignored_metrics' ] . add ( metric ) if ignored_patterns : config [ '_ignored_re' ] = compile ( '|' . 
join ( ignored_patterns )) # Ignore metrics based on label keys or specific label values config [ 'ignore_metrics_by_labels' ] = instance . get ( 'ignore_metrics_by_labels' , default_instance . get ( 'ignore_metrics_by_labels' , {}) ) # If you want to send the buckets as tagged values when dealing with histograms, # set send_histograms_buckets to True, set to False otherwise. config [ 'send_histograms_buckets' ] = is_affirmative ( instance . get ( 'send_histograms_buckets' , default_instance . get ( 'send_histograms_buckets' , True )) ) # If you want the bucket to be non cumulative and to come with upper/lower bound tags # set non_cumulative_buckets to True, enabled when distribution metrics are enabled. config [ 'non_cumulative_buckets' ] = is_affirmative ( instance . get ( 'non_cumulative_buckets' , default_instance . get ( 'non_cumulative_buckets' , False )) ) # Send histograms as datadog distribution metrics config [ 'send_distribution_buckets' ] = is_affirmative ( instance . get ( 'send_distribution_buckets' , default_instance . get ( 'send_distribution_buckets' , False )) ) # Non cumulative buckets are mandatory for distribution metrics if config [ 'send_distribution_buckets' ] is True : config [ 'non_cumulative_buckets' ] = True # If you want to send `counter` metrics as monotonic counts, set this value to True. # Set to False if you want to instead send those metrics as `gauge`. config [ 'send_monotonic_counter' ] = is_affirmative ( instance . get ( 'send_monotonic_counter' , default_instance . get ( 'send_monotonic_counter' , True )) ) # If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True. config [ 'send_monotonic_with_gauge' ] = is_affirmative ( instance . get ( 'send_monotonic_with_gauge' , default_instance . get ( 'send_monotonic_with_gauge' , False )) ) config [ 'send_distribution_counts_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_counts_as_monotonic' , default_instance . get ( 'send_distribution_counts_as_monotonic' , False ), ) ) config [ 'send_distribution_sums_as_monotonic' ] = is_affirmative ( instance . get ( 'send_distribution_sums_as_monotonic' , default_instance . get ( 'send_distribution_sums_as_monotonic' , False ), ) ) # If the `labels_mapper` dictionary is provided, the metrics labels names # in the `labels_mapper` will use the corresponding value as tag name # when sending the gauges. config [ 'labels_mapper' ] = default_instance . get ( 'labels_mapper' , {}) config [ 'labels_mapper' ] . update ( instance . get ( 'labels_mapper' , {})) # Rename bucket \"le\" label to \"upper_bound\" config [ 'labels_mapper' ][ 'le' ] = 'upper_bound' # `exclude_labels` is an array of labels names to exclude. Those labels # will just not be added as tags when submitting the metric. config [ 'exclude_labels' ] = default_instance . get ( 'exclude_labels' , []) + instance . get ( 'exclude_labels' , []) # `type_overrides` is a dictionary where the keys are prometheus metric names # and the values are a metric type (name as string) to use instead of the one # listed in the payload. It can be used to force a type on untyped metrics. # Note: it is empty in the parent class but will need to be # overloaded/hardcoded in the final check not to be counted as custom metric. config [ 'type_overrides' ] = default_instance . get ( 'type_overrides' , {}) config [ 'type_overrides' ] . update ( instance . 
get ( 'type_overrides' , {})) # `_type_override_patterns` is a dictionary where we store Pattern objects # that match metric names as keys, and their corresponding metric type overrrides as values. config [ '_type_override_patterns' ] = {} with_wildcards = set () for metric , type in iteritems ( config [ 'type_overrides' ]): if '*' in metric : config [ '_type_override_patterns' ][ compile ( translate ( metric ))] = type with_wildcards . add ( metric ) # cleanup metric names with wildcards from the 'type_overrides' dict for metric in with_wildcards : del config [ 'type_overrides' ][ metric ] # Some metrics are retrieved from differents hosts and often # a label can hold this information, this transfers it to the hostname config [ 'label_to_hostname' ] = instance . get ( 'label_to_hostname' , default_instance . get ( 'label_to_hostname' , None )) # In combination to label_as_hostname, allows to add a common suffix to the hostnames # submitted. This can be used for instance to discriminate hosts between clusters. config [ 'label_to_hostname_suffix' ] = instance . get ( 'label_to_hostname_suffix' , default_instance . get ( 'label_to_hostname_suffix' , None ) ) # Add a 'health' service check for the prometheus endpoint config [ 'health_service_check' ] = is_affirmative ( instance . get ( 'health_service_check' , default_instance . get ( 'health_service_check' , True )) ) # Can either be only the path to the certificate and thus you should specify the private key # or it can be the path to a file containing both the certificate & the private key config [ 'ssl_cert' ] = instance . get ( 'ssl_cert' , default_instance . get ( 'ssl_cert' , None )) # Needed if the certificate does not include the private key # # /!\\ The private key to your local certificate must be unencrypted. # Currently, Requests does not support using encrypted keys. config [ 'ssl_private_key' ] = instance . get ( 'ssl_private_key' , default_instance . get ( 'ssl_private_key' , None )) # The path to the trusted CA used for generating custom certificates config [ 'ssl_ca_cert' ] = instance . get ( 'ssl_ca_cert' , default_instance . get ( 'ssl_ca_cert' , None )) # Whether or not to validate SSL certificates config [ 'ssl_verify' ] = is_affirmative ( instance . get ( 'ssl_verify' , default_instance . get ( 'ssl_verify' , True ))) # Extra http headers to be sent when polling endpoint config [ 'extra_headers' ] = default_instance . get ( 'extra_headers' , {}) config [ 'extra_headers' ] . update ( instance . get ( 'extra_headers' , {})) # Timeout used during the network request config [ 'prometheus_timeout' ] = instance . get ( 'prometheus_timeout' , default_instance . get ( 'prometheus_timeout' , 10 ) ) # Authentication used when polling endpoint config [ 'username' ] = instance . get ( 'username' , default_instance . get ( 'username' , None )) config [ 'password' ] = instance . get ( 'password' , default_instance . get ( 'password' , None )) # Custom tags that will be sent with each metric config [ 'custom_tags' ] = instance . get ( 'tags' , []) # Some tags can be ignored to reduce the cardinality. # This can be useful for cost optimization in containerized environments # when the openmetrics check is configured to collect custom metrics. # Even when the Agent's Tagger is configured to add low-cardinality tags only, # some tags can still generate unwanted metric contexts (e.g pod annotations as tags). ignore_tags = instance . get ( 'ignore_tags' , default_instance . 
get ( 'ignore_tags' , [])) if ignore_tags : ignored_tags_re = compile ( '|' . join ( set ( ignore_tags ))) config [ 'custom_tags' ] = [ tag for tag in config [ 'custom_tags' ] if not ignored_tags_re . search ( tag )] # Additional tags to be sent with each metric config [ '_metric_tags' ] = [] # List of strings to filter the input text payload on. If any line contains # one of these strings, it will be filtered out before being parsed. # INTERNAL FEATURE, might be removed in future versions config [ '_text_filter_blacklist' ] = [] # Whether or not to use the service account bearer token for authentication # if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token # as a default path to get the token. config [ 'bearer_token_auth' ] = is_affirmative ( instance . get ( 'bearer_token_auth' , default_instance . get ( 'bearer_token_auth' , False )) ) # Can be used to get a service account bearer token from files # other than /var/run/secrets/kubernetes.io/serviceaccount/token # 'bearer_token_auth' should be enabled. config [ 'bearer_token_path' ] = instance . get ( 'bearer_token_path' , default_instance . get ( 'bearer_token_path' , None )) # The service account bearer token to be used for authentication config [ '_bearer_token' ] = self . _get_bearer_token ( config [ 'bearer_token_auth' ], config [ 'bearer_token_path' ]) config [ 'telemetry' ] = is_affirmative ( instance . get ( 'telemetry' , default_instance . get ( 'telemetry' , False ))) # The metric name services use to indicate build information config [ 'metadata_metric_name' ] = instance . get ( 'metadata_metric_name' , default_instance . get ( 'metadata_metric_name' ) ) # Map of metadata key names to label names config [ 'metadata_label_map' ] = instance . get ( 'metadata_label_map' , default_instance . get ( 'metadata_label_map' , {}) ) config [ '_default_metric_transformers' ] = {} if config [ 'metadata_metric_name' ] and config [ 'metadata_label_map' ]: config [ '_default_metric_transformers' ][ config [ 'metadata_metric_name' ]] = self . transform_metadata # Whether or not to enable flushing of the first value of monotonic counts config [ '_successfully_executed' ] = False return config","title":"create_scraper_configuration()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.parse_metric_family","text":"Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object. The text format uses iter_lines() generator. Source code in def parse_metric_family ( self , response , scraper_config ): \"\"\" Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object. The text format uses iter_lines() generator. \"\"\" if response . encoding is None : response . encoding = 'utf-8' input_gen = response . iter_lines ( chunk_size = self . REQUESTS_CHUNK_SIZE , decode_unicode = True ) if scraper_config [ '_text_filter_blacklist' ]: input_gen = self . _text_filter_input ( input_gen , scraper_config ) for metric in text_fd_to_metric_families ( input_gen ): self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_INPUT_COUNT , len ( metric . samples ), scraper_config ) type_override = scraper_config [ 'type_overrides' ] . get ( metric . name ) if type_override : metric . type = type_override elif scraper_config [ '_type_override_patterns' ]: for pattern , new_type in iteritems ( scraper_config [ '_type_override_patterns' ]): if pattern . search ( metric . name ): metric . 
type = new_type break if metric . type not in self . METRIC_TYPES : continue metric . name = self . _remove_metric_prefix ( metric . name , scraper_config ) yield metric","title":"parse_metric_family()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.poll","text":"Returns a valid requests.Response , otherwise raise requests.HTTPError if the status code of the response isn't valid - see response.raise_for_status() The caller needs to close the requests.Response. Custom headers can be added to the default headers. Source code in def poll ( self , scraper_config , headers = None ): \"\"\" Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the response isn't valid - see `response.raise_for_status()` The caller needs to close the requests.Response. Custom headers can be added to the default headers. \"\"\" endpoint = scraper_config . get ( 'prometheus_url' ) # Should we send a service check for when we make a request health_service_check = scraper_config [ 'health_service_check' ] service_check_name = self . _metric_name_with_namespace ( 'prometheus.health' , scraper_config ) service_check_tags = [ 'endpoint: {} ' . format ( endpoint )] service_check_tags . extend ( scraper_config [ 'custom_tags' ]) try : response = self . send_request ( endpoint , scraper_config , headers ) except requests . exceptions . SSLError : self . log . error ( \"Invalid SSL settings for requesting %s endpoint\" , endpoint ) raise except IOError : if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise try : response . raise_for_status () if health_service_check : self . service_check ( service_check_name , AgentCheck . OK , tags = service_check_tags ) return response except requests . HTTPError : response . close () if health_service_check : self . service_check ( service_check_name , AgentCheck . CRITICAL , tags = service_check_tags ) raise","title":"poll()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.process","text":"Polls the data from Prometheus and submits them as Datadog metrics. endpoint is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a tags attribute, it will be pushed automatically as additional custom tags and added to the metrics Source code in def process ( self , scraper_config , metric_transformers = None ): \"\"\" Polls the data from Prometheus and submits them as Datadog metrics. `endpoint` is the metrics endpoint to use to poll metrics from Prometheus Note that if the instance has a `tags` attribute, it will be pushed automatically as additional custom tags and added to the metrics \"\"\" transformers = scraper_config [ '_default_metric_transformers' ] . copy () if metric_transformers : transformers . update ( metric_transformers ) for metric in self . scrape_metrics ( scraper_config ): self . 
process_metric ( metric , scraper_config , metric_transformers = transformers ) scraper_config [ '_successfully_executed' ] = True","title":"process()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.process_metric","text":"Handle a Prometheus metric according to the following flow: - search scraper_config['metrics_mapper'] for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked metric_transformers is a dict of : Source code in def process_metric ( self , metric , scraper_config , metric_transformers = None ): \"\"\" Handle a Prometheus metric according to the following flow: - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping - call check method with the same name as the metric - log info if none of the above worked `metric_transformers` is a dict of `:` \"\"\" # If targeted metric, store labels self . _store_labels ( metric , scraper_config ) if scraper_config [ 'ignore_metrics' ]: if metric . name in scraper_config [ '_ignored_metrics' ]: self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric if scraper_config [ '_ignored_re' ] and scraper_config [ '_ignored_re' ] . search ( metric . name ): # Metric must be ignored scraper_config [ '_ignored_metrics' ] . add ( metric . name ) self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_IGNORE_COUNT , len ( metric . samples ), scraper_config ) return # Ignore the metric self . _send_telemetry_counter ( self . TELEMETRY_COUNTER_METRICS_PROCESS_COUNT , len ( metric . samples ), scraper_config ) if self . _filter_metric ( metric , scraper_config ): return # Ignore the metric # Filter metric to see if we can enrich with joined labels self . _join_labels ( metric , scraper_config ) if scraper_config [ '_dry_run' ]: return try : self . submit_openmetric ( scraper_config [ 'metrics_mapper' ][ metric . name ], metric , scraper_config ) except KeyError : if metric_transformers is not None and metric . name in metric_transformers : try : # Get the transformer function for this specific metric transformer = metric_transformers [ metric . name ] transformer ( metric , scraper_config ) except Exception as err : self . log . warning ( 'Error handling metric: %s - error: %s ' , metric . name , err ) return # check for wilcards in transformers for transformer_name , transformer in iteritems ( metric_transformers ): if transformer_name . endswith ( '*' ) and metric . name . startswith ( transformer_name [: - 1 ]): transformer ( metric , scraper_config , transformer_name ) # try matching wildcards if scraper_config [ '_wildcards_re' ] and scraper_config [ '_wildcards_re' ] . search ( metric . name ): self . submit_openmetric ( metric . name , metric , scraper_config ) return self . log . debug ( 'Skipping metric ` %s ` as it is not defined in the metrics mapper, ' 'has no transformer function, nor does it match any wildcards.' , metric . name , )","title":"process_metric()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.scrape_metrics","text":"Poll the data from Prometheus and return the metrics as a generator. Source code in def scrape_metrics ( self , scraper_config ): \"\"\" Poll the data from Prometheus and return the metrics as a generator. \"\"\" response = self . 
poll ( scraper_config ) if scraper_config [ 'telemetry' ]: if 'content-length' in response . headers : content_len = int ( response . headers [ 'content-length' ]) else : content_len = len ( response . content ) self . _send_telemetry_gauge ( self . TELEMETRY_GAUGE_MESSAGE_SIZE , content_len , scraper_config ) try : # no dry run if no label joins if not scraper_config [ 'label_joins' ]: scraper_config [ '_dry_run' ] = False elif not scraper_config [ '_watched_labels' ]: watched = scraper_config [ '_watched_labels' ] watched [ 'sets' ] = {} watched [ 'keys' ] = {} watched [ 'singles' ] = set () for key , val in iteritems ( scraper_config [ 'label_joins' ]): labels = [] if 'labels_to_match' in val : labels = val [ 'labels_to_match' ] elif 'label_to_match' in val : self . log . warning ( \"`label_to_match` is being deprecated, please use `labels_to_match`\" ) if isinstance ( val [ 'label_to_match' ], list ): labels = val [ 'label_to_match' ] else : labels = [ val [ 'label_to_match' ]] if labels : s = frozenset ( labels ) watched [ 'sets' ][ key ] = s watched [ 'keys' ][ key ] = ',' . join ( s ) if len ( labels ) == 1 : watched [ 'singles' ] . add ( labels [ 0 ]) for metric in self . parse_metric_family ( response , scraper_config ): yield metric # Set dry run off scraper_config [ '_dry_run' ] = False # Garbage collect unused mapping and reset active labels for metric , mapping in list ( iteritems ( scraper_config [ '_label_mapping' ])): for key in list ( mapping ): if ( metric in scraper_config [ '_active_label_mapping' ] and key not in scraper_config [ '_active_label_mapping' ][ metric ] ): del scraper_config [ '_label_mapping' ][ metric ][ key ] scraper_config [ '_active_label_mapping' ] = {} finally : response . close ()","title":"scrape_metrics()"},{"location":"base/prometheus/#datadog_checks.base.checks.openmetrics.mixins.OpenMetricsScraperMixin.submit_openmetric","text":"For each sample in the metric, report it as a gauge with all labels as tags except if a labels dict is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. send_histograms_buckets is used to specify if you want to send the buckets as tagged values when dealing with histograms. custom_tags is an array of tag:value that will be added to the metric when sending the gauge to Datadog. Source code in def submit_openmetric ( self , metric_name , metric , scraper_config , hostname = None ): \"\"\" For each sample in the metric, report it as a gauge with all labels as tags except if a labels `dict` is passed, in which case keys are label names we'll extract and corresponding values are tag names we'll use (eg: {'node': 'node'}). Histograms generate a set of values instead of a unique metric. `send_histograms_buckets` is used to specify if you want to send the buckets as tagged values when dealing with histograms. `custom_tags` is an array of `tag:value` that will be added to the metric when sending the gauge to Datadog. \"\"\" if metric . type in [ \"gauge\" , \"counter\" , \"rate\" ]: metric_name_with_namespace = self . _metric_name_with_namespace ( metric_name , scraper_config ) for sample in metric . samples : if self . _ignore_metrics_by_label ( scraper_config , metric_name , sample ): continue val = sample [ self . SAMPLE_VALUE ] if not self . _is_value_valid ( val ): self . log . debug ( \"Metric value is not supported for metric %s \" , sample [ self . 
SAMPLE_NAME ]) continue custom_hostname = self . _get_hostname ( hostname , sample , scraper_config ) # Determine the tags to send tags = self . _metric_tags ( metric_name , val , sample , scraper_config , hostname = custom_hostname ) if metric . type == \"counter\" and scraper_config [ 'send_monotonic_counter' ]: self . monotonic_count ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"rate\" : self . rate ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) else : self . gauge ( metric_name_with_namespace , val , tags = tags , hostname = custom_hostname ) # Metric is a \"counter\" but legacy behavior has \"send_as_monotonic\" defaulted to False # Submit metric as monotonic_count with appended name if metric . type == \"counter\" and scraper_config [ 'send_monotonic_with_gauge' ]: self . monotonic_count ( metric_name_with_namespace + '.total' , val , tags = tags , hostname = custom_hostname , flush_first_value = scraper_config [ '_successfully_executed' ], ) elif metric . type == \"histogram\" : self . _submit_gauges_from_histogram ( metric_name , metric , scraper_config ) elif metric . type == \"summary\" : self . _submit_gauges_from_summary ( metric_name , metric , scraper_config ) else : self . log . error ( \"Metric type %s unsupported for metric %s .\" , metric . type , metric_name )","title":"submit_openmetric()"},{"location":"base/prometheus/#options","text":"Some options can be set globally in init_config (with instances taking precedence). For complete documentation of every option, see the associated configuration templates for the instances and init_config sections. All HTTP options are also supported. prometheus_url namespace metrics prometheus_metrics_prefix health_service_check label_to_hostname label_joins labels_mapper type_overrides send_histograms_buckets send_distribution_buckets send_monotonic_counter send_monotonic_with_gauge send_distribution_counts_as_monotonic send_distribution_sums_as_monotonic exclude_labels bearer_token_auth bearer_token_path ignore_metrics","title":"Options"},{"location":"base/prometheus/#prometheus-to-datadog-metric-types","text":"The Openmetrics Base Check supports various configurations for submitting Prometheus metrics to Datadog. We currently support Prometheus gauge , counter , histogram , and summary metric types.","title":"Prometheus to Datadog metric types"},{"location":"base/prometheus/#gauge","text":"A gauge metric represents a single numerical value that can arbitrarily go up or down. Prometheus gauge metrics are submitted as Datadog gauge metrics.","title":"Gauge"},{"location":"base/prometheus/#counter","text":"A Prometheus counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. Config Option Value Datadog Metric Submitted send_monotonic_counter true (default) monotonic_count false gauge","title":"Counter"},{"location":"base/prometheus/#histogram","text":"A Prometheus histogram samples observations and counts them in configurable buckets along with a sum of all observed values. Histogram metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. 
_bucket represent the cumulative counters for the observation buckets. Note that buckets are only submitted if send_histogram_buckets is enabled. Subtype Config Option Value Datadog Metric Submitted send_distribution_buckets true The entire histogram can be submitted as a single distribution metric . If the option is enabled, none of the subtype metrics will be submitted. _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _bucket non_cumulative_buckets false (default) gauge true monotonic_count under .count metric name if send_distribution_counts_as_monotonic is enabled. Otherwise, gauge .","title":"Histogram"},{"location":"base/prometheus/#summary","text":"Prometheus summary metrics are similar to histograms but allow configurable quantiles. Summary metrics ending in: _sum represent the total sum of all observed values. Generally sums are like counters but it's also possible for a negative observation which would not behave like a typical always increasing counter. _count represent the total number of events that have been observed. metrics with labels like {quantile=\"<\u03c6>\"} represent the streaming quantiles of observed events. Subtype Config Option Value Datadog Metric Submitted _sum send_distribution_sums_as_monotonic false (default) gauge true monotonic_count _count send_distribution_counts_as_monotonic false (default) gauge true monotonic_count _quantile gauge","title":"Summary"},{"location":"base/tls/","text":"TLS/SSL \u00b6 TLS/SSL is widely used to provide communications over a secure network. Many of the software that Datadog supports has features to allow TLS/SSL. Therefore, the Datadog Agent may need to connect with TLS/SSL to get metrics. Getting started \u00b6 For Agent v7.24+, checks compatible with TLS/SSL should not manually create a raw ssl.SSLContext . Instead, check implementations should use AgentCheck.get_tls_context() to obtain a TLS/SSL context. get_tls_context() allows a few optional parameters which may be helpful when developing integrations. datadog_checks . base . checks . base . AgentCheck . get_tls_context ( self , refresh = False , overrides = None ) \u00b6 Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using overrides . This should only be applied to older integration that manually set config values. Since: Agent 7.24 Source code in def get_tls_context ( self , refresh = False , overrides = None ): # type: (bool, Dict[AnyStr, Any]) -> ssl.SSLContext \"\"\" Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using `overrides`. This should only be applied to older integration that manually set config values. Since: Agent 7.24 \"\"\" if not hasattr ( self , '_tls_context_wrapper' ): self . _tls_context_wrapper = TlsContextWrapper ( self . instance or {}, self . TLS_CONFIG_REMAPPER , overrides = overrides ) if refresh : self . _tls_context_wrapper . refresh_tls_context () return self . _tls_context_wrapper . tls_context","title":"TLS/SSL"},{"location":"base/tls/#tlsssl","text":"TLS/SSL is widely used to provide communications over a secure network. Many of the software that Datadog supports has features to allow TLS/SSL. 
Therefore, the Datadog Agent may need to connect with TLS/SSL to get metrics.","title":"TLS/SSL"},{"location":"base/tls/#getting-started","text":"For Agent v7.24+, checks compatible with TLS/SSL should not manually create a raw ssl.SSLContext . Instead, check implementations should use AgentCheck.get_tls_context() to obtain a TLS/SSL context. get_tls_context() allows a few optional parameters which may be helpful when developing integrations.","title":"Getting started"},{"location":"base/tls/#datadog_checks.base.checks.base.AgentCheck.get_tls_context","text":"Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using overrides . This should only be applied to older integration that manually set config values. Since: Agent 7.24 Source code in def get_tls_context ( self , refresh = False , overrides = None ): # type: (bool, Dict[AnyStr, Any]) -> ssl.SSLContext \"\"\" Creates and cache an SSLContext instance based on user configuration. Note that user configuration can be overridden by using `overrides`. This should only be applied to older integration that manually set config values. Since: Agent 7.24 \"\"\" if not hasattr ( self , '_tls_context_wrapper' ): self . _tls_context_wrapper = TlsContextWrapper ( self . instance or {}, self . TLS_CONFIG_REMAPPER , overrides = overrides ) if refresh : self . _tls_context_wrapper . refresh_tls_context () return self . _tls_context_wrapper . tls_context","title":"get_tls_context()"},{"location":"ddev/about/","text":"What's in the box? \u00b6 The Dev package , often referred to as its CLI entrypoint ddev , is fundamentally split into 2 parts. Test framework \u00b6 The test framework provides everything necessary to test integrations, such as: Dependencies like pytest , mock , requests , etc. Utilities for consistently handling complex logic or common operations An orchestrator for arbitrary E2E environments CLI \u00b6 The CLI provides the interface through which tests are invoked, E2E environments are managed, and general repository maintenance (such as dependency management) occurs. Separation \u00b6 As the dependencies of the test framework are a subset of what is required for the CLI, the CLI tooling may import from the test framework, but not vice versa. The diagram below shows the import hierarchy between each component. Clicking a node will open that component's location in the source code. 
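To make the test framework's role more concrete, here is a minimal sketch of the kind of conftest.py an integration might use to orchestrate an E2E environment; the compose file path and instance values are placeholders, not taken from this documentation:

import os

import pytest

from datadog_checks.dev import docker_run, get_here

INSTANCE = {'prometheus_url': 'http://localhost:9090/metrics', 'namespace': 'example', 'metrics': ['*']}


@pytest.fixture(scope='session')
def dd_environment():
    # Spin up the docker-compose environment and wait for the endpoint to respond
    compose_file = os.path.join(get_here(), 'docker', 'docker-compose.yaml')  # hypothetical path
    with docker_run(compose_file, endpoints=[INSTANCE['prometheus_url']]):
        yield INSTANCE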
graph BT A([Plugins]) click A "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/plugin" "Test framework plugins location" B([Test framework]) click B "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev" "Test framework location" C([CLI]) click C "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/tooling" "CLI tooling location" A-->B C-->B","title":"What's in the box?"},{"location":"ddev/about/#whats-in-the-box","text":"The Dev package , often referred to as its CLI entrypoint ddev , is fundamentally split into 2 parts.","title":"What's in the box?"},{"location":"ddev/about/#test-framework","text":"The test framework provides everything necessary to test integrations, such as: Dependencies like pytest , mock , requests , etc. Utilities for consistently handling complex logic or common operations An orchestrator for arbitrary E2E environments","title":"Test framework"},{"location":"ddev/about/#cli","text":"The CLI provides the interface through which tests are invoked, E2E environments are managed, and general repository maintenance (such as dependency management) occurs.","title":"CLI"},{"location":"ddev/about/#separation","text":"As the dependencies of the test framework are a subset of what is required for the CLI, the CLI tooling may import from the test framework, but not vice versa. The diagram below shows the import hierarchy between each component. Clicking a node will open that component's location in the source code. graph BT A([Plugins]) click A "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/plugin" "Test framework plugins location" B([Test framework]) click B "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev" "Test framework location" C([CLI]) click C "https://github.com/DataDog/integrations-core/tree/master/datadog_checks_dev/datadog_checks/dev/tooling" "CLI tooling location" A-->B C-->B","title":"Separation"},{"location":"ddev/cli/","text":"ddev \u00b6 Usage: ddev [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --core , -c boolean Work on integrations-core . False --extras , -e boolean Work on integrations-extras . False --agent , -a boolean Work on datadog-agent . False --marketplace , -m boolean Work on marketplace . False --here , -x boolean Work on the current location. False --color / --no-color boolean Whether or not to display colored output (default true). required --quiet , -q boolean Silence output False --debug , -d boolean Include debug output False --version boolean Show the version and exit. False --help boolean Show this message and exit. False ddev agent \u00b6 A collection of tasks related to the Datadog Agent Usage: ddev agent [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit.
False ddev agent changelog \u00b6 Generates a markdown file containing the list of checks that changed for a given Agent release. Agent version numbers are derived by inspecting tags on integrations-core, so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since nor --to is passed (the most common use case), the tool will generate the whole changelog since Agent version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent changelog [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False ddev agent integrations \u00b6 Generates a markdown file containing the list of integrations shipped in a given Agent release. Agent version numbers are derived by inspecting tags on integrations-core, so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since nor --to is passed (the most common use case), the tool will generate the list for every Agent since version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent integrations [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False ddev agent integrations-changelog \u00b6 Update integration CHANGELOG.md by adding the Agent version. The Agent version is only added to integration versions released with a specific Agent release. Usage: ddev agent integrations-changelog [OPTIONS] [CHECKS]... Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --help boolean Show this message and exit. False ddev agent requirements \u00b6 Write the requirements-agent-release.txt file at the root of the repo listing all the Agent-based integrations pinned at the version they currently have in HEAD. Usage: ddev agent requirements [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev ci \u00b6 CI-related utils. Anything here should be considered experimental. Usage: ddev ci [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev ci setup \u00b6 Run CI setup scripts Usage: ddev ci setup [OPTIONS] [CHECKS]... Options: Name Type Description Default --changed boolean Only target changed checks False --help boolean Show this message and exit. False ddev clean \u00b6 Remove build and test artifacts for the given CHECK. If CHECK is not specified, the current working directory is used. Usage: ddev clean [OPTIONS] [CHECK] Options: Name Type Description Default --compiled-only , -c boolean Remove compiled files only (*.pyc, *.pyd, *.pyo, *.whl, __pycache__ ). False --all , -a boolean Disable the detection of a project's dedicated virtual env and/or editable installation. By default, these will not be considered.
False --force , -f boolean If set and the command is run from the root directory, allow removing build and test artifacts (*.egg-info, .benchmarks, .cache, .coverage, .eggs, .pytest_cache, .tox, build, dist). False --verbose , -v boolean Shows removed paths. False --help boolean Show this message and exit. False ddev config \u00b6 Manage the config file Usage: ddev config [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev config edit \u00b6 Edit the config file with your default EDITOR. Usage: ddev config edit [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config explore \u00b6 Open the config location in your file manager. Usage: ddev config explore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config find \u00b6 Show the location of the config file. Usage: ddev config find [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config restore \u00b6 Restore the config file to default settings. Usage: ddev config restore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config set \u00b6 Assigns values to config file entries. If the value is omitted, you will be prompted, with the input hidden if it is sensitive. $ ddev config set github.user foo New setting: [github] user = \"foo\" You can also assign values on a per-org basis. $ ddev config set orgs..api_key New setting: [orgs.] api_key = \"***********\" Usage: ddev config set [OPTIONS] KEY [VALUE] Options: Name Type Description Default --help boolean Show this message and exit. False ddev config show \u00b6 Show the contents of the config file. Usage: ddev config show [OPTIONS] Options: Name Type Description Default --all , -a boolean Do not scrub secret fields False --help boolean Show this message and exit. False ddev config update \u00b6 Update the config file with any new fields. Usage: ddev config update [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev create \u00b6 Create scaffolding for a new integration. NAME: The display name of the integration that will appear in documentation. Usage: ddev create [OPTIONS] NAME Options: Name Type Description Default --type , -t choice ( check | jmx | logs | snmp_tile | tile ) The type of integration to create check --location , -l text The directory where files will be written required --non-interactive , -ni boolean Disable prompting for fields False --quiet , -q boolean Show less output False --dry-run , -n boolean Only show what would be created False --help boolean Show this message and exit. False ddev dep \u00b6 Manage dependencies Usage: ddev dep [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev dep freeze \u00b6 Combine all dependencies for the Agent's static environment. Usage: ddev dep freeze [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev dep pin \u00b6 Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to none will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments.
Usage: ddev dep pin [OPTIONS] PACKAGE VERSION Options: Name Type Description Default --marker , -m text Environment marker to use required --help boolean Show this message and exit. False ddev docs \u00b6 Manage documentation Usage: ddev docs [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev docs build \u00b6 Build documentation. Usage: ddev docs build [OPTIONS] Options: Name Type Description Default --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --help boolean Show this message and exit. False ddev docs deploy \u00b6 Deploy built documentation. Usage: ddev docs deploy [OPTIONS] [BRANCH] Options: Name Type Description Default --yes , -y boolean N/A False --help boolean Show this message and exit. False ddev docs serve \u00b6 Serve and view documentation in a web browser. Usage: ddev docs serve [OPTIONS] Options: Name Type Description Default --no-open , -n boolean Do not open the documentation in a web browser False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --dirty boolean Speed up reload time by only rebuilding edited pages (based on modified time). For development only. False --help boolean Show this message and exit. False ddev env \u00b6 Manage environments Usage: ddev env [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev env check \u00b6 Run an Agent check. Usage: ddev env check [OPTIONS] CHECK [ENV] Options: Name Type Description Default --rate , -r boolean Compute rates by running the check twice with a pause between each run False --times , -t integer Number of times to run the check required --pause integer Number of milliseconds to pause between multiple check runs required --delay , -d integer Delay in milliseconds between running the check and grabbing what was collected required --log-level , -l text Set the log level (default off ) required --json boolean Format the aggregator and check runner output as JSON False --table boolean Format the aggregator and check runner output as tabular False --breakpoint , -b integer Line number to start a PDB session (0: first line, -1: last line) required --config text Path to a JSON check configuration to use required --jmx-list text JMX metrics listing method required --help boolean Show this message and exit. False ddev env edit \u00b6 Start an environment. Usage: ddev env edit [OPTIONS] CHECK ENV Options: Name Type Description Default --editor , -e text Editor to use required --help boolean Show this message and exit. False ddev env ls \u00b6 List active or available environments. Usage: ddev env ls [OPTIONS] [CHECKS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev env prune \u00b6 Remove all configuration for environments. Usage: ddev env prune [OPTIONS] Options: Name Type Description Default --force , -f boolean N/A False --help boolean Show this message and exit. False ddev env reload \u00b6 Restart an Agent to detect environment changes. Usage: ddev env reload [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False ddev env shell \u00b6 Run a shell inside the Agent docker container. 
Usage: ddev env shell [OPTIONS] CHECK [ENV] Options: Name Type Description Default -c , --exec-command text Optionally execute command inside container, executes after any installs required -v , --install-vim boolean Optionally install editing/viewing tools vim and less False -i , --install-tools text Optionally install custom tools required --help boolean Show this message and exit. False ddev env start \u00b6 Start an environment. Usage: ddev env start [OPTIONS] CHECK ENV Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped False --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --org-name , -o text The org to use for data submission. required --profile-memory , -pm boolean Whether to collect metrics about memory usage False --dogstatsd boolean Enable dogstatsd port on agent False --help boolean Show this message and exit. False ddev env stop \u00b6 Stop environments, use \"all\" as check argument to stop everything. Usage: ddev env stop [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False ddev env test \u00b6 Test an environment. Usage: ddev env test [OPTIONS] [CHECKS]... Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped required --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --new-env , -ne boolean Execute setup and tear down actions False --profile-memory , -pm boolean Whether to collect metrics about memory usage False --junit , -j boolean Generate junit reports False --filter , -k text Only run tests matching given substring expression required --changed boolean Only test changed checks False --help boolean Show this message and exit. False ddev meta \u00b6 Anything here should be considered experimental. This meta namespace can be used for an arbitrary number of niche or beta features without bloating the root namespace. Usage: ddev meta [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta catalog \u00b6 Create a catalog with information about integrations Usage: ddev meta catalog [OPTIONS] CHECKS... Options: Name Type Description Default -f , --file text Output to file (it will be overwritten), you can pass \"tmp\" to generate a temporary file required --markdown , -m boolean Output to markdown instead of CSV False --help boolean Show this message and exit. False ddev meta changes \u00b6 Show changes since a specific date. 
Usage: ddev meta changes [OPTIONS] SINCE Options: Name Type Description Default --out , -o boolean Output to file False --eager boolean Skip validation of commit subjects False --help boolean Show this message and exit. False ddev meta create-example-commits \u00b6 Create branch commits from example repo Usage: ddev meta create-example-commits [OPTIONS] SOURCE_DIR Options: Name Type Description Default --prefix , -p text Optional text to prefix each commit `` --help boolean Show this message and exit. False ddev meta dash \u00b6 Dashboard utilities Usage: ddev meta dash [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta dash export \u00b6 Export a Dashboard as JSON Usage: ddev meta dash export [OPTIONS] URL INTEGRATION Options: Name Type Description Default --author , -a text The owner of this integration's dashboard. Default is 'Datadog' Datadog --help boolean Show this message and exit. False ddev meta jmx \u00b6 JMX utilities Usage: ddev meta jmx [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta jmx query-endpoint \u00b6 Query endpoint for JMX info Usage: ddev meta jmx query-endpoint [OPTIONS] HOST PORT [DOMAIN] Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom \u00b6 Prometheus utilities Usage: ddev meta prom [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom info \u00b6 Show metric info from a Prometheus endpoint. Example: $ ddev meta prom info :8080/_status/vars Usage: ddev meta prom info [OPTIONS] ENDPOINT Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta prom parse \u00b6 Interactively parse metric info from a Prometheus endpoint and write it to metadata.csv. Usage: ddev meta prom parse [OPTIONS] ENDPOINT CHECK Options: Name Type Description Default --here , -x boolean Output to the current location False --help boolean Show this message and exit. False ddev meta scripts \u00b6 Miscellaneous scripts that may be useful Usage: ddev meta scripts [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts email2ghuser \u00b6 Given an email, attempt to find a Github username associated with the email. $ ddev meta scripts email2ghuser example@datadoghq.com Usage: ddev meta scripts email2ghuser [OPTIONS] EMAIL Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts metrics2md \u00b6 Convert a check's metadata.csv file to a Markdown table, which will be copied to your clipboard. By default it will be compact and only contain the most useful fields. If you wish to use arbitrary metric data, you may set the check to cb to target the current contents of your clipboard. Usage: ddev meta scripts metrics2md [OPTIONS] CHECK [FIELDS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta scripts remove-labels \u00b6 Remove all labels from an issue or pull request. This is useful when there are too many labels and its state cannot be modified (known GitHub issue). $ ddev meta scripts remove-labels 5626 Usage: ddev meta scripts remove-labels [OPTIONS] ISSUE_NUMBER Options: Name Type Description Default --help boolean Show this message and exit. 
False ddev meta scripts upgrade-python \u00b6 Upgrade the Python version of all test environments. $ ddev meta scripts upgrade-python 3.8 Usage: ddev meta scripts upgrade-python [OPTIONS] NEW_VERSION [OLD_VERSION] Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta snmp \u00b6 SNMP utilities Usage: ddev meta snmp [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev meta snmp generate-profile-from-mibs \u00b6 Generate an SNMP profile from MIBs. Accepts a directory path containing mib files to be used as source to generate the profile, along with a filter if a device or family of devices support only a subset of oids from a mib. filters is the path to a yaml file containing a collection of MIBs, with their list of MIB node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define a column OID within the table, or from a different table and even different MIB, which value can be used to index entries. This is the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } ::= { entPhysicalContainsTable 1 } or its json dump, where INDEX is replaced by indices \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Sometimes indexes are columns from another table, and we might want to use another column as it could have more human readable information - we might prefer to see the interface name vs its numerical table index. This can be achieved using metric_tag_aliases Return a list of SNMP metrics and copy its yaml dump to the clipboard Metric tags need to be added manually Usage: ddev meta snmp generate-profile-from-mibs [OPTIONS] [MIB_FILES]... Options: Name Type Description Default -f , --filters text Path to OIDs filter required -a , --aliases text Path to metric tag aliases required --debug , -d boolean Include debug output False --interactive , -i boolean Prompt to confirm before saving to a file False --help boolean Show this message and exit. 
False ddev meta snmp translate-profile \u00b6 Do OID translation in a SNMP profile. This isn't a plain replacement, as it doesn't preserve comments and indent, but it should automate most of the work. You'll need to install pysnmp and pysnmp-mibs manually beforehand. Usage: ddev meta snmp translate-profile [OPTIONS] PROFILE_PATH Options: Name Type Description Default --mib_source_url text Source url to fetch missing MIBS https://raw.githubusercontent.com/projx/snmp-mibs/master/@mib@ --help boolean Show this message and exit. False ddev meta snmp validate-mib-filenames \u00b6 Validate MIB file names. Frameworks used to load mib files expect MIB file names to match MIB name. Usage: ddev meta snmp validate-mib-filenames [OPTIONS] [MIB_FILES]... Options: Name Type Description Default --interactive , -i boolean Prompt to confirm before renaming all invalid MIB files False --help boolean Show this message and exit. False ddev release \u00b6 Manage the release of checks Usage: ddev release [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release build \u00b6 Build a wheel for a check as it is on the repo HEAD Usage: ddev release build [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --help boolean Show this message and exit. False ddev release changelog \u00b6 Perform the operations needed to update the changelog. This method is supposed to be used by other tasks and not directly. Usage: ddev release changelog [OPTIONS] CHECK VERSION [OLD_VERSION] Options: Name Type Description Default --initial boolean N/A False --organization , -r text N/A DataDog --quiet , -q boolean N/A False --dry-run , -n boolean N/A False --output-file , -o text N/A CHANGELOG.md --tag-prefix , -tp text N/A v --no-semver , -ns boolean N/A False --help boolean Show this message and exit. False ddev release make \u00b6 Perform a set of operations needed to release checks: update the version in __about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes You can release everything at once by setting the check to all . If you run into issues signing: Ensure you did gpg --import .gpg.pub Usage: ddev release make [OPTIONS] CHECKS... Options: Name Type Description Default --version text N/A required --new boolean Ensure versions are at 1.0.0 False --skip-sign boolean Skip the signing of release metadata False --sign-only boolean Only sign release metadata False --exclude text Comma-separated list of checks to skip required --allow-master boolean Allow ddev to commit directly to master. Forbidden for core. False --help boolean Show this message and exit. False ddev release show \u00b6 To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Usage: ddev release show [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release show changes \u00b6 Show all the pending PRs for a given check. Usage: ddev release show changes [OPTIONS] CHECK Options: Name Type Description Default --organization , -r text The Github organization the repository belongs to DataDog --tag-pattern text The regex pattern for the format of the tag. 
Required if the tag doesn't follow semver required --tag-prefix text Specify the prefix of the tag to use if the tag doesn't follow semver required --dry-run , -n boolean Run the command in dry-run mode False --since text The git ref to use instead of auto-detecting the tag to view changes since required --help boolean Show this message and exit. False ddev release show ready \u00b6 Show all the checks that can be released. Usage: ddev release show ready [OPTIONS] Options: Name Type Description Default --quiet , -q boolean N/A False --help boolean Show this message and exit. False ddev release stats \u00b6 A collection of tasks to generate reports about releases Usage: ddev release stats [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release stats merged-prs \u00b6 Prints the PRs merged between the first RC and the current RC/final build Usage: ddev release stats merged-prs [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --exclude-releases , -e boolean Flag to exclude the release PRs from the list False --export-csv text CSV file where the list will be exported required --help boolean Show this message and exit. False ddev release stats report \u00b6 Prints some release stats we want to track Usage: ddev release stats report [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --help boolean Show this message and exit. False ddev release tag \u00b6 Tag the HEAD of the git repo with the current release number for a specific check. The tag is pushed to origin by default. You can tag everything at once by setting the check to all . Notice: specifying a different version than the one in __about__.py is a maintenance task that should be run under very specific circumstances (e.g. re-align an old release performed on the wrong commit). Usage: ddev release tag [OPTIONS] CHECK [VERSION] Options: Name Type Description Default --push / --no-push boolean N/A True --dry-run , -n boolean N/A False --help boolean Show this message and exit. False ddev release trello \u00b6 Subcommands for interacting with Trello Release boards. To use Trello: 1. Go to https://trello.com/app-key and copy your API key. 2. Run ddev config set trello.key and paste your API key. 3. Go to https://trello.com/1/authorize?key=key&name=name&scope=read,write&expiration=never&response_type=token , where key is your API key and name is the name to give your token, e.g. ReleaseTestingYourName. Authorize access and copy your token. 4. Run ddev config set trello.token and paste your token. Usage: ddev release trello [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev release trello status \u00b6 Print tabular status of Agent Release based on Trello columns. See trello subcommand for details on how to setup access: ddev release trello -h . 
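For example, a hypothetical invocation that returns the board status as raw JSON and copies it to the clipboard (both flags are documented below):
$ ddev release trello status --json --clipboard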
Usage: ddev release trello status [OPTIONS] Options: Name Type Description Default --verbose , -v boolean Return the detailed results instead of the aggregates False --json , -j boolean Return as raw JSON instead False --clipboard , -c boolean Copy output to clipboard False --help boolean Show this message and exit. False ddev release trello testable \u00b6 Create a Trello card for changes since a previous release (referenced by BASE_REF ) that need to be tested for the next release (referenced by TARGET_REF ). BASE_REF and TARGET_REF can be any valid git references. In practice, you should use either: A tag: 7.16.1 , 7.17.0-rc.4 , ... A release branch: 6.16.x , 7.17.x , ... The master branch. NOTE: using a minor version shorthand (e.g. 7.16 ) is not supported, as it is ambiguous. Example: assuming we are working on the release of 7.17.0, we can... Create cards for changes between a previous Agent release and master (useful when preparing an initial RC): $ ddev release trello testable 7.16.1 origin/master Create cards for changes between a previous RC and master (useful when preparing a new RC, and a separate release branch was not created yet): $ ddev release trello testable 7.17.0-rc.2 origin/master Create cards for changes between a previous RC and a release branch (useful to only review changes in a release branch that has diverged from master ): $ ddev release trello testable 7.17.0-rc.4 7.17.x Create cards for changes between two arbitrary tags, e.g. between RCs: $ ddev release trello testable 7.17.0-rc.4 7.17.0-rc.5 TIP: run with ddev -x release trello testable to force the use of the current directory. To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello testable [OPTIONS] BASE_REF TARGET_REF Options: Name Type Description Default --milestone text The PR milestone to filter by required --dry-run , -n boolean Only show the changes False --update-rc-builds-cards boolean Update cards in RC builds column with target_ref version False --move-cards boolean Do not create a card for a change, but move the existing card from HAVE BUGS - FIXME or FIXED - Ready to Rebuild to INBOX team False --help boolean Show this message and exit. False ddev release trello update-rc-links \u00b6 Update links to RCs in the QA board Trello cards Usage: ddev release trello update-rc-links [OPTIONS] TARGET_REF Options: Name Type Description Default --help boolean Show this message and exit. False ddev release upload \u00b6 Release a specific check to PyPI as it is on the repo HEAD. Usage: ddev release upload [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --dry-run , -n boolean N/A False --help boolean Show this message and exit. False ddev run \u00b6 Run commands in the proper repo. Usage: ddev run [OPTIONS] [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev test \u00b6 Run tests for Agent-based checks. If no checks are specified, this will only test checks that were changed compared to the master branch. You can also select specific comma-separated environments to test like so: $ ddev test mysql:mysql57,maria10130 Usage: ddev test [OPTIONS] [CHECKS]...
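An illustrative run, assuming the postgres check exists in the current repo (the flags used here are listed below):
$ ddev test -s postgres  # style checks only
$ ddev test --cov postgres  # full test suite with coverage measurement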
Options: Name Type Description Default --format-style , -fs boolean Run only the code style formatter False --style , -s boolean Run only style checks False --bench , -b boolean Run only benchmarks False --latest-metrics boolean Only verify support of new metrics False --e2e boolean Run only end-to-end tests False --ddtrace boolean Run tests using dd-trace-py False --cov , -c boolean Measure code coverage False --cov-missing , -cm boolean Show line numbers of statements that were not executed False --junit , -j boolean Generate junit reports False --marker , -m text Only run tests matching given marker expression required --filter , -k text Only run tests matching given substring expression required --pdb boolean Drop to PDB on first failure, then end test session False --debug , -d boolean Set the log level to debug False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --list , -l boolean List available test environments False --passenv text Additional environment variables to pass down required --changed boolean Only test changed checks False --cov-keep boolean Keep coverage reports False --skip-env boolean Skip environment creation and assume it is already running False --pytest-args , -pa text Additional arguments to pytest required --force-base-unpinned boolean Force using datadog-checks-base as specified by check dep False --force-base-min boolean Force using lowest viable release version of datadog-checks-base False --force-env-rebuild boolean Force creating a new env False --help boolean Show this message and exit. False ddev validate \u00b6 Verify certain aspects of the repo Usage: ddev validate [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate agent-reqs \u00b6 Verify that the checks versions are in sync with the requirements-agent-release.txt file. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate agent-reqs [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate all \u00b6 Run all CI validations for a repo. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate all [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate ci \u00b6 Validate CI infrastructure configuration. Usage: ddev validate ci [OPTIONS] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False ddev validate codeowners \u00b6 Validate that every integration has an entry in the CODEOWNERS file. Usage: ddev validate codeowners [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate config \u00b6 Validate default configuration files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. 
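For instance, a hypothetical run that regenerates the example configuration file for a single check (postgres is used only as an illustration):
$ ddev validate config --sync postgres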
Usage: ddev validate config [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate example configuration files based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate dashboards \u00b6 Validate all Dashboard definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate dashboards [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate dep \u00b6 This command will: Verify the uniqueness of dependency versions across all checks, or optionally a single check Verify all the dependencies are pinned. Verify the embedded Python environment defined in the base check and requirements listed in every integration are compatible. Verify each check specifies a CHECKS_BASE_REQ variable for datadog-checks-base requirement Optionally verify that the datadog-checks-base requirement is lower-bounded Optionally verify that the datadog-checks-base requirement satisfies specific version Usage: ddev validate dep [OPTIONS] [CHECK] Options: Name Type Description Default --require-base-check-version boolean Require specific version for datadog-checks-base requirement False --min-base-check-version text Specify minimum version for datadog-checks-base requirement, e.g. 11.0.0 required --help boolean Show this message and exit. False ddev validate eula \u00b6 Validate all EULA definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate eula [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate http \u00b6 Validate all integrations for usage of http wrapper. Usage: ddev validate http [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate imports \u00b6 Validate proper imports in checks. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate imports [OPTIONS] [CHECK] Options: Name Type Description Default --autofix boolean Apply suggested fix False --help boolean Show this message and exit. False ddev validate jmx-metrics \u00b6 Validate all default JMX metrics definitions. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate jmx-metrics [OPTIONS] [CHECK] Options: Name Type Description Default --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate legacy-signature \u00b6 Validate that no integration uses the legacy signature. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate legacy-signature [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate licenses \u00b6 Validate third-party license list. 
Usage: ddev validate licenses [OPTIONS] Options: Name Type Description Default --sync , -s boolean Generate the LICENSE-3rdparty.csv file False --help boolean Show this message and exit. False ddev validate manifest \u00b6 Validate manifest.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate manifest [OPTIONS] [CHECK] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False ddev validate metadata \u00b6 Validates metadata.csv files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate metadata [OPTIONS] [CHECK] Options: Name Type Description Default --check-duplicates boolean Output warnings if there are duplicate short names and descriptions False --show-warnings , -w boolean Show warnings in addition to failures False --help boolean Show this message and exit. False ddev validate models \u00b6 Validate configuration data models. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate models [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate data models based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False ddev validate package \u00b6 Validate all setup.py files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate package [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate readmes \u00b6 Validates README files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate readmes [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate recommended-monitors \u00b6 Validate all recommended monitors definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate recommended-monitors [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate saved-views \u00b6 Validates saved view files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate saved-views [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False ddev validate service-checks \u00b6 Validate all service_checks.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. 
Usage: ddev validate service-checks [OPTIONS] [CHECK] Options: Name Type Description Default --sync boolean Generate example configuration files based on specifications False --help boolean Show this message and exit. False","title":"CLI"},{"location":"ddev/cli/#ddev","text":"Usage: ddev [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --core , -c boolean Work on integrations-core . False --extras , -e boolean Work on integrations-extras . False --agent , -a boolean Work on datadog-agent . False --marketplace , -m boolean Work on marketplace . False --here , -x boolean Work on the current location. False --color / --no-color boolean Whether or not to display colored output (default true). required --quiet , -q boolean Silence output False --debug , -d boolean Include debug output False --version boolean Show the version and exit. False --help boolean Show this message and exit. False","title":"ddev"},{"location":"ddev/cli/#ddev-agent","text":"A collection of tasks related to the Datadog Agent Usage: ddev agent [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"agent"},{"location":"ddev/cli/#ddev-agent-changelog","text":"Generates a markdown file containing the list of checks that changed for a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the whole changelog since Agent version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent changelog [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False","title":"changelog"},{"location":"ddev/cli/#ddev-agent-integrations","text":"Generates a markdown file containing the list of integrations shipped in a given Agent release. Agent version numbers are derived inspecting tags on integrations-core so running this tool might provide unexpected results if the repo is not up to date with the Agent release process. If neither --since or --to are passed (the most common use case), the tool will generate the list for every Agent since version 6.3.0 (before that point we don't have enough information to build the log). Usage: ddev agent integrations [OPTIONS] Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to file, if omitted contents will be printed to stdout False --force , -f boolean Replace an existing file False --help boolean Show this message and exit. False","title":"integrations"},{"location":"ddev/cli/#ddev-agent-integrations-changelog","text":"Update integration CHANGELOG.md by adding the Agent version. Agent version is only added to the integration versions released with a specific Agent release. Usage: ddev agent integrations-changelog [OPTIONS] [CHECKS]... 
Options: Name Type Description Default --since text Initial Agent version 6.3.0 --to text Final Agent version required --write , -w boolean Write to the changelog file, if omitted contents will be printed to stdout False --help boolean Show this message and exit. False","title":"integrations-changelog"},{"location":"ddev/cli/#ddev-agent-requirements","text":"Write the requirements-agent-release.txt file at the root of the repo listing all the Agent-based integrations pinned at the version they currently have in HEAD. Usage: ddev agent requirements [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"requirements"},{"location":"ddev/cli/#ddev-ci","text":"CI related utils. Anything here should be considered experimental. Usage: ddev ci [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"ci"},{"location":"ddev/cli/#ddev-ci-setup","text":"Run CI setup scripts Usage: ddev ci setup [OPTIONS] [CHECKS]... Options: Name Type Description Default --changed boolean Only target changed checks False --help boolean Show this message and exit. False","title":"setup"},{"location":"ddev/cli/#ddev-clean","text":"Remove build and test artifacts for the given CHECK. If CHECK is not specified, the current working directory is used. Usage: ddev clean [OPTIONS] [CHECK] Options: Name Type Description Default --compiled-only , -c boolean Remove compiled files only (*.pyc, *.pyd, *.pyo, *.whl, pycache ). False --all , -a boolean Disable the detection of a project's dedicated virtual env and/or editable installation. By default, these will not be considered. False --force , -f boolean If set and the command is run from the root directory, allow removing build and test artifacts (*.egg-info, .benchmarks, .cache, .coverage, .eggs, .pytest_cache, .tox, build, dist). False --verbose , -v boolean Shows removed paths. False --help boolean Show this message and exit. False","title":"clean"},{"location":"ddev/cli/#ddev-config","text":"Manage the config file Usage: ddev config [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"config"},{"location":"ddev/cli/#ddev-config-edit","text":"Edit the config file with your default EDITOR. Usage: ddev config edit [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"edit"},{"location":"ddev/cli/#ddev-config-explore","text":"Open the config location in your file manager. Usage: ddev config explore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"explore"},{"location":"ddev/cli/#ddev-config-find","text":"Show the location of the config file. Usage: ddev config find [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"find"},{"location":"ddev/cli/#ddev-config-restore","text":"Restore the config file to default settings. Usage: ddev config restore [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"restore"},{"location":"ddev/cli/#ddev-config-set","text":"Assigns values to config file entries. If the value is omitted, you will be prompted, with the input hidden if it is sensitive. $ ddev config set github.user foo New setting: [github] user = \"foo\" You can also assign values on a per-org basis. $ ddev config set orgs..api_key New setting: [orgs.] 
api_key = \"***********\" Usage: ddev config set [OPTIONS] KEY [VALUE] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"set"},{"location":"ddev/cli/#ddev-config-show","text":"Show the contents of the config file. Usage: ddev config show [OPTIONS] Options: Name Type Description Default --all , -a boolean No not scrub secret fields False --help boolean Show this message and exit. False","title":"show"},{"location":"ddev/cli/#ddev-config-update","text":"Update the config file with any new fields. Usage: ddev config update [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"update"},{"location":"ddev/cli/#ddev-create","text":"Create scaffolding for a new integration. NAME: The display name of the integration that will appear in documentation. Usage: ddev create [OPTIONS] NAME Options: Name Type Description Default --type , -t choice ( check | jmx | logs | snmp_tile | tile ) The type of integration to create check --location , -l text The directory where files will be written required --non-interactive , -ni boolean Disable prompting for fields False --quiet , -q boolean Show less output False --dry-run , -n boolean Only show what would be created False --help boolean Show this message and exit. False","title":"create"},{"location":"ddev/cli/#ddev-dep","text":"Manage dependencies Usage: ddev dep [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dep"},{"location":"ddev/cli/#ddev-dep-freeze","text":"Combine all dependencies for the Agent's static environment. Usage: ddev dep freeze [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"freeze"},{"location":"ddev/cli/#ddev-dep-pin","text":"Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to none will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments. Usage: ddev dep pin [OPTIONS] PACKAGE VERSION Options: Name Type Description Default --marker , -m text Environment marker to use required --help boolean Show this message and exit. False","title":"pin"},{"location":"ddev/cli/#ddev-docs","text":"Manage documentation Usage: ddev docs [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"docs"},{"location":"ddev/cli/#ddev-docs-build","text":"Build documentation. Usage: ddev docs build [OPTIONS] Options: Name Type Description Default --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --help boolean Show this message and exit. False","title":"build"},{"location":"ddev/cli/#ddev-docs-deploy","text":"Deploy built documentation. Usage: ddev docs deploy [OPTIONS] [BRANCH] Options: Name Type Description Default --yes , -y boolean N/A False --help boolean Show this message and exit. False","title":"deploy"},{"location":"ddev/cli/#ddev-docs-serve","text":"Serve and view documentation in a web browser. 
Usage: ddev docs serve [OPTIONS] Options: Name Type Description Default --no-open , -n boolean Do not open the documentation in a web browser False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --pdf boolean Also export the site as PDF False --dirty boolean Speed up reload time by only rebuilding edited pages (based on modified time). For development only. False --help boolean Show this message and exit. False","title":"serve"},{"location":"ddev/cli/#ddev-env","text":"Manage environments Usage: ddev env [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"env"},{"location":"ddev/cli/#ddev-env-check","text":"Run an Agent check. Usage: ddev env check [OPTIONS] CHECK [ENV] Options: Name Type Description Default --rate , -r boolean Compute rates by running the check twice with a pause between each run False --times , -t integer Number of times to run the check required --pause integer Number of milliseconds to pause between multiple check runs required --delay , -d integer Delay in milliseconds between running the check and grabbing what was collected required --log-level , -l text Set the log level (default off ) required --json boolean Format the aggregator and check runner output as JSON False --table boolean Format the aggregator and check runner output as tabular False --breakpoint , -b integer Line number to start a PDB session (0: first line, -1: last line) required --config text Path to a JSON check configuration to use required --jmx-list text JMX metrics listing method required --help boolean Show this message and exit. False","title":"check"},{"location":"ddev/cli/#ddev-env-edit","text":"Start an environment. Usage: ddev env edit [OPTIONS] CHECK ENV Options: Name Type Description Default --editor , -e text Editor to use required --help boolean Show this message and exit. False","title":"edit"},{"location":"ddev/cli/#ddev-env-ls","text":"List active or available environments. Usage: ddev env ls [OPTIONS] [CHECKS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"ls"},{"location":"ddev/cli/#ddev-env-prune","text":"Remove all configuration for environments. Usage: ddev env prune [OPTIONS] Options: Name Type Description Default --force , -f boolean N/A False --help boolean Show this message and exit. False","title":"prune"},{"location":"ddev/cli/#ddev-env-reload","text":"Restart an Agent to detect environment changes. Usage: ddev env reload [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"reload"},{"location":"ddev/cli/#ddev-env-shell","text":"Run a shell inside the Agent docker container. Usage: ddev env shell [OPTIONS] CHECK [ENV] Options: Name Type Description Default -c , --exec-command text Optionally execute command inside container, executes after any installs required -v , --install-vim boolean Optionally install editing/viewing tools vim and less False -i , --install-tools text Optionally install custom tools required --help boolean Show this message and exit. False","title":"shell"},{"location":"ddev/cli/#ddev-env-start","text":"Start an environment. Usage: ddev env start [OPTIONS] CHECK ENV Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. 
required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped False --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --org-name , -o text The org to use for data submission. required --profile-memory , -pm boolean Whether to collect metrics about memory usage False --dogstatsd boolean Enable dogstatsd port on agent False --help boolean Show this message and exit. False","title":"start"},{"location":"ddev/cli/#ddev-env-stop","text":"Stop environments, use \"all\" as check argument to stop everything. Usage: ddev env stop [OPTIONS] CHECK [ENV] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"stop"},{"location":"ddev/cli/#ddev-env-test","text":"Test an environment. Usage: ddev env test [OPTIONS] [CHECKS]... Options: Name Type Description Default --agent , -a text The agent build to use e.g. a Docker image like datadog/agent:latest . You can also use the name of an agent defined in the agents configuration section. required --python , -py integer The version of Python to use. Defaults to 3 if no tox Python is specified. required --dev / --prod boolean Whether to use the latest version of a check or what is shipped required --base boolean Whether to use the latest version of the base check or what is shipped False --env-vars , -e text ENV Variable that should be passed to the Agent container. Ex: -e DD_URL=app.datadoghq.com -e DD_API_KEY=123456 required --new-env , -ne boolean Execute setup and tear down actions False --profile-memory , -pm boolean Whether to collect metrics about memory usage False --junit , -j boolean Generate junit reports False --filter , -k text Only run tests matching given substring expression required --changed boolean Only test changed checks False --help boolean Show this message and exit. False","title":"test"},{"location":"ddev/cli/#ddev-meta","text":"Anything here should be considered experimental. This meta namespace can be used for an arbitrary number of niche or beta features without bloating the root namespace. Usage: ddev meta [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"meta"},{"location":"ddev/cli/#ddev-meta-catalog","text":"Create a catalog with information about integrations Usage: ddev meta catalog [OPTIONS] CHECKS... Options: Name Type Description Default -f , --file text Output to file (it will be overwritten), you can pass \"tmp\" to generate a temporary file required --markdown , -m boolean Output to markdown instead of CSV False --help boolean Show this message and exit. False","title":"catalog"},{"location":"ddev/cli/#ddev-meta-changes","text":"Show changes since a specific date. Usage: ddev meta changes [OPTIONS] SINCE Options: Name Type Description Default --out , -o boolean Output to file False --eager boolean Skip validation of commit subjects False --help boolean Show this message and exit. 
False","title":"changes"},{"location":"ddev/cli/#ddev-meta-create-example-commits","text":"Create branch commits from example repo Usage: ddev meta create-example-commits [OPTIONS] SOURCE_DIR Options: Name Type Description Default --prefix , -p text Optional text to prefix each commit `` --help boolean Show this message and exit. False","title":"create-example-commits"},{"location":"ddev/cli/#ddev-meta-dash","text":"Dashboard utilities Usage: ddev meta dash [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dash"},{"location":"ddev/cli/#ddev-meta-dash-export","text":"Export a Dashboard as JSON Usage: ddev meta dash export [OPTIONS] URL INTEGRATION Options: Name Type Description Default --author , -a text The owner of this integration's dashboard. Default is 'Datadog' Datadog --help boolean Show this message and exit. False","title":"export"},{"location":"ddev/cli/#ddev-meta-jmx","text":"JMX utilities Usage: ddev meta jmx [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"jmx"},{"location":"ddev/cli/#ddev-meta-jmx-query-endpoint","text":"Query endpoint for JMX info Usage: ddev meta jmx query-endpoint [OPTIONS] HOST PORT [DOMAIN] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"query-endpoint"},{"location":"ddev/cli/#ddev-meta-prom","text":"Prometheus utilities Usage: ddev meta prom [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"prom"},{"location":"ddev/cli/#ddev-meta-prom-info","text":"Show metric info from a Prometheus endpoint. Example: $ ddev meta prom info :8080/_status/vars Usage: ddev meta prom info [OPTIONS] ENDPOINT Options: Name Type Description Default --help boolean Show this message and exit. False","title":"info"},{"location":"ddev/cli/#ddev-meta-prom-parse","text":"Interactively parse metric info from a Prometheus endpoint and write it to metadata.csv. Usage: ddev meta prom parse [OPTIONS] ENDPOINT CHECK Options: Name Type Description Default --here , -x boolean Output to the current location False --help boolean Show this message and exit. False","title":"parse"},{"location":"ddev/cli/#ddev-meta-scripts","text":"Miscellaneous scripts that may be useful Usage: ddev meta scripts [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"scripts"},{"location":"ddev/cli/#ddev-meta-scripts-email2ghuser","text":"Given an email, attempt to find a Github username associated with the email. $ ddev meta scripts email2ghuser example@datadoghq.com Usage: ddev meta scripts email2ghuser [OPTIONS] EMAIL Options: Name Type Description Default --help boolean Show this message and exit. False","title":"email2ghuser"},{"location":"ddev/cli/#ddev-meta-scripts-metrics2md","text":"Convert a check's metadata.csv file to a Markdown table, which will be copied to your clipboard. By default it will be compact and only contain the most useful fields. If you wish to use arbitrary metric data, you may set the check to cb to target the current contents of your clipboard. Usage: ddev meta scripts metrics2md [OPTIONS] CHECK [FIELDS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"metrics2md"},{"location":"ddev/cli/#ddev-meta-scripts-remove-labels","text":"Remove all labels from an issue or pull request. 
This is useful when there are too many labels and its state cannot be modified (known GitHub issue). $ ddev meta scripts remove-labels 5626 Usage: ddev meta scripts remove-labels [OPTIONS] ISSUE_NUMBER Options: Name Type Description Default --help boolean Show this message and exit. False","title":"remove-labels"},{"location":"ddev/cli/#ddev-meta-scripts-upgrade-python","text":"Upgrade the Python version of all test environments. $ ddev meta scripts upgrade-python 3.8 Usage: ddev meta scripts upgrade-python [OPTIONS] NEW_VERSION [OLD_VERSION] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"upgrade-python"},{"location":"ddev/cli/#ddev-meta-snmp","text":"SNMP utilities Usage: ddev meta snmp [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"snmp"},{"location":"ddev/cli/#ddev-meta-snmp-generate-profile-from-mibs","text":"Generate an SNMP profile from MIBs. Accepts a directory path containing mib files to be used as source to generate the profile, along with a filter if a device or family of devices support only a subset of oids from a mib. filters is the path to a yaml file containing a collection of MIBs, with their list of MIB node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define a column OID within the table, or from a different table and even different MIB, which value can be used to index entries. This is the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } ::= { entPhysicalContainsTable 1 } or its json dump, where INDEX is replaced by indices \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Sometimes indexes are columns from another table, and we might want to use another column as it could have more human readable information - we might prefer to see the interface name vs its numerical table index. 
This can be achieved using metric_tag_aliases Return a list of SNMP metrics and copy its yaml dump to the clipboard Metric tags need to be added manually Usage: ddev meta snmp generate-profile-from-mibs [OPTIONS] [MIB_FILES]... Options: Name Type Description Default -f , --filters text Path to OIDs filter required -a , --aliases text Path to metric tag aliases required --debug , -d boolean Include debug output False --interactive , -i boolean Prompt to confirm before saving to a file False --help boolean Show this message and exit. False","title":"generate-profile-from-mibs"},{"location":"ddev/cli/#ddev-meta-snmp-translate-profile","text":"Do OID translation in a SNMP profile. This isn't a plain replacement, as it doesn't preserve comments and indent, but it should automate most of the work. You'll need to install pysnmp and pysnmp-mibs manually beforehand. Usage: ddev meta snmp translate-profile [OPTIONS] PROFILE_PATH Options: Name Type Description Default --mib_source_url text Source url to fetch missing MIBS https://raw.githubusercontent.com/projx/snmp-mibs/master/@mib@ --help boolean Show this message and exit. False","title":"translate-profile"},{"location":"ddev/cli/#ddev-meta-snmp-validate-mib-filenames","text":"Validate MIB file names. Frameworks used to load mib files expect MIB file names to match MIB name. Usage: ddev meta snmp validate-mib-filenames [OPTIONS] [MIB_FILES]... Options: Name Type Description Default --interactive , -i boolean Prompt to confirm before renaming all invalid MIB files False --help boolean Show this message and exit. False","title":"validate-mib-filenames"},{"location":"ddev/cli/#ddev-release","text":"Manage the release of checks Usage: ddev release [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"release"},{"location":"ddev/cli/#ddev-release-build","text":"Build a wheel for a check as it is on the repo HEAD Usage: ddev release build [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --help boolean Show this message and exit. False","title":"build"},{"location":"ddev/cli/#ddev-release-changelog","text":"Perform the operations needed to update the changelog. This method is supposed to be used by other tasks and not directly. Usage: ddev release changelog [OPTIONS] CHECK VERSION [OLD_VERSION] Options: Name Type Description Default --initial boolean N/A False --organization , -r text N/A DataDog --quiet , -q boolean N/A False --dry-run , -n boolean N/A False --output-file , -o text N/A CHANGELOG.md --tag-prefix , -tp text N/A v --no-semver , -ns boolean N/A False --help boolean Show this message and exit. False","title":"changelog"},{"location":"ddev/cli/#ddev-release-make","text":"Perform a set of operations needed to release checks: update the version in __about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes You can release everything at once by setting the check to all . If you run into issues signing: Ensure you did gpg --import .gpg.pub Usage: ddev release make [OPTIONS] CHECKS... Options: Name Type Description Default --version text N/A required --new boolean Ensure versions are at 1.0.0 False --skip-sign boolean Skip the signing of release metadata False --sign-only boolean Only sign release metadata False --exclude text Comma-separated list of checks to skip required --allow-master boolean Allow ddev to commit directly to master. Forbidden for core. 
False --help boolean Show this message and exit. False","title":"make"},{"location":"ddev/cli/#ddev-release-show","text":"To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Usage: ddev release show [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"show"},{"location":"ddev/cli/#ddev-release-show-changes","text":"Show all the pending PRs for a given check. Usage: ddev release show changes [OPTIONS] CHECK Options: Name Type Description Default --organization , -r text The Github organization the repository belongs to DataDog --tag-pattern text The regex pattern for the format of the tag. Required if the tag doesn't follow semver required --tag-prefix text Specify the prefix of the tag to use if the tag doesn't follow semver required --dry-run , -n boolean Run the command in dry-run mode False --since text The git ref to use instead of auto-detecting the tag to view changes since required --help boolean Show this message and exit. False","title":"changes"},{"location":"ddev/cli/#ddev-release-show-ready","text":"Show all the checks that can be released. Usage: ddev release show ready [OPTIONS] Options: Name Type Description Default --quiet , -q boolean N/A False --help boolean Show this message and exit. False","title":"ready"},{"location":"ddev/cli/#ddev-release-stats","text":"A collection of tasks to generate reports about releases Usage: ddev release stats [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"stats"},{"location":"ddev/cli/#ddev-release-stats-merged-prs","text":"Prints the PRs merged between the first RC and the current RC/final build Usage: ddev release stats merged-prs [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --exclude-releases , -e boolean Flag to exclude the release PRs from the list False --export-csv text CSV file where the list will be exported required --help boolean Show this message and exit. False","title":"merged-prs"},{"location":"ddev/cli/#ddev-release-stats-report","text":"Prints some release stats we want to track Usage: ddev release stats report [OPTIONS] Options: Name Type Description Default --from-ref , -f text Reference to start stats on (first RC tagged) required --to-ref , -t text Reference to end stats at (current RC/final tag) required --release-milestone , -r text Github release milestone required --help boolean Show this message and exit. False","title":"report"},{"location":"ddev/cli/#ddev-release-tag","text":"Tag the HEAD of the git repo with the current release number for a specific check. The tag is pushed to origin by default. You can tag everything at once by setting the check to all . Notice: specifying a different version than the one in __about__.py is a maintenance task that should be run under very specific circumstances (e.g. re-align an old release performed on the wrong commit). Usage: ddev release tag [OPTIONS] CHECK [VERSION] Options: Name Type Description Default --push / --no-push boolean N/A True --dry-run , -n boolean N/A False --help boolean Show this message and exit. 
False","title":"tag"},{"location":"ddev/cli/#ddev-release-trello","text":"Subcommands for interacting with Trello Release boards. To use Trello: 1. Go to https://trello.com/app-key and copy your API key. 2. Run ddev config set trello.key and paste your API key. 3. Go to https://trello.com/1/authorize?key=key&name=name&scope=read,write&expiration=never&response_type=token , where key is your API key and name is the name to give your token, e.g. ReleaseTestingYourName. Authorize access and copy your token. 4. Run ddev config set trello.token and paste your token. Usage: ddev release trello [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"trello"},{"location":"ddev/cli/#ddev-release-trello-status","text":"Print tabular status of Agent Release based on Trello columns. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello status [OPTIONS] Options: Name Type Description Default --verbose , -v boolean Return the detailed results instead of the aggregates False --json , -j boolean Return as raw JSON instead False --clipboard , -c boolean Copy output to clipboard False --help boolean Show this message and exit. False","title":"status"},{"location":"ddev/cli/#ddev-release-trello-testable","text":"Create a Trello card for changes since a previous release (referenced by BASE_REF ) that need to be tested for the next release (referenced by TARGET_REF ). BASE_REF and TARGET_REF can be any valid git references. It practice, you should use either: A tag: 7.16.1 , 7.17.0-rc.4 , ... A release branch: 6.16.x , 7.17.x , ... The master branch. NOTE: using a minor version shorthand (e.g. 7.16 ) is not supported, as it is ambiguous. Example: assuming we are working on the release of 7.17.0, we can... Create cards for changes between a previous Agent release and master (useful when preparing an initial RC): $ ddev release trello testable 7.16.1 origin/master Create cards for changes between a previous RC and master (useful when preparing a new RC, and a separate release branch was not created yet): $ ddev release trello testable 7.17.0-rc.2 origin/master Create cards for changes between a previous RC and a release branch (useful to only review changes in a release branch that has diverged from master ): $ ddev release trello testable 7.17.0-rc.4 7.17.x Create cards for changes between two arbitrary tags, e.g. between RCs: $ ddev release trello testable 7.17.0-rc.4 7.17.0-rc.5 TIP: run with ddev -x release trello testable to force the use of the current directory. To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. See trello subcommand for details on how to setup access: ddev release trello -h . Usage: ddev release trello testable [OPTIONS] BASE_REF TARGET_REF Options: Name Type Description Default --milestone text The PR milestone to filter by required --dry-run , -n boolean Only show the changes False --update-rc-builds-cards boolean Update cards in RC builds column with target_ref version False --move-cards boolean Do not create a card for a change, but move the existing card from HAVE BUGS - FIXME or FIXED - Ready to Rebuild to INBOX team False --help boolean Show this message and exit. 
False","title":"testable"},{"location":"ddev/cli/#ddev-release-trello-update-rc-links","text":"Update links to RCs in the QA board Trello cards Usage: ddev release trello update-rc-links [OPTIONS] TARGET_REF Options: Name Type Description Default --help boolean Show this message and exit. False","title":"update-rc-links"},{"location":"ddev/cli/#ddev-release-upload","text":"Release a specific check to PyPI as it is on the repo HEAD. Usage: ddev release upload [OPTIONS] CHECK Options: Name Type Description Default --sdist , -s boolean N/A False --dry-run , -n boolean N/A False --help boolean Show this message and exit. False","title":"upload"},{"location":"ddev/cli/#ddev-run","text":"Run commands in the proper repo. Usage: ddev run [OPTIONS] [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"run"},{"location":"ddev/cli/#ddev-test","text":"Run tests for Agent-based checks. If no checks are specified, this will only test checks that were changed compared to the master branch. You can also select specific comma-separated environments to test like so: $ ddev test mysql:mysql57,maria10130 Usage: ddev test [OPTIONS] [CHECKS]... Options: Name Type Description Default --format-style , -fs boolean Run only the code style formatter False --style , -s boolean Run only style checks False --bench , -b boolean Run only benchmarks False --latest-metrics boolean Only verify support of new metrics False --e2e boolean Run only end-to-end tests False --ddtrace boolean Run tests using dd-trace-py False --cov , -c boolean Measure code coverage False --cov-missing , -cm boolean Show line numbers of statements that were not executed False --junit , -j boolean Generate junit reports False --marker , -m text Only run tests matching given marker expression required --filter , -k text Only run tests matching given substring expression required --pdb boolean Drop to PDB on first failure, then end test session False --debug , -d boolean Set the log level to debug False --verbose , -v integer range ( 0 and above) Increase verbosity (can be used additively) 0 --list , -l boolean List available test environments False --passenv text Additional environment variables to pass down required --changed boolean Only test changed checks False --cov-keep boolean Keep coverage reports False --skip-env boolean Skip environment creation and assume it is already running False --pytest-args , -pa text Additional arguments to pytest required --force-base-unpinned boolean Force using datadog-checks-base as specified by check dep False --force-base-min boolean Force using lowest viable release version of datadog-checks-base False --force-env-rebuild boolean Force creating a new env False --help boolean Show this message and exit. False","title":"test"},{"location":"ddev/cli/#ddev-validate","text":"Verify certain aspects of the repo Usage: ddev validate [OPTIONS] COMMAND [ARGS]... Options: Name Type Description Default --help boolean Show this message and exit. False","title":"validate"},{"location":"ddev/cli/#ddev-validate-agent-reqs","text":"Verify that the checks versions are in sync with the requirements-agent-release.txt file. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate agent-reqs [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. 
False","title":"agent-reqs"},{"location":"ddev/cli/#ddev-validate-all","text":"Run all CI validations for a repo. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate all [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"all"},{"location":"ddev/cli/#ddev-validate-ci","text":"Validate CI infrastructure configuration. Usage: ddev validate ci [OPTIONS] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False","title":"ci"},{"location":"ddev/cli/#ddev-validate-codeowners","text":"Validate that every integration has an entry in the CODEOWNERS file. Usage: ddev validate codeowners [OPTIONS] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"codeowners"},{"location":"ddev/cli/#ddev-validate-config","text":"Validate default configuration files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate config [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate example configuration files based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False","title":"config"},{"location":"ddev/cli/#ddev-validate-dashboards","text":"Validate all Dashboard definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate dashboards [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"dashboards"},{"location":"ddev/cli/#ddev-validate-dep","text":"This command will: Verify the uniqueness of dependency versions across all checks, or optionally a single check Verify all the dependencies are pinned. Verify the embedded Python environment defined in the base check and requirements listed in every integration are compatible. Verify each check specifies a CHECKS_BASE_REQ variable for datadog-checks-base requirement Optionally verify that the datadog-checks-base requirement is lower-bounded Optionally verify that the datadog-checks-base requirement satisfies specific version Usage: ddev validate dep [OPTIONS] [CHECK] Options: Name Type Description Default --require-base-check-version boolean Require specific version for datadog-checks-base requirement False --min-base-check-version text Specify minimum version for datadog-checks-base requirement, e.g. 11.0.0 required --help boolean Show this message and exit. False","title":"dep"},{"location":"ddev/cli/#ddev-validate-eula","text":"Validate all EULA definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate eula [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"eula"},{"location":"ddev/cli/#ddev-validate-http","text":"Validate all integrations for usage of http wrapper. 
Usage: ddev validate http [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"http"},{"location":"ddev/cli/#ddev-validate-imports","text":"Validate proper imports in checks. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate imports [OPTIONS] [CHECK] Options: Name Type Description Default --autofix boolean Apply suggested fix False --help boolean Show this message and exit. False","title":"imports"},{"location":"ddev/cli/#ddev-validate-jmx-metrics","text":"Validate all default JMX metrics definitions. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate jmx-metrics [OPTIONS] [CHECK] Options: Name Type Description Default --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. False","title":"jmx-metrics"},{"location":"ddev/cli/#ddev-validate-legacy-signature","text":"Validate that no integration uses the legacy signature. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate legacy-signature [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"legacy-signature"},{"location":"ddev/cli/#ddev-validate-licenses","text":"Validate third-party license list. Usage: ddev validate licenses [OPTIONS] Options: Name Type Description Default --sync , -s boolean Generate the LICENSE-3rdparty.csv file False --help boolean Show this message and exit. False","title":"licenses"},{"location":"ddev/cli/#ddev-validate-manifest","text":"Validate manifest.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate manifest [OPTIONS] [CHECK] Options: Name Type Description Default --fix boolean Attempt to fix errors False --help boolean Show this message and exit. False","title":"manifest"},{"location":"ddev/cli/#ddev-validate-metadata","text":"Validates metadata.csv files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate metadata [OPTIONS] [CHECK] Options: Name Type Description Default --check-duplicates boolean Output warnings if there are duplicate short names and descriptions False --show-warnings , -w boolean Show warnings in addition to failures False --help boolean Show this message and exit. False","title":"metadata"},{"location":"ddev/cli/#ddev-validate-models","text":"Validate configuration data models. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate models [OPTIONS] [CHECK] Options: Name Type Description Default --sync , -s boolean Generate data models based on specifications False --verbose , -v boolean Verbose mode False --help boolean Show this message and exit. 
False","title":"models"},{"location":"ddev/cli/#ddev-validate-package","text":"Validate all setup.py files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate package [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"package"},{"location":"ddev/cli/#ddev-validate-readmes","text":"Validates README files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate readmes [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"readmes"},{"location":"ddev/cli/#ddev-validate-recommended-monitors","text":"Validate all recommended monitors definition files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate recommended-monitors [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"recommended-monitors"},{"location":"ddev/cli/#ddev-validate-saved-views","text":"Validates saved view files If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate saved-views [OPTIONS] [CHECK] Options: Name Type Description Default --help boolean Show this message and exit. False","title":"saved-views"},{"location":"ddev/cli/#ddev-validate-service-checks","text":"Validate all service_checks.json files. If check is specified, only the check will be validated, if check value is 'changed' will only apply to changed checks, an 'all' or empty check value will validate all README files. Usage: ddev validate service-checks [OPTIONS] [CHECK] Options: Name Type Description Default --sync boolean Generate example configuration files based on specifications False --help boolean Show this message and exit. False","title":"service-checks"},{"location":"ddev/configuration/","text":"Configuration \u00b6 All configuration can be managed entirely by the ddev config command group. To locate the TOML config file, run: ddev config find Repository \u00b6 All CLI commands are aware of the current repository context, defined by the option repo . This option should be a reference to a key in repos which is set to the path of a supported repository. For example, this configuration: repo = \"core\" [repos] core = \"/path/to/integrations-core\" extras = \"/path/to/integrations-extras\" agent = \"/path/to/datadog-agent\" would make it so running e.g. ddev test nginx will look for an integration named nginx in /path/to/integrations-core no matter what directory you are in. If the selected path does not exist, then the current directory will be used. By default, repo is set to core . Agent \u00b6 For running environments with a live Agent , you can select a specific build version to use with the option agent . This option should be a reference to a key in agents which is a mapping of environment types to Agent versions. 
For example, this configuration: agent = \"master\" [agents.master] docker = \"datadog/agent-dev:master\" local = \"latest\" [agents.\"7.18.1\"] docker = \"datadog/agent:7.18.1\" local = \"7.18.1\" would make it so environments that define the type as docker will use the Docker image that was built with the latest commit to the datadog-agent repo. Organization \u00b6 You can switch to using a particular organization with the option org . This option should be a reference to a key in orgs which is a mapping containing data specific to the organization. For example, this configuration: org = \"staging\" [orgs.staging] api_key = \"\" app_key = \"\" site = \"datadoghq.eu\" would use the access keys for the organization named staging and would submit data to the EU region. The supported fields are: api_key app_key site dd_url log_url GitHub \u00b6 To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Run ddev config show to see if your GitHub user and token is set. If not: Run ddev config set github.user Create a personal access token with public_repo and read:org permissions Run ddev config set github.token then paste the token Enable single sign-on for the token Trello \u00b6 To participate as an Agent release manager , you need to set trello.key / trello.token in your config file. Run ddev config show to see if your Trello key and token is set. If not: Go to https://trello.com/app-key and copy your API key Run ddev config set trello.key then paste your API key Go to https://trello.com/1/authorize?key=&name=&scope=read,write&expiration=never&response_type=token , where is your API key and is the name to give your token, e.g. ReleaseTestingYourName . Authorize access and copy your token. Run ddev config set trello.token and paste your token Card Assignment \u00b6 When automatically assigning QA cards , the Trello users which are members of the Agent Release Sprint Trello board will be fetched and cards will be assigned at random to them. Make sure people in your team are all members of the Agent Release Sprint board.","title":"Configuration"},{"location":"ddev/configuration/#configuration","text":"All configuration can be managed entirely by the ddev config command group. To locate the TOML config file, run: ddev config find","title":"Configuration"},{"location":"ddev/configuration/#repository","text":"All CLI commands are aware of the current repository context, defined by the option repo . This option should be a reference to a key in repos which is set to the path of a supported repository. For example, this configuration: repo = \"core\" [repos] core = \"/path/to/integrations-core\" extras = \"/path/to/integrations-extras\" agent = \"/path/to/datadog-agent\" would make it so running e.g. ddev test nginx will look for an integration named nginx in /path/to/integrations-core no matter what directory you are in. If the selected path does not exist, then the current directory will be used. By default, repo is set to core .","title":"Repository"},{"location":"ddev/configuration/#agent","text":"For running environments with a live Agent , you can select a specific build version to use with the option agent . This option should be a reference to a key in agents which is a mapping of environment types to Agent versions. 
For example, this configuration: agent = \"master\" [agents.master] docker = \"datadog/agent-dev:master\" local = \"latest\" [agents.\"7.18.1\"] docker = \"datadog/agent:7.18.1\" local = \"7.18.1\" would make it so environments that define the type as docker will use the Docker image that was built with the latest commit to the datadog-agent repo.","title":"Agent"},{"location":"ddev/configuration/#organization","text":"You can switch to using a particular organization with the option org . This option should be a reference to a key in orgs which is a mapping containing data specific to the organization. For example, this configuration: org = \"staging\" [orgs.staging] api_key = \"\" app_key = \"\" site = \"datadoghq.eu\" would use the access keys for the organization named staging and would submit data to the EU region. The supported fields are: api_key app_key site dd_url log_url","title":"Organization"},{"location":"ddev/configuration/#github","text":"To avoid GitHub's public API rate limits, you need to set github.user / github.token in your config file or use the DD_GITHUB_USER / DD_GITHUB_TOKEN environment variables. Run ddev config show to see if your GitHub user and token is set. If not: Run ddev config set github.user Create a personal access token with public_repo and read:org permissions Run ddev config set github.token then paste the token Enable single sign-on for the token","title":"GitHub"},{"location":"ddev/configuration/#trello","text":"To participate as an Agent release manager , you need to set trello.key / trello.token in your config file. Run ddev config show to see if your Trello key and token is set. If not: Go to https://trello.com/app-key and copy your API key Run ddev config set trello.key then paste your API key Go to https://trello.com/1/authorize?key=&name=&scope=read,write&expiration=never&response_type=token , where is your API key and is the name to give your token, e.g. ReleaseTestingYourName . Authorize access and copy your token. Run ddev config set trello.token and paste your token","title":"Trello"},{"location":"ddev/configuration/#card-assignment","text":"When automatically assigning QA cards , the Trello users which are members of the Agent Release Sprint Trello board will be fetched and cards will be assigned at random to them. Make sure people in your team are all members of the Agent Release Sprint board.","title":"Card Assignment"},{"location":"ddev/plugins/","text":"Plugins \u00b6 tox \u00b6 Our tox plugin dynamically adds environments based on the presence of options defined in the [testenv] section of each integration's tox.ini file. Style \u00b6 Setting dd_check_style to true will enable 2 environments for enforcing our style conventions : style - This will check the formatting and will error if any issues are found. You may use the -s/--style flag of ddev test to execute only this environment. format_style - This will format the code for you, resolving the most common issues caught by style environment. You can run the formatter by using the -fs/--format-style flag of ddev test . pytest \u00b6 Our pytest plugin makes a few fixtures available globally for use during tests. Also, it's responsible for managing the control flow of E2E environments. Fixtures \u00b6 Agent stubs \u00b6 The stubs provided by each fixture will automatically have their state reset before each test. aggregator datadog_agent Check execution \u00b6 Most tests will execute checks via the run method of the AgentCheck interface (if the check is stateful ). 
A consequence of this is that, unlike the check method, exceptions are not propagated to the caller meaning not only can an exception not be asserted, but also errors are silently ignored. The dd_run_check fixture takes a check instance and executes it while also propagating any exceptions like normal. def test_metrics ( aggregator , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 8080 }]) dd_run_check ( check ) ... You can use the extract_message option to condense any exception message to just the original message rather than the full traceback. def test_config ( dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 'foo' }]) with pytest . raises ( Exception , match = '^Option `port` must be an integer$' ): dd_run_check ( check , extract_message = True ) E2E \u00b6 Agent check runner \u00b6 The dd_agent_check fixture will run the integration with a given configuration on a live Agent and return a populated aggregator . It accepts a single dict configuration representing either: a single instance a full configuration with top level keys instances , init_config , etc. Internally, this is a wrapper around ddev env check and you can pass through any supported options or flags. This fixture can only be used from tests marked as e2e . For example: @pytest . mark . e2e def test_e2e_metrics ( dd_agent_check , instance ): aggregator = dd_agent_check ( instance , rate = True ) ... State \u00b6 Occasionally, you will need to persist some data only known at the time of environment creation (like a generated token) through the test and environment tear down phases. To do so, use the following fixtures: dd_save_state - When executing the necessary steps to spin up an environment you may use this to save any object that can be serialized to JSON. For example: dd_save_state ( 'my_data' , { 'foo' : 'bar' }) dd_get_state - This may be used to retrieve the data: my_data = dd_get_state ( 'my_data' , default = {}) Environment manager \u00b6 The fixture dd_environment_runner manages communication between environments and the ddev env command group. You will never use it directly as it runs automatically. It acts upon a fixture named dd_environment that every integration's test suite will define if E2E testing on a live Agent is desired. This fixture is responsible for starting and stopping environments and must adhere to the following requirements: It yield s a single dict representing the default configuration the Agent will use. It must be either: a single instance a full configuration with top level keys instances , init_config , etc. Additionally, you can pass a second dict containing metadata . The setup logic must occur before the yield and the tear down logic must occur after it. Also, both steps must only execute based on the value of environment variables. Setup - only if DDEV_E2E_UP is not set to false Tear down - only if DDEV_E2E_DOWN is not set to false Note The provided Docker and Terraform environment runner utilities will do this automatically for you. Metadata \u00b6 env_type - This is the type of interface that will be used to interact with the Agent. Currently, we support docker (default) and local . env_vars - A dict of environment variables and their values that will be present when starting the Agent. docker_volumes - A list of str representing Docker volume mounts if env_type is docker e.g. /local/path:/agent/container/path:ro . docker_platform - The container architecture to use if env_type is docker . Currently, we support linux (default) and windows . 
logs_config - A list of configs that will be used by the Logs Agent. You will never need to use this directly, but rather via higher level abstractions .","title":"Plugins"},{"location":"ddev/plugins/#plugins","text":"","title":"Plugins"},{"location":"ddev/plugins/#tox","text":"Our tox plugin dynamically adds environments based on the presence of options defined in the [testenv] section of each integration's tox.ini file.","title":"tox"},{"location":"ddev/plugins/#style","text":"Setting dd_check_style to true will enable 2 environments for enforcing our style conventions : style - This will check the formatting and will error if any issues are found. You may use the -s/--style flag of ddev test to execute only this environment. format_style - This will format the code for you, resolving the most common issues caught by style environment. You can run the formatter by using the -fs/--format-style flag of ddev test .","title":"Style"},{"location":"ddev/plugins/#pytest","text":"Our pytest plugin makes a few fixtures available globally for use during tests. Also, it's responsible for managing the control flow of E2E environments.","title":"pytest"},{"location":"ddev/plugins/#fixtures","text":"","title":"Fixtures"},{"location":"ddev/plugins/#agent-stubs","text":"The stubs provided by each fixture will automatically have their state reset before each test. aggregator datadog_agent","title":"Agent stubs"},{"location":"ddev/plugins/#check-execution","text":"Most tests will execute checks via the run method of the AgentCheck interface (if the check is stateful ). A consequence of this is that, unlike the check method, exceptions are not propagated to the caller meaning not only can an exception not be asserted, but also errors are silently ignored. The dd_run_check fixture takes a check instance and executes it while also propagating any exceptions like normal. def test_metrics ( aggregator , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 8080 }]) dd_run_check ( check ) ... You can use the extract_message option to condense any exception message to just the original message rather than the full traceback. def test_config ( dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [{ 'port' : 'foo' }]) with pytest . raises ( Exception , match = '^Option `port` must be an integer$' ): dd_run_check ( check , extract_message = True )","title":"Check execution"},{"location":"ddev/plugins/#e2e","text":"","title":"E2E"},{"location":"ddev/plugins/#agent-check-runner","text":"The dd_agent_check fixture will run the integration with a given configuration on a live Agent and return a populated aggregator . It accepts a single dict configuration representing either: a single instance a full configuration with top level keys instances , init_config , etc. Internally, this is a wrapper around ddev env check and you can pass through any supported options or flags. This fixture can only be used from tests marked as e2e . For example: @pytest . mark . e2e def test_e2e_metrics ( dd_agent_check , instance ): aggregator = dd_agent_check ( instance , rate = True ) ...","title":"Agent check runner"},{"location":"ddev/plugins/#state","text":"Occasionally, you will need to persist some data only known at the time of environment creation (like a generated token) through the test and environment tear down phases. To do so, use the following fixtures: dd_save_state - When executing the necessary steps to spin up an environment you may use this to save any object that can be serialized to JSON. 
For example: dd_save_state ( 'my_data' , { 'foo' : 'bar' }) dd_get_state - This may be used to retrieve the data: my_data = dd_get_state ( 'my_data' , default = {})","title":"State"},{"location":"ddev/plugins/#environment-manager","text":"The fixture dd_environment_runner manages communication between environments and the ddev env command group. You will never use it directly as it runs automatically. It acts upon a fixture named dd_environment that every integration's test suite will define if E2E testing on a live Agent is desired. This fixture is responsible for starting and stopping environments and must adhere to the following requirements: It yield s a single dict representing the default configuration the Agent will use. It must be either: a single instance a full configuration with top level keys instances , init_config , etc. Additionally, you can pass a second dict containing metadata . The setup logic must occur before the yield and the tear down logic must occur after it. Also, both steps must only execute based on the value of environment variables. Setup - only if DDEV_E2E_UP is not set to false Tear down - only if DDEV_E2E_DOWN is not set to false Note The provided Docker and Terraform environment runner utilities will do this automatically for you.","title":"Environment manager"},{"location":"ddev/plugins/#metadata","text":"env_type - This is the type of interface that will be used to interact with the Agent. Currently, we support docker (default) and local . env_vars - A dict of environment variables and their values that will be present when starting the Agent. docker_volumes - A list of str representing Docker volume mounts if env_type is docker e.g. /local/path:/agent/container/path:ro . docker_platform - The container architecture to use if env_type is docker . Currently, we support linux (default) and windows . logs_config - A list of configs that will be used by the Logs Agent. You will never need to use this directly, but rather via higher level abstractions .","title":"Metadata"},{"location":"ddev/test/","text":"Test framework \u00b6 Environments \u00b6 Most integrations monitor services like databases or web servers, rather than system properties like CPU usage. For such cases, you'll want to spin up an environment and gracefully tear it down when tests finish. We define all environment actions in a fixture called dd_environment that looks semantically like this: @pytest . fixture ( scope = 'session' ) def dd_environment (): try : set_up_env () yield some_default_config finally : tear_down_env () This is not only used for regular tests, but is also the basis of our E2E testing . The start command executes everything before the yield and the stop command executes everything after it. We provide a few utilities for common environment types. Docker \u00b6 The docker_run utility makes it easy to create services using docker-compose . from datadog_checks.dev import docker_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with docker_run ( os . path . join ( HERE , 'docker' , 'compose.yaml' )): yield ... Read the reference for more information. Terraform \u00b6 The terraform_run utility makes it easy to create services from a directory of Terraform files. from datadog_checks.dev.terraform import terraform_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with terraform_run ( os . path . join ( HERE , 'terraform' )): yield ... 
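Beyond the basic usage above, terraform_run accepts the same kinds of readiness options as docker_run (see the reference below). A minimal sketch, assuming a hypothetical provisioned service; the endpoint, port, and TF_VAR_ variable are illustrative placeholders only:

import os

import pytest

from datadog_checks.dev.terraform import terraform_run

HERE = os.path.dirname(os.path.abspath(__file__))


@pytest.fixture(scope='session')
def dd_environment():
    # Provision the Terraform directory, then wait a bit and verify that the
    # (hypothetical) service endpoint is reachable before yielding the config.
    with terraform_run(
        os.path.join(HERE, 'terraform'),
        sleep=10,
        endpoints=['http://some-provisioned-host:9090/metrics'],
        env_vars={'TF_VAR_instance_count': '1'},
    ):
        yield {'prometheus_url': 'http://some-provisioned-host:9090/metrics'}
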
Currently, we only use this for services that would be too complex to setup with Docker (like OpenStack) or things that cannot be provided by Docker (like vSphere). We provide some ready-to-use cloud templates that are available for referencing by default. We prefer using GCP when possible. Terraform E2E tests are not run in our public CI as that would needlessly slow down builds. Read the reference for more information. Mocker \u00b6 The mocker fixture is provided by the pytest-mock plugin. This fixture automatically restores anything that was mocked at the end of each test and is more ergonomic to use than stacking decorators or nesting context managers. Here's an example from their docs: def test_foo ( mocker ): # all valid calls mocker . patch ( 'os.remove' ) mocker . patch . object ( os , 'listdir' , autospec = True ) mocked_isfile = mocker . patch ( 'os.path.isfile' ) It also has many other nice features, like using pytest introspection when comparing calls. Benchmarks \u00b6 The benchmark fixture is provided by the pytest-benchmark plugin. It enables the profiling of functions with the low-overhead cProfile module. It is quite useful for seeing the approximate time a given check takes to run, as well as gaining insight into any potential performance bottlenecks. You would use it like this: def test_large_payload ( benchmark , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [ instance ]) # Run once to get any initialization out of the way. dd_run_check ( check ) benchmark ( dd_run_check , check ) To add benchmarks, define environments in tox.ini with bench somewhere in their names: [tox] ... envlist = ... bench ... [testenv:bench] By default, the test command skips all benchmark environments. To run only benchmark environments use the --bench / -b flag. The results are sorted by tottime , which is the total time spent in the given function (and excluding time made in calls to sub-functions). Logs \u00b6 We provide an easy way to utilize log collection with E2E Docker environments . Pass mount_logs=True to docker_run . This will use the logs example in the integration's config spec . For example, the following defines 2 example log files: - template : logs example : - type : file path : /var/log/apache2/access.log source : apache service : apache - type : file path : /var/log/apache2/error.log source : apache service : apache Alternatives If mount_logs is a sequence of int , only the selected indices (starting at 1) will be used. So, using the Apache example above, to only monitor the error log you would set it to [2] . In lieu of a config spec, for whatever reason, you may set mount_logs to a dict containing the standard logs key. All requested log files are available to reference as environment variables for any Docker calls as DD_LOG_ where the indices start at 1. volumes : - ${DD_LOG_1}:/usr/local/apache2/logs/access_log - ${DD_LOG_2}:/usr/local/apache2/logs/error_log When starting the environment, pass -e DD_LOGS_ENABLED=true to activate the Logs Agent. To send logs to a custom url, pass -e DD_LOGS_CONFIG_LOGS_DD_URL=[CUSTOM_URL]:[CUSTOM_PORT] when starting the environment Reference \u00b6 datadog_checks.dev.docker \u00b6 compose_file_active ( compose_file ) \u00b6 Returns a bool indicating whether or not a compose file has any active services. Source code in datadog_checks/dev/docker.py def compose_file_active ( compose_file ): \"\"\" Returns a `bool` indicating whether or not a compose file has any active services. 
\"\"\" command = [ 'docker-compose' , '-f' , compose_file , 'ps' ] lines = run_command ( command , capture = 'out' , check = True ) . stdout . splitlines () for i , line in enumerate ( lines , 1 ): if set ( line . strip ()) == { '-' }: return len ( lines [ i :]) >= 1 return False docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 ) \u00b6 A convenient context manager for safely setting up and tearing down Docker environments. compose_file ( str ) - A path to a Docker compose file. A custom tear down is not required when using this. build ( bool ) - Whether or not to build images for when compose_file is provided service_name ( str ) - Optional name for when compose_file is provided up ( callable ) - A custom setup callable down ( callable ) - A custom tear down callable. This is required when using a custom setup. on_error ( callable ) - A callable called in case of an unhandled exception sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. log_patterns ( List[str|re.Pattern] ) - Regular expression patterns to find in Docker logs before yielding. This is only available when compose_file is provided. Shorthand for adding CheckDockerLogs(compose_file, log_patterns) to the conditions argument. mount_logs ( bool ) - Whether or not to mount log files in Agent containers based on example logs configuration conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution attempts ( int ) - Number of attempts to run up successfully attempts_wait ( int ) - Time to wait between attempts Source code in datadog_checks/dev/docker.py @contextmanager def docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 , ): \"\"\" A convenient context manager for safely setting up and tearing down Docker environments. - **compose_file** (_str_) - A path to a Docker compose file. A custom tear down is not required when using this. - **build** (_bool_) - Whether or not to build images for when `compose_file` is provided - **service_name** (_str_) - Optional name for when ``compose_file`` is provided - **up** (_callable_) - A custom setup callable - **down** (_callable_) - A custom tear down callable. This is required when using a custom setup. - **on_error** (_callable_) - A callable called in case of an unhandled exception - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **log_patterns** (_List[str|re.Pattern]_) - Regular expression patterns to find in Docker logs before yielding. 
This is only available when `compose_file` is provided. Shorthand for adding `CheckDockerLogs(compose_file, log_patterns)` to the `conditions` argument. - **mount_logs** (_bool_) - Whether or not to mount log files in Agent containers based on example logs configuration - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution - **attempts** (_int_) - Number of attempts to run `up` successfully - **attempts_wait** (_int_) - Time to wait between attempts \"\"\" if compose_file and up : raise TypeError ( 'You must select either a compose file or a custom setup callable, not both.' ) if compose_file is not None : if not isinstance ( compose_file , string_types ): raise TypeError ( 'The path to the compose file is not a string: {} ' . format ( repr ( compose_file ))) set_up = ComposeFileUp ( compose_file , build = build , service_name = service_name ) if down is not None : tear_down = down else : tear_down = ComposeFileDown ( compose_file ) if on_error is None : on_error = ComposeFileLogs ( compose_file ) else : set_up = up tear_down = down if attempts is not None : saved_set_up = set_up @retry ( wait = wait_fixed ( attempts_wait ), stop = stop_after_attempt ( attempts )) def set_up_with_retry (): return saved_set_up () set_up = set_up_with_retry docker_conditions = [] if log_patterns is not None : if compose_file is None : raise ValueError ( 'The `log_patterns` convenience is unavailable when using ' 'a custom setup. Please use a custom condition instead.' ) docker_conditions . append ( CheckDockerLogs ( compose_file , log_patterns )) if conditions is not None : docker_conditions . extend ( conditions ) wrappers = list ( wrappers ) if wrappers is not None else [] if mount_logs : if isinstance ( mount_logs , dict ): wrappers . append ( shared_logs ( mount_logs [ 'logs' ])) # Easy mode, read example config else : # An extra level deep because of the context manager check_root = find_check_root ( depth = 2 ) example_log_configs = _read_example_logs_config ( check_root ) if mount_logs is True : wrappers . append ( shared_logs ( example_log_configs )) elif isinstance ( mount_logs , ( list , set )): wrappers . append ( shared_logs ( example_log_configs , mount_whitelist = mount_logs )) else : raise TypeError ( 'mount_logs: expected True, a list or a set, but got {} ' . format ( type ( mount_logs ) . __name__ ) ) with environment_run ( up = set_up , down = tear_down , on_error = on_error , sleep = sleep , endpoints = endpoints , conditions = docker_conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result get_container_ip ( container_id_or_name ) \u00b6 Get a Docker container's IP address from its ID or name. Source code in datadog_checks/dev/docker.py def get_container_ip ( container_id_or_name ): \"\"\" Get a Docker container's IP address from its ID or name. \"\"\" command = [ 'docker' , 'inspect' , '-f' , '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' , container_id_or_name , ] return run_command ( command , capture = 'out' , check = True ) . stdout . strip () get_docker_hostname () \u00b6 Determine the hostname Docker uses based on the environment, defaulting to localhost . 
Source code in datadog_checks/dev/docker.py def get_docker_hostname (): \"\"\" Determine the hostname Docker uses based on the environment, defaulting to `localhost`. \"\"\" return urlparse ( os . getenv ( 'DOCKER_HOST' , '' )) . hostname or 'localhost' datadog_checks.dev.terraform \u00b6 terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ) \u00b6 A convenient context manager for safely setting up and tearing down Terraform environments. directory ( str ) - A path containing Terraform files sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution Source code in datadog_checks/dev/terraform.py @contextmanager def terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ): \"\"\" A convenient context manager for safely setting up and tearing down Terraform environments. - **directory** (_str_) - A path containing Terraform files - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution \"\"\" if not which ( 'terraform' ): pytest . skip ( 'Terraform not available' ) set_up = TerraformUp ( directory ) tear_down = TerraformDown ( directory ) with environment_run ( up = set_up , down = tear_down , sleep = sleep , endpoints = endpoints , conditions = conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"Test framework"},{"location":"ddev/test/#test-framework","text":"","title":"Test framework"},{"location":"ddev/test/#environments","text":"Most integrations monitor services like databases or web servers, rather than system properties like CPU usage. For such cases, you'll want to spin up an environment and gracefully tear it down when tests finish. We define all environment actions in a fixture called dd_environment that looks semantically like this: @pytest . fixture ( scope = 'session' ) def dd_environment (): try : set_up_env () yield some_default_config finally : tear_down_env () This is not only used for regular tests, but is also the basis of our E2E testing . The start command executes everything before the yield and the stop command executes everything after it. We provide a few utilities for common environment types.","title":"Environments"},{"location":"ddev/test/#docker","text":"The docker_run utility makes it easy to create services using docker-compose . from datadog_checks.dev import docker_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with docker_run ( os . path . join ( HERE , 'docker' , 'compose.yaml' )): yield ... 
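The parameters documented in the reference can be combined in a single fixture. As a minimal sketch, assuming a hypothetical service; the compose file path, log pattern, endpoint, and environment variable are illustrative placeholders, not values from a real integration:

import os

import pytest

from datadog_checks.dev import docker_run

HERE = os.path.dirname(os.path.abspath(__file__))


@pytest.fixture(scope='session')
def dd_environment():
    # Start the compose services, then wait for a readiness message in the
    # Docker logs and for the HTTP endpoint to respond before yielding the
    # default instance configuration.
    with docker_run(
        os.path.join(HERE, 'docker', 'compose.yaml'),
        log_patterns=['server is ready'],
        endpoints=['http://localhost:8080/health'],
        env_vars={'SERVICE_VERSION': '1.2.3'},
    ):
        yield {'url': 'http://localhost:8080'}
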
Read the reference for more information.","title":"Docker"},{"location":"ddev/test/#terraform","text":"The terraform_run utility makes it easy to create services from a directory of Terraform files. from datadog_checks.dev.terraform import terraform_run @pytest . fixture ( scope = 'session' ) def dd_environment (): with terraform_run ( os . path . join ( HERE , 'terraform' )): yield ... Currently, we only use this for services that would be too complex to setup with Docker (like OpenStack) or things that cannot be provided by Docker (like vSphere). We provide some ready-to-use cloud templates that are available for referencing by default. We prefer using GCP when possible. Terraform E2E tests are not run in our public CI as that would needlessly slow down builds. Read the reference for more information.","title":"Terraform"},{"location":"ddev/test/#mocker","text":"The mocker fixture is provided by the pytest-mock plugin. This fixture automatically restores anything that was mocked at the end of each test and is more ergonomic to use than stacking decorators or nesting context managers. Here's an example from their docs: def test_foo ( mocker ): # all valid calls mocker . patch ( 'os.remove' ) mocker . patch . object ( os , 'listdir' , autospec = True ) mocked_isfile = mocker . patch ( 'os.path.isfile' ) It also has many other nice features, like using pytest introspection when comparing calls.","title":"Mocker"},{"location":"ddev/test/#benchmarks","text":"The benchmark fixture is provided by the pytest-benchmark plugin. It enables the profiling of functions with the low-overhead cProfile module. It is quite useful for seeing the approximate time a given check takes to run, as well as gaining insight into any potential performance bottlenecks. You would use it like this: def test_large_payload ( benchmark , dd_run_check ): check = AwesomeCheck ( 'awesome' , {}, [ instance ]) # Run once to get any initialization out of the way. dd_run_check ( check ) benchmark ( dd_run_check , check ) To add benchmarks, define environments in tox.ini with bench somewhere in their names: [tox] ... envlist = ... bench ... [testenv:bench] By default, the test command skips all benchmark environments. To run only benchmark environments use the --bench / -b flag. The results are sorted by tottime , which is the total time spent in the given function (and excluding time made in calls to sub-functions).","title":"Benchmarks"},{"location":"ddev/test/#logs","text":"We provide an easy way to utilize log collection with E2E Docker environments . Pass mount_logs=True to docker_run . This will use the logs example in the integration's config spec . For example, the following defines 2 example log files: - template : logs example : - type : file path : /var/log/apache2/access.log source : apache service : apache - type : file path : /var/log/apache2/error.log source : apache service : apache Alternatives If mount_logs is a sequence of int , only the selected indices (starting at 1) will be used. So, using the Apache example above, to only monitor the error log you would set it to [2] . In lieu of a config spec, for whatever reason, you may set mount_logs to a dict containing the standard logs key. All requested log files are available to reference as environment variables for any Docker calls as DD_LOG_ where the indices start at 1. 
volumes : - ${DD_LOG_1}:/usr/local/apache2/logs/access_log - ${DD_LOG_2}:/usr/local/apache2/logs/error_log When starting the environment, pass -e DD_LOGS_ENABLED=true to activate the Logs Agent. To send logs to a custom url, pass -e DD_LOGS_CONFIG_LOGS_DD_URL=[CUSTOM_URL]:[CUSTOM_PORT] when starting the environment","title":"Logs"},{"location":"ddev/test/#reference","text":"","title":"Reference"},{"location":"ddev/test/#datadog_checks.dev.docker","text":"","title":"docker"},{"location":"ddev/test/#datadog_checks.dev.docker.compose_file_active","text":"Returns a bool indicating whether or not a compose file has any active services. Source code in datadog_checks/dev/docker.py def compose_file_active ( compose_file ): \"\"\" Returns a `bool` indicating whether or not a compose file has any active services. \"\"\" command = [ 'docker-compose' , '-f' , compose_file , 'ps' ] lines = run_command ( command , capture = 'out' , check = True ) . stdout . splitlines () for i , line in enumerate ( lines , 1 ): if set ( line . strip ()) == { '-' }: return len ( lines [ i :]) >= 1 return False","title":"compose_file_active()"},{"location":"ddev/test/#datadog_checks.dev.docker.docker_run","text":"A convenient context manager for safely setting up and tearing down Docker environments. compose_file ( str ) - A path to a Docker compose file. A custom tear down is not required when using this. build ( bool ) - Whether or not to build images for when compose_file is provided service_name ( str ) - Optional name for when compose_file is provided up ( callable ) - A custom setup callable down ( callable ) - A custom tear down callable. This is required when using a custom setup. on_error ( callable ) - A callable called in case of an unhandled exception sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. log_patterns ( List[str|re.Pattern] ) - Regular expression patterns to find in Docker logs before yielding. This is only available when compose_file is provided. Shorthand for adding CheckDockerLogs(compose_file, log_patterns) to the conditions argument. mount_logs ( bool ) - Whether or not to mount log files in Agent containers based on example logs configuration conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution attempts ( int ) - Number of attempts to run up successfully attempts_wait ( int ) - Time to wait between attempts Source code in datadog_checks/dev/docker.py @contextmanager def docker_run ( compose_file = None , build = False , service_name = None , up = None , down = None , on_error = None , sleep = None , endpoints = None , log_patterns = None , mount_logs = False , conditions = None , env_vars = None , wrappers = None , attempts = None , attempts_wait = 1 , ): \"\"\" A convenient context manager for safely setting up and tearing down Docker environments. - **compose_file** (_str_) - A path to a Docker compose file. A custom tear down is not required when using this. 
- **build** (_bool_) - Whether or not to build images for when `compose_file` is provided - **service_name** (_str_) - Optional name for when ``compose_file`` is provided - **up** (_callable_) - A custom setup callable - **down** (_callable_) - A custom tear down callable. This is required when using a custom setup. - **on_error** (_callable_) - A callable called in case of an unhandled exception - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **log_patterns** (_List[str|re.Pattern]_) - Regular expression patterns to find in Docker logs before yielding. This is only available when `compose_file` is provided. Shorthand for adding `CheckDockerLogs(compose_file, log_patterns)` to the `conditions` argument. - **mount_logs** (_bool_) - Whether or not to mount log files in Agent containers based on example logs configuration - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution - **attempts** (_int_) - Number of attempts to run `up` successfully - **attempts_wait** (_int_) - Time to wait between attempts \"\"\" if compose_file and up : raise TypeError ( 'You must select either a compose file or a custom setup callable, not both.' ) if compose_file is not None : if not isinstance ( compose_file , string_types ): raise TypeError ( 'The path to the compose file is not a string: {} ' . format ( repr ( compose_file ))) set_up = ComposeFileUp ( compose_file , build = build , service_name = service_name ) if down is not None : tear_down = down else : tear_down = ComposeFileDown ( compose_file ) if on_error is None : on_error = ComposeFileLogs ( compose_file ) else : set_up = up tear_down = down if attempts is not None : saved_set_up = set_up @retry ( wait = wait_fixed ( attempts_wait ), stop = stop_after_attempt ( attempts )) def set_up_with_retry (): return saved_set_up () set_up = set_up_with_retry docker_conditions = [] if log_patterns is not None : if compose_file is None : raise ValueError ( 'The `log_patterns` convenience is unavailable when using ' 'a custom setup. Please use a custom condition instead.' ) docker_conditions . append ( CheckDockerLogs ( compose_file , log_patterns )) if conditions is not None : docker_conditions . extend ( conditions ) wrappers = list ( wrappers ) if wrappers is not None else [] if mount_logs : if isinstance ( mount_logs , dict ): wrappers . append ( shared_logs ( mount_logs [ 'logs' ])) # Easy mode, read example config else : # An extra level deep because of the context manager check_root = find_check_root ( depth = 2 ) example_log_configs = _read_example_logs_config ( check_root ) if mount_logs is True : wrappers . append ( shared_logs ( example_log_configs )) elif isinstance ( mount_logs , ( list , set )): wrappers . append ( shared_logs ( example_log_configs , mount_whitelist = mount_logs )) else : raise TypeError ( 'mount_logs: expected True, a list or a set, but got {} ' . format ( type ( mount_logs ) . 
__name__ ) ) with environment_run ( up = set_up , down = tear_down , on_error = on_error , sleep = sleep , endpoints = endpoints , conditions = docker_conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"docker_run()"},{"location":"ddev/test/#datadog_checks.dev.docker.get_container_ip","text":"Get a Docker container's IP address from its ID or name. Source code in datadog_checks/dev/docker.py def get_container_ip ( container_id_or_name ): \"\"\" Get a Docker container's IP address from its ID or name. \"\"\" command = [ 'docker' , 'inspect' , '-f' , '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' , container_id_or_name , ] return run_command ( command , capture = 'out' , check = True ) . stdout . strip ()","title":"get_container_ip()"},{"location":"ddev/test/#datadog_checks.dev.docker.get_docker_hostname","text":"Determine the hostname Docker uses based on the environment, defaulting to localhost . Source code in datadog_checks/dev/docker.py def get_docker_hostname (): \"\"\" Determine the hostname Docker uses based on the environment, defaulting to `localhost`. \"\"\" return urlparse ( os . getenv ( 'DOCKER_HOST' , '' )) . hostname or 'localhost'","title":"get_docker_hostname()"},{"location":"ddev/test/#datadog_checks.dev.terraform","text":"","title":"terraform"},{"location":"ddev/test/#datadog_checks.dev.terraform.terraform_run","text":"A convenient context manager for safely setting up and tearing down Terraform environments. directory ( str ) - A path containing Terraform files sleep ( float ) - Number of seconds to wait before yielding. This occurs after all conditions are successful. endpoints ( List[str] ) - Endpoints to verify access for before yielding. Shorthand for adding CheckEndpoints(endpoints) to the conditions argument. conditions ( callable ) - A list of callable objects that will be executed before yielding to check for errors env_vars ( dict ) - A dictionary to update os.environ with during execution wrappers ( List[callable] ) - A list of context managers to use during execution Source code in datadog_checks/dev/terraform.py @contextmanager def terraform_run ( directory , sleep = None , endpoints = None , conditions = None , env_vars = None , wrappers = None ): \"\"\" A convenient context manager for safely setting up and tearing down Terraform environments. - **directory** (_str_) - A path containing Terraform files - **sleep** (_float_) - Number of seconds to wait before yielding. This occurs after all conditions are successful. - **endpoints** (_List[str]_) - Endpoints to verify access for before yielding. Shorthand for adding `CheckEndpoints(endpoints)` to the `conditions` argument. - **conditions** (_callable_) - A list of callable objects that will be executed before yielding to check for errors - **env_vars** (_dict_) - A dictionary to update `os.environ` with during execution - **wrappers** (_List[callable]_) - A list of context managers to use during execution \"\"\" if not which ( 'terraform' ): pytest . 
skip ( 'Terraform not available' ) set_up = TerraformUp ( directory ) tear_down = TerraformDown ( directory ) with environment_run ( up = set_up , down = tear_down , sleep = sleep , endpoints = endpoints , conditions = conditions , env_vars = env_vars , wrappers = wrappers , ) as result : yield result","title":"terraform_run()"},{"location":"faq/acknowledgements/","text":"Acknowledgements \u00b6 This is not meant to be an exhaustive list of all the things we use, but rather a token of appreciation for the services and open source software we publicly benefit from. Base \u00b6 The Python programming language , the default language of Agent Integrations, enables us and contributors to think about problems abstractly and express intent as clearly and concisely as possible. Dependencies \u00b6 We would be unable to move as fast as we do without the massive ecosystem of established software others have built. If you've contributed to one of the following projects, thank you! Your code is deployed on many systems and devices across the world. We stand on the shoulders of giants. Dependencies Core adodbapi aerospike aws-requests-auth beautifulsoup4 binary boto boto3 botocore cachetools clickhouse-cityhash clickhouse-driver contextlib2 cryptography cx-oracle ddtrace dnspython enum34 flup flup-py3 futures gearman immutables in-toto ipaddress jaydebeapi jpype1 kafka-python kazoo kubernetes ldap3 lxml lz4 mmh3 openstacksdk orjson paramiko ply prometheus-client protobuf psutil psycopg2-binary pyasn1 pycryptodomex pydantic pyhdb pyjwt pymongo pymqi pymysql pyodbc pyro4 pysmi pysnmp pysnmp-mibs pysocks python-binary-memcached python-dateutil python3-gearman pyvmomi pywin32 pyyaml redis requests requests-kerberos requests-unixsocket requests_ntlm requests_toolbelt rethinkdb scandir securesystemslib selectors34 semver serpent service_identity simplejson six snowflake-connector-python supervisor tuf typing uptime vertica-python win-inet-pton Other Rick Hosting \u00b6 A huge thanks to everyone involved in maintaining PyPI . We rely on it for providing all dependencies for not only tests, but also all Datadog Agent deployments. Documentation \u00b6 MkDocs provides us with powerful and extensible static site generation capabilities, leading to an equally impressive community around it. The Material for MkDocs theme allows us to create beautiful documentation with cross-browser and mobile support. PyMdown Extensions gives us the ability to use advanced HTML, CSS, and JavaScript functionality with simple, easy to use Markdown. CI/CD \u00b6 Azure Pipelines is used for testing all Agent Integrations. A special shout-out to Microsoft for being extremely generous with our allowance of parallel runners; only they were able to meet the requirements of our unique monorepo. 
GitHub Actions is used for all repository automation, like documentation deployment and pull request labeling.","title":"Acknowledgements"},{"location":"faq/acknowledgements/#acknowledgements","text":"This is not meant to be an exhaustive list of all the things we use, but rather a token of appreciation for the services and open source software we publicly benefit from.","title":"Acknowledgements"},{"location":"faq/acknowledgements/#base","text":"The Python programming language , the default language of Agent Integrations, enables us and contributors to think about problems abstractly and express intent as clearly and concisely as possible.","title":"Base"},{"location":"faq/acknowledgements/#dependencies","text":"We would be unable to move as fast as we do without the massive ecosystem of established software others have built. If you've contributed to one of the following projects, thank you! Your code is deployed on many systems and devices across the world. We stand on the shoulders of giants. Dependencies Core adodbapi aerospike aws-requests-auth beautifulsoup4 binary boto boto3 botocore cachetools clickhouse-cityhash clickhouse-driver contextlib2 cryptography cx-oracle ddtrace dnspython enum34 flup flup-py3 futures gearman immutables in-toto ipaddress jaydebeapi jpype1 kafka-python kazoo kubernetes ldap3 lxml lz4 mmh3 openstacksdk orjson paramiko ply prometheus-client protobuf psutil psycopg2-binary pyasn1 pycryptodomex pydantic pyhdb pyjwt pymongo pymqi pymysql pyodbc pyro4 pysmi pysnmp pysnmp-mibs pysocks python-binary-memcached python-dateutil python3-gearman pyvmomi pywin32 pyyaml redis requests requests-kerberos requests-unixsocket requests_ntlm requests_toolbelt rethinkdb scandir securesystemslib selectors34 semver serpent service_identity simplejson six snowflake-connector-python supervisor tuf typing uptime vertica-python win-inet-pton Other Rick","title":"Dependencies"},{"location":"faq/acknowledgements/#hosting","text":"A huge thanks to everyone involved in maintaining PyPI . We rely on it for providing all dependencies for not only tests, but also all Datadog Agent deployments.","title":"Hosting"},{"location":"faq/acknowledgements/#documentation","text":"MkDocs provides us with powerful and extensible static site generation capabilities, leading to an equally impressive community around it. The Material for MkDocs theme allows us to create beautiful documentation with cross-browser and mobile support. PyMdown Extensions gives us the ability to use advanced HTML, CSS, and JavaScript functionality with simple, easy to use Markdown.","title":"Documentation"},{"location":"faq/acknowledgements/#cicd","text":"Azure Pipelines is used for testing all Agent Integrations. A special shout-out to Microsoft for being extremely generous with our allowance of parallel runners; only they were able to meet the requirements of our unique monorepo. GitHub Actions is used for all repository automation, like documentation deployment and pull request labeling.","title":"CI/CD"},{"location":"faq/faq/","text":"FAQ \u00b6 Integration vs Check \u00b6 A Check is any integration whose execution is triggered directly in code by the Datadog Agent . Therefore, all Agent-based integrations written in Python or Go are considered Checks. Why test tests \u00b6 We track the coverage of tests in all cases as a drop in test coverage for test code means a test function or part of it is not called. For an example see this test bug fixed thanks to test coverage. 
See pyca/pynacl!290 and !4280 for more details.","title":"FAQ"},{"location":"faq/faq/#faq","text":"","title":"FAQ"},{"location":"faq/faq/#integration-vs-check","text":"A Check is any integration whose execution is triggered directly in code by the Datadog Agent . Therefore, all Agent-based integrations written in Python or Go are considered Checks.","title":"Integration vs Check"},{"location":"faq/faq/#why-test-tests","text":"We track the coverage of tests in all cases as a drop in test coverage for test code means a test function or part of it is not called. For an example see this test bug fixed thanks to test coverage. See pyca/pynacl!290 and !4280 for more details.","title":"Why test tests"},{"location":"guidelines/conventions/","text":"Conventions \u00b6 File naming \u00b6 Often, libraries that interact with a product will name their packages after the product. So if you name a file .py , and inside try to import the library of the same name, you will get import errors that will be difficult to diagnose. Never name a Python file the same as the integration's name. Attribute naming \u00b6 The base classes may freely add new attributes for new features. Therefore to avoid collisions it is recommended that attribute names be prefixed with underscores, especially for names that are generic. For an example, see below . Stateful checks \u00b6 Since Agent v6, every instance of AgentCheck corresponds to a single YAML instance of an integration defined in the instances array of user configuration. As such, the instance argument the check method accepts is redundant and wasteful since you are parsing the same configuration at every run. Parse configuration once and save the results. Do this class AwesomeCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AwesomeCheck , self ) . __init__ ( name , init_config , instances ) self . _server = self . instance . get ( 'server' , '' ) self . _port = int ( self . instance . get ( 'port' , 8080 )) self . _tags = list ( self . instance . get ( 'tags' , [])) self . _tags . append ( 'server: {} ' . format ( self . _server )) self . _tags . append ( 'port: {} ' . format ( self . _port )) def check ( self , _ ): ... Do NOT do this class AwesomeCheck ( AgentCheck ): def check ( self , instance ): server = instance . get ( 'server' , '' ) port = int ( instance . get ( 'port' , 8080 )) tags = list ( instance . get ( 'tags' , [])) tags . append ( 'server: {} ' . format ( server )) tags . append ( 'port: {} ' . format ( port )) ...","title":"Conventions"},{"location":"guidelines/conventions/#conventions","text":"","title":"Conventions"},{"location":"guidelines/conventions/#file-naming","text":"Often, libraries that interact with a product will name their packages after the product. So if you name a file .py , and inside try to import the library of the same name, you will get import errors that will be difficult to diagnose. Never name a Python file the same as the integration's name.","title":"File naming"},{"location":"guidelines/conventions/#attribute-naming","text":"The base classes may freely add new attributes for new features. Therefore to avoid collisions it is recommended that attribute names be prefixed with underscores, especially for names that are generic. 
For an example, see below .","title":"Attribute naming"},{"location":"guidelines/conventions/#stateful-checks","text":"Since Agent v6, every instance of AgentCheck corresponds to a single YAML instance of an integration defined in the instances array of user configuration. As such, the instance argument the check method accepts is redundant and wasteful since you are parsing the same configuration at every run. Parse configuration once and save the results. Do this class AwesomeCheck ( AgentCheck ): def __init__ ( self , name , init_config , instances ): super ( AwesomeCheck , self ) . __init__ ( name , init_config , instances ) self . _server = self . instance . get ( 'server' , '' ) self . _port = int ( self . instance . get ( 'port' , 8080 )) self . _tags = list ( self . instance . get ( 'tags' , [])) self . _tags . append ( 'server: {} ' . format ( self . _server )) self . _tags . append ( 'port: {} ' . format ( self . _port )) def check ( self , _ ): ... Do NOT do this class AwesomeCheck ( AgentCheck ): def check ( self , instance ): server = instance . get ( 'server' , '' ) port = int ( instance . get ( 'port' , 8080 )) tags = list ( instance . get ( 'tags' , [])) tags . append ( 'server: {} ' . format ( server )) tags . append ( 'port: {} ' . format ( port )) ...","title":"Stateful checks"},{"location":"guidelines/dashboards/","text":"Dashboards \u00b6 Datadog dashboards enable you to efficiently monitor your infrastructure and integrations by displaying and tracking key metrics. Integration Preset Dashboards \u00b6 If you would like to create a default dashboard for an integration, follow the guidelines in the Best Practices section. Exporting a dashboard payload \u00b6 When you've created a dashboard in the Datadog UI, you can export the dashboard payload to be included in its integration's assets directory. Ensure that you have set an api_key and app_key for the org that contains the new dashboard in the ddev configuration . Run the following command to export the dashboard : ddev meta dash export Tip If the dashboard is for a contributor-maintained integration in the integration-extras repo, run the command with the --extras or -e flag. The command will add the dashboard definition to the manifest.json file of the integration. The dashboard JSON payload will be available in /assets/dashboards/.json . Commit the changes and create a pull request. Verify the Preset Dashboard \u00b6 Once your PR is merged and synced on production, you can find your dashboard in the Dashboard List page. Tip Make sure the integration tile is Installed in order to see the preset dashboard in the list. Ensure logos render correctly on the Dashboard List page and within the preset dashboard. Best Practices \u00b6 General \u00b6 When creating a new dashboard, select the default dashboard type (internally called multisize layout). Dashboard titles should contain the integration name. Some examples of a good dashboard title are Syclla and Cilium Overview . Warning Avoid using - (hyphen) in the dashboard title as the dashboard URL is generated from the title. Research the metrics supported by the integration and consider grouping them in relevant categories. Important metrics that are key to the performance and overview of the integration should be at the top. Always include an \"about\" group for the integration containing a brief description and helpful links. Also include an \"overview\" group containing a few of the most important metrics, and place it at the top of the dashboard. 
Edit the \"about\" group and select the \"banner\" display option, then link to a banner image like this: /static/images/integration_dashboard/your-image.png . We store integration banner images in github, add a new one by creating a PR in the web-ui repo . The \"about\" section should contain content, not data; the \"overview\" section should contain data. Avoid making the \"about\" section full-width. Use Group widgets to title and group sections, rather than note widgets as you might on a screenboard. Use partial width groups to display groups side-by-side. Most dashboards should display every widget within a group. Timeseries widgets should be at least 4 columns wide in order not to appear squashed on smaller displays Stream widgets should be at least 6 columns wide (half the dashboard width) for readability. Avoid placing full-width stream widgets in the middle of a dashboard as they'll \"trap\" scroll events. It's also useful to put stream widgets in a group by themselves so they can be collapsed. Add an event stream only if the service monitored by the dashboard is reporting events. Use sources:service_name . Which widgets best represent your data? Try using a mix of widget types and sizes. Explore visualizations and formatting options until you're confident your dashboard is as clear as it can be. Sometimes a whole dashboard of timeseries is ok, but other times variety can improve things. The most commonly used metric widgets are timeseries , query values , and tables . For more information on the available widget types, see the list of supported dashboard widgets . Add a logo to the dashboard header. The integration logo will automatically appear in the header if the icon exists here and the integration_id matches the icon name. That means it will only appear when the dashboard you're working on is made into the official integration board. Try to make the left and right halves of your dashboard symmetrical in high density mode. Users with large monitors will see your dashboard in high density mode by default, so it's important to make sure the group relationships make sense, and the dashboard looks good. You can adjust group heights to achieve this, and move groups between the left and right halves. a. (perfectly symmetrical) b. (close enough) Template variables allow you to dynamically filter one or more widgets in a dashboard. Template variables must be universal and accessible by any user or account using the monitored service. Make sure all relevant graphs are listening to the relevant template variable filters. Tip Adding *=scope as a template variable is useful since users can access all their own tags. Copy \u00b6 Prefer concise graph titles that start with the most important information. Avoid common phrases such as \"number of\", and don't include the integration title e.g. \"Memcached Load\". Concise title (good) Verbose title (bad) Events per node Number of Kubernetes events per node Pending tasks: [$node_name] Total number of pending tasks in [$node_name] Read/write operations Number of read/write operations Connections to server - rate Rate of connections to server Load Memcached Load Avoid repeating the group title or integration name in every widget in a group, especially if the widgets are query values with a custom unit of the same name. Note the word \"shards\" in each widget title in the group named \"shards\". Always alias formulas Group titles should be title case. Widget titles should be sentence case. 
If you're showing a legend, make sure the aliases are easy to understand. Graph titles should summarize the queried metric. Do not indicate the unit in the graph title because unit types are displayed automatically from metadata. An exception to this is if the calculation of the query represents a different type of unit. QA \u00b6 Always check a dashboard at 1280px wide and 2560px wide to see how it looks on a smaller laptop and a larger monitor. The most common screen widths for dashboards are 1920, 1680, 1440, 2560, and 1280px, making up more than half of all dashboard page views combined. Tip Use TV Mode to ensure that the whole dashboard fits your screen. Visual Style \u00b6 Format notes to make them fit their use case. Try the presets \"caption\", \"annotation\", or \"header\", or pick your own combination of styles. Avoid using the smallest font size for notes that are long or include complex formatting, like bulleted lists or code blocks. Use colors to highlight important relationships and to improve readability, not for style. If several groups are related, apply the same group header color to all of them. If you've applied a green header color to a group, try making its notes green as well. If two groups are related, but one is more important, try using the \"vivid\" color on the important group and the \"light\" color on the less important group. Don't be afraid to leave groups with white headers, and be careful not to overuse color e.g. don't make every group on a dashboard vivid blue. Also avoid using gray headers. Use legends when they make sense. Legends make it easy to read a graph without having to hover over each series or maximize the widget. Make sure you use aliases so the legend is easy to read. Automatic mode for legends is a great option that hides legends when space is tight and shows them when there's room. If you want users to compare two graphs side-by-side, make sure their x-axes align. If one graph is showing a legend and the other isn't, the x-axes won't align - make sure they either both show a legend or both do not. For timeseries, base the display type on the type of metric Types of metric Display type Volume (e.g. Number of connections) area Counts (e.g. Number of errors) bars Multiple groups or default lines Examples \u00b6 Elasticsearch \u00b6 Attention-grabbing \"about\" section with a banner image, concise copy, useful links, and a good typography hierarchy A brief, annotated \"overview\" section with the most important statistics, right at the top Simple graph titles. Group titles in title-case Close to symmetrical in high density mode Well formatted, concise notes Color coordination between related groups, notes within groups, and graphs within groups","title":"Dashboards"},{"location":"guidelines/dashboards/#dashboards","text":"Datadog dashboards enable you to efficiently monitor your infrastructure and integrations by displaying and tracking key metrics.","title":"Dashboards"},{"location":"guidelines/dashboards/#integration-preset-dashboards","text":"If you would like to create a default dashboard for an integration, follow the guidelines in the Best Practices section.","title":"Integration Preset Dashboards"},{"location":"guidelines/dashboards/#exporting-a-dashboard-payload","text":"When you've created a dashboard in the Datadog UI, you can export the dashboard payload to be included in its integration's assets directory. Ensure that you have set an api_key and app_key for the org that contains the new dashboard in the ddev configuration . 
Run the following command to export the dashboard : ddev meta dash export Tip If the dashboard is for a contributor-maintained integration in the integration-extras repo, run the command with the --extras or -e flag. The command will add the dashboard definition to the manifest.json file of the integration. The dashboard JSON payload will be available in /assets/dashboards/.json . Commit the changes and create a pull request.","title":"Exporting a dashboard payload"},{"location":"guidelines/dashboards/#verify-the-preset-dashboard","text":"Once your PR is merged and synced on production, you can find your dashboard in the Dashboard List page. Tip Make sure the integration tile is Installed in order to see the preset dashboard in the list. Ensure logos render correctly on the Dashboard List page and within the preset dashboard.","title":"Verify the Preset Dashboard"},{"location":"guidelines/dashboards/#best-practices","text":"","title":"Best Practices"},{"location":"guidelines/dashboards/#general","text":"When creating a new dashboard, select the default dashboard type (internally called multisize layout). Dashboard titles should contain the integration name. Some examples of a good dashboard title are Syclla and Cilium Overview . Warning Avoid using - (hyphen) in the dashboard title as the dashboard URL is generated from the title. Research the metrics supported by the integration and consider grouping them in relevant categories. Important metrics that are key to the performance and overview of the integration should be at the top. Always include an \"about\" group for the integration containing a brief description and helpful links. Also include an \"overview\" group containing a few of the most important metrics, and place it at the top of the dashboard. Edit the \"about\" group and select the \"banner\" display option, then link to a banner image like this: /static/images/integration_dashboard/your-image.png . We store integration banner images in github, add a new one by creating a PR in the web-ui repo . The \"about\" section should contain content, not data; the \"overview\" section should contain data. Avoid making the \"about\" section full-width. Use Group widgets to title and group sections, rather than note widgets as you might on a screenboard. Use partial width groups to display groups side-by-side. Most dashboards should display every widget within a group. Timeseries widgets should be at least 4 columns wide in order not to appear squashed on smaller displays Stream widgets should be at least 6 columns wide (half the dashboard width) for readability. Avoid placing full-width stream widgets in the middle of a dashboard as they'll \"trap\" scroll events. It's also useful to put stream widgets in a group by themselves so they can be collapsed. Add an event stream only if the service monitored by the dashboard is reporting events. Use sources:service_name . Which widgets best represent your data? Try using a mix of widget types and sizes. Explore visualizations and formatting options until you're confident your dashboard is as clear as it can be. Sometimes a whole dashboard of timeseries is ok, but other times variety can improve things. The most commonly used metric widgets are timeseries , query values , and tables . For more information on the available widget types, see the list of supported dashboard widgets . Add a logo to the dashboard header. The integration logo will automatically appear in the header if the icon exists here and the integration_id matches the icon name. 
That means it will only appear when the dashboard you're working on is made into the official integration board. Try to make the left and right halves of your dashboard symmetrical in high density mode. Users with large monitors will see your dashboard in high density mode by default, so it's important to make sure the group relationships make sense, and the dashboard looks good. You can adjust group heights to achieve this, and move groups between the left and right halves. a. (perfectly symmetrical) b. (close enough) Template variables allow you to dynamically filter one or more widgets in a dashboard. Template variables must be universal and accessible by any user or account using the monitored service. Make sure all relevant graphs are listening to the relevant template variable filters. Tip Adding *=scope as a template variable is useful since users can access all their own tags.","title":"General"},{"location":"guidelines/dashboards/#copy","text":"Prefer concise graph titles that start with the most important information. Avoid common phrases such as \"number of\", and don't include the integration title e.g. \"Memcached Load\". Concise title (good) Verbose title (bad) Events per node Number of Kubernetes events per node Pending tasks: [$node_name] Total number of pending tasks in [$node_name] Read/write operations Number of read/write operations Connections to server - rate Rate of connections to server Load Memcached Load Avoid repeating the group title or integration name in every widget in a group, especially if the widgets are query values with a custom unit of the same name. Note the word \"shards\" in each widget title in the group named \"shards\". Always alias formulas Group titles should be title case. Widget titles should be sentence case. If you're showing a legend, make sure the aliases are easy to understand. Graph titles should summarize the queried metric. Do not indicate the unit in the graph title because unit types are displayed automatically from metadata. An exception to this is if the calculation of the query represents a different type of unit.","title":"Copy"},{"location":"guidelines/dashboards/#qa","text":"Always check a dashboard at 1280px wide and 2560px wide to see how it looks on a smaller laptop and a larger monitor. The most common screen widths for dashboards are 1920, 1680, 1440, 2560, and 1280px, making up more than half of all dashboard page views combined. Tip Use TV Mode to ensure that the whole dashboard fits your screen.","title":"QA"},{"location":"guidelines/dashboards/#visual-style","text":"Format notes to make them fit their use case. Try the presets \"caption\", \"annotation\", or \"header\", or pick your own combination of styles. Avoid using the smallest font size for notes that are long or include complex formatting, like bulleted lists or code blocks. Use colors to highlight important relationships and to improve readability, not for style. If several groups are related, apply the same group header color to all of them. If you've applied a green header color to a group, try making its notes green as well. If two groups are related, but one is more important, try using the \"vivid\" color on the important group and the \"light\" color on the less important group. Don't be afraid to leave groups with white headers, and be careful not to overuse color e.g. don't make every group on a dashboard vivid blue. Also avoid using gray headers. Use legends when they make sense. 
Legends make it easy to read a graph without having to hover over each series or maximize the widget. Make sure you use aliases so the legend is easy to read. Automatic mode for legends is a great option that hides legends when space is tight and shows them when there's room. If you want users to compare two graphs side-by-side, make sure their x-axes align. If one graph is showing a legend and the other isn't, the x-axes won't align - make sure they either both show a legend or both do not. For timeseries, base the display type on the type of metric Types of metric Display type Volume (e.g. Number of connections) area Counts (e.g. Number of errors) bars Multiple groups or default lines","title":"Visual Style"},{"location":"guidelines/dashboards/#examples","text":"","title":"Examples"},{"location":"guidelines/dashboards/#elasticsearch","text":"Attention-grabbing \"about\" section with a banner image, concise copy, useful links, and a good typography hierarchy A brief, annotated \"overview\" section with the most important statistics, right at the top Simple graph titles. Group titles in title-case Close to symmetrical in high density mode Well formatted, concise notes Color coordination between related groups, notes within groups, and graphs within groups","title":"Elasticsearch"},{"location":"guidelines/pr/","text":"Pull requests \u00b6 Title \u00b6 The release command uses the title of pull requests as-is to generate changelog entries. Therefore, be as explicit and concise as possible when describing code changes. For example, do not say Fix typo , but rather something like Fix typo in debug log messages . As each integration has its own release cycle and changelog, and every pull request is automatically labeled appropriately by our CI, there is no need include the integration's name in the title. For the base package and dev package , you may want to prefix the title with the component being modified e.g. [openmetrics] or [cli] . Changelog label \u00b6 Our labeler will automatically detect if changes would not impact shipped code and apply changelog/no-changelog . In all other cases, you must manually apply changelog/ . For changelog types, we adhere to those defined by Keep a Changelog : Added for new features or any non-trivial refactors. Changed for changes in existing functionality. Deprecated for soon-to-be removed features. Removed for now removed features. Fixed for any bug fixes. Security in case of vulnerabilities. Caveat If you are fixing something that is not yet released, apply changelog/no-changelog . Separation of concerns \u00b6 Every pull request should do one thing only, for many reasons: Easy Git management - For example, if you are editing documentation and notice an error in the shipped example configuration, you should fix the error in a separate pull request. Doing so will enable a clean cherry-pick or revert of the bug fix should the need arise. Easier release management - Let's consider how the release command would handle the case of making a code change to multiple integrations. If one of the changes only fixes a typo in a code comment, that integration will still be released as indicated by the label. If both changes should indeed be released but they do different things, only one integration's changelog entry would make sense. 
Merges \u00b6 We only allow GitHub's squash and merge , for 2 reasons: To keep a clean Git history Our release tooling relies on commits being suffixed with the PR number in order to list changes between versions","title":"Pull requests"},{"location":"guidelines/pr/#pull-requests","text":"","title":"Pull requests"},{"location":"guidelines/pr/#title","text":"The release command uses the title of pull requests as-is to generate changelog entries. Therefore, be as explicit and concise as possible when describing code changes. For example, do not say Fix typo , but rather something like Fix typo in debug log messages . As each integration has its own release cycle and changelog, and every pull request is automatically labeled appropriately by our CI, there is no need include the integration's name in the title. For the base package and dev package , you may want to prefix the title with the component being modified e.g. [openmetrics] or [cli] .","title":"Title"},{"location":"guidelines/pr/#changelog-label","text":"Our labeler will automatically detect if changes would not impact shipped code and apply changelog/no-changelog . In all other cases, you must manually apply changelog/ . For changelog types, we adhere to those defined by Keep a Changelog : Added for new features or any non-trivial refactors. Changed for changes in existing functionality. Deprecated for soon-to-be removed features. Removed for now removed features. Fixed for any bug fixes. Security in case of vulnerabilities. Caveat If you are fixing something that is not yet released, apply changelog/no-changelog .","title":"Changelog label"},{"location":"guidelines/pr/#separation-of-concerns","text":"Every pull request should do one thing only, for many reasons: Easy Git management - For example, if you are editing documentation and notice an error in the shipped example configuration, you should fix the error in a separate pull request. Doing so will enable a clean cherry-pick or revert of the bug fix should the need arise. Easier release management - Let's consider how the release command would handle the case of making a code change to multiple integrations. If one of the changes only fixes a typo in a code comment, that integration will still be released as indicated by the label. If both changes should indeed be released but they do different things, only one integration's changelog entry would make sense.","title":"Separation of concerns"},{"location":"guidelines/pr/#merges","text":"We only allow GitHub's squash and merge , for 2 reasons: To keep a clean Git history Our release tooling relies on commits being suffixed with the PR number in order to list changes between versions","title":"Merges"},{"location":"guidelines/style/","text":"Style \u00b6 These are all the checkers used by our style enforcement . black \u00b6 An opinionated formatter, like JavaScript's prettier and Golang's gofmt . isort \u00b6 A tool to sort imports lexicographically, by section, and by type. We use the 5 standard sections: __future__ , stdlib, third party, first party, and local. datadog_checks is configured as a first party namespace. flake8 \u00b6 An easy-to-use wrapper around pycodestyle and pyflakes . We select everything it provides and only ignore a few things to give precedence to other tools. bugbear \u00b6 A flake8 plugin for finding likely bugs and design problems in programs. We enable: B001 : Do not use bare except: , it also catches unexpected events like memory errors, interrupts, system exit, and so on. Prefer except Exception: . 
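As a contrived illustration of B001 (a hypothetical sketch, not taken from any integration), compare the two handlers below; only the second lets interrupts and system exits propagate:

```python
def parse_port(raw_value):
    # Flagged by B001: a bare `except:` also swallows SystemExit, KeyboardInterrupt, etc.
    try:
        return int(raw_value)
    except:
        return None


def parse_port_fixed(raw_value):
    # Preferred: only regular errors are caught.
    try:
        return int(raw_value)
    except Exception:
        return None
```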
B003 : Assigning to os.environ doesn't clear the environment. Subprocesses are going to see outdated variables, in disagreement with the current process. Use os.environ.clear() or the env= argument to Popen. B006 : Do not use mutable data structures for argument defaults. All calls reuse one instance of that data structure, persisting changes between them. B007 : Loop control variable not used within the loop body. If this is intended, start the name with an underscore. B301 : Python 3 does not include .iter* methods on dictionaries. The default behavior is to return iterables. Simply remove the iter prefix from the method. For Python 2 compatibility, also prefer the Python 3 equivalent if you expect that the size of the dict to be small and bounded. The performance regression on Python 2 will be negligible and the code is going to be the clearest. Alternatively, use six.iter* . B305 : .next() is not a thing on Python 3. Use the next() builtin. For Python 2 compatibility, use six.next() . B306 : BaseException.message has been deprecated as of Python 2.6 and is removed in Python 3. Use str(e) to access the user-readable message. Use e.args to access arguments passed to the exception. B902 : Invalid first argument used for method. Use self for instance methods, and cls for class methods. logging-format \u00b6 A flake8 plugin for ensuring a consistent logging format. We enable: G001 : Logging statements should not use string.format() for their first argument G002 : Logging statements should not use % formatting for their first argument G003 : Logging statements should not use + concatenation for their first argument G004 : Logging statements should not use f\"...\" for their first argument (only in Python 3.6+) G010 : Logging statements should not use warn (use warning instead) G100 : Logging statements should not use extra arguments unless whitelisted G201 : Logging statements should not use error(..., exc_info=True) (use exception(...) instead) G202 : Logging statements should not use redundant exc_info=True in exception Mypy \u00b6 A comment-based type checker allowing a mix of dynamic and static typing. This is optional for now. In order to enable mypy for a specific integration, open its tox.ini file and add the 2 lines in the correct section: [testenv] dd_check_types = true dd_mypy_args = --py2 datadog_checks/ tests/ ... The dd_mypy_args defines the mypy command line option for this specific integration. --py2 is here to make sure the integration is Python2.7 compatible. Here are some useful flags you can add: --check-untyped-defs : Type-checks the interior of functions without type annotations. --disallow-untyped-defs : Disallows defining functions without type annotations or with incomplete type annotations. The datadog_checks/ tests/ arguments represent the list of files that mypy should type check. Feel free to edit them as desired, including removing tests/ (if you'd prefer to not type-check the test suite), or targeting specific files (when doing partial type checking). For a complete example, see the datadog_checks_base tox configuration . Note that there is a default configuration in the mypy.ini file. Example \u00b6 Extracted from rethinkdb : from typing import Any , Iterator # Contains the different types used import rethinkdb from .document_db.types import Metric class RethinkDBCheck ( AgentCheck ): def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None super ( RethinkDBCheck , self ) . 
__init__ ( * args , ** kwargs ) def collect_metrics ( self , conn ): # type: (rethinkdb.net.Connection) -> Iterator[Metric] \"\"\" Collect metrics from the RethinkDB cluster we are connected to. \"\"\" for query in self . queries : for metric in query . run ( logger = self . log , conn = conn , config = self . _config ): yield metric Take a look at vsphere or ibm_mq integrations for more examples.","title":"Style"},{"location":"guidelines/style/#style","text":"These are all the checkers used by our style enforcement .","title":"Style"},{"location":"guidelines/style/#black","text":"An opinionated formatter, like JavaScript's prettier and Golang's gofmt .","title":"black"},{"location":"guidelines/style/#isort","text":"A tool to sort imports lexicographically, by section, and by type. We use the 5 standard sections: __future__ , stdlib, third party, first party, and local. datadog_checks is configured as a first party namespace.","title":"isort"},{"location":"guidelines/style/#flake8","text":"An easy-to-use wrapper around pycodestyle and pyflakes . We select everything it provides and only ignore a few things to give precedence to other tools.","title":"flake8"},{"location":"guidelines/style/#bugbear","text":"A flake8 plugin for finding likely bugs and design problems in programs. We enable: B001 : Do not use bare except: , it also catches unexpected events like memory errors, interrupts, system exit, and so on. Prefer except Exception: . B003 : Assigning to os.environ doesn't clear the environment. Subprocesses are going to see outdated variables, in disagreement with the current process. Use os.environ.clear() or the env= argument to Popen. B006 : Do not use mutable data structures for argument defaults. All calls reuse one instance of that data structure, persisting changes between them. B007 : Loop control variable not used within the loop body. If this is intended, start the name with an underscore. B301 : Python 3 does not include .iter* methods on dictionaries. The default behavior is to return iterables. Simply remove the iter prefix from the method. For Python 2 compatibility, also prefer the Python 3 equivalent if you expect that the size of the dict to be small and bounded. The performance regression on Python 2 will be negligible and the code is going to be the clearest. Alternatively, use six.iter* . B305 : .next() is not a thing on Python 3. Use the next() builtin. For Python 2 compatibility, use six.next() . B306 : BaseException.message has been deprecated as of Python 2.6 and is removed in Python 3. Use str(e) to access the user-readable message. Use e.args to access arguments passed to the exception. B902 : Invalid first argument used for method. Use self for instance methods, and cls for class methods.","title":"bugbear"},{"location":"guidelines/style/#logging-format","text":"A flake8 plugin for ensuring a consistent logging format. We enable: G001 : Logging statements should not use string.format() for their first argument G002 : Logging statements should not use % formatting for their first argument G003 : Logging statements should not use + concatenation for their first argument G004 : Logging statements should not use f\"...\" for their first argument (only in Python 3.6+) G010 : Logging statements should not use warn (use warning instead) G100 : Logging statements should not use extra arguments unless whitelisted G201 : Logging statements should not use error(..., exc_info=True) (use exception(...) 
instead) G202 : Logging statements should not use redundant exc_info=True in exception","title":"logging-format"},{"location":"guidelines/style/#mypy","text":"A comment-based type checker allowing a mix of dynamic and static typing. This is optional for now. In order to enable mypy for a specific integration, open its tox.ini file and add the 2 lines in the correct section: [testenv] dd_check_types = true dd_mypy_args = --py2 datadog_checks/ tests/ ... The dd_mypy_args defines the mypy command line option for this specific integration. --py2 is here to make sure the integration is Python2.7 compatible. Here are some useful flags you can add: --check-untyped-defs : Type-checks the interior of functions without type annotations. --disallow-untyped-defs : Disallows defining functions without type annotations or with incomplete type annotations. The datadog_checks/ tests/ arguments represent the list of files that mypy should type check. Feel free to edit them as desired, including removing tests/ (if you'd prefer to not type-check the test suite), or targeting specific files (when doing partial type checking). For a complete example, see the datadog_checks_base tox configuration . Note that there is a default configuration in the mypy.ini file.","title":"Mypy"},{"location":"guidelines/style/#example","text":"Extracted from rethinkdb : from typing import Any , Iterator # Contains the different types used import rethinkdb from .document_db.types import Metric class RethinkDBCheck ( AgentCheck ): def __init__ ( self , * args , ** kwargs ): # type: (*Any, **Any) -> None super ( RethinkDBCheck , self ) . __init__ ( * args , ** kwargs ) def collect_metrics ( self , conn ): # type: (rethinkdb.net.Connection) -> Iterator[Metric] \"\"\" Collect metrics from the RethinkDB cluster we are connected to. \"\"\" for query in self . queries : for metric in query . run ( logger = self . log , conn = conn , config = self . _config ): yield metric Take a look at vsphere or ibm_mq integrations for more examples.","title":"Example"},{"location":"meta/cd/","text":"Continuous delivery \u00b6","title":"CD"},{"location":"meta/cd/#continuous-delivery","text":"","title":"Continuous delivery"},{"location":"meta/ci/","text":"Continuous integration \u00b6 Tests \u00b6 All Agent Integrations use Azure Pipelines to execute tests. Execution \u00b6 Every runner will execute test stages in the following order: Unit & integration E2E Benchmarks Platforms \u00b6 We make extensive use of Microsoft-hosted agents . Windows-only integrations run on Windows Server 2019 with Visual Studio 2019 All other integrations run on Ubuntu 18.04 LTS Some things are tested on multiple platforms, like the base package and the Disk check. Pipelines \u00b6 Pull requests \u00b6 Every commit to a branch tied to an open pull request triggers a Linux and Windows job . Each runner will test any integration that was changed , with the Windows runner being further restricted to Windows-only integrations. If the base package is modified, jobs will be triggered for every integration, similar to the pipeline for master . Master \u00b6 Every commit to the master branch triggers one or more jobs for every integration . Scripts \u00b6 Some integrations require additional set up such as the installation of system dependencies. As we only want these extra steps to occur when necessary, there is a stage ran for every job that will detect what needs to be done and execute the appropriate scripts . 
As integrations may need different set up on different platforms, all scripts live under a directory named after the platform. All scripts in the directory will be executed in lexicographical order. Validations \u00b6 In addition to running tests on our CI, there are also some validations that are run to check for correctness of changes to various components of integrations. If any of these validations fail on your branch, then the CI will fail. In short, each validation is a ddev command, which fails if the component it is validating is not correct. See the ddev documentation and source code for the full docs for each validation. Tip A list of the current validations can be found here . CI configuration \u00b6 ddev validate ci This validates that all CI entries for integrations are valid. This includes checking if the integration has the correct codecov config, and has a valid CI entry if it is testable. Tip Run ddev validate ci --fix to resolve most errors. Agent requirements \u00b6 ddev validate agent-reqs This validates that each integration version is in sync with the requirements-agent-release.txt file. It is uncommon for this to fail because the release process is automated. Codeowners \u00b6 ddev validate codeowners This validates that every integration has a codeowner entry . If you fail this validation, add an entry in the codewners file corresponding to any newly added integration. Note: This validation command is only run when contributing to integrations-extras Default configuration files \u00b6 ddev validate config This verifies that the config specs for all integrations are valid by enforcing our configuration spec schema . The most common failure at this validation stage is some version of File needs to be synced. To resolve this issue, you can run ddev validate config --sync If you see failures regarding formatting or missing parameters, see our config spec documentation for more details on how to construc configuration specs. Dashboard definition files \u00b6 ddev validate dashboards This validates that dashboards are formatted correctly. This means that they need to be proper JSON and generated from Datadog's /dashboard API . Tip If you see a failure regarding use of the screen endpoint, consider using our dashboard utility command to generate your dashboard payload. Dependencies \u00b6 ddev validate dep This command: Verifies the uniqueness of dependency versions across all checks. Verifies all the dependencies are pinned. Verifies the embedded Python environment defined in the base check and requirements listed in every integration are compatible. This validation only applies if your work introduces new external dependencies. Manifest files \u00b6 ddev validate manifest This validates that the manifest files contain required fields, are formatted correctly, and don't contain common errors. See the Datadog docs for more detailed constraints. Metadata \u00b6 ddev validate metadata This checks that every metadata.csv file is formatted correctly. See the Datadog docs for more detailed constraints. README files \u00b6 ddev validate readmes This ensures that every integration's README.md file is formatted correctly. The main purpose of this validation is to ensure that any image linked in the readme exists and that all images are located in an integration's /image directory. Saved views data \u00b6 ddev validate saved-views This validates that saved views for an integration are formatted correctly and contain required fields, such as \"type\". 
Tip View example saved views for inspiration and guidance. Service check data \u00b6 ddev validate service-checks This checks that every service check file is formatted correctly. See the Datadog docs for more specific constraints. Imports \u00b6 ddev validate imports This verifies that all integrations import the base package in the correct way, such as: from datadog_checks.base.foo import bar Tip See the New Integration Instructions for more examples of how to use the base package. Labeler \u00b6 We use a GitHub Action to automatically add labels to pull requests. Tip If the Labeler CI step fails on your PR, it's probably because your PR is from a fork. Don't worry if this happens- the team can manually add labels for you. The labeler is configured to add the following: Label Condition integration/ any directory at the root that actually contains an integration documentation any Markdown, config specs , manifest.json , or anything in /docs/ dev/testing Codecov or Azure Pipelines config dev/tooling GitLab (see CD ), GitHub Actions , or Stale bot config, or the ddev CLI dependencies any change in shipped dependencies release any base package , dev package , or integration release changelog/no-changelog any release, or if all files don't modify code that is shipped The changelog/ label must be applied manually . Fork \u00b6 We forked the official action to support the following: actions/labeler!43 actions/labeler!44 a special all: prefix modifier indicating the pattern must match every file Docs \u00b6 Stale bot \u00b6 We use a GitHub App that is configured to address abandoned issues and pull requests.","title":"CI"},{"location":"meta/ci/#continuous-integration","text":"","title":"Continuous integration"},{"location":"meta/ci/#tests","text":"All Agent Integrations use Azure Pipelines to execute tests.","title":"Tests"},{"location":"meta/ci/#execution","text":"Every runner will execute test stages in the following order: Unit & integration E2E Benchmarks","title":"Execution"},{"location":"meta/ci/#platforms","text":"We make extensive use of Microsoft-hosted agents . Windows-only integrations run on Windows Server 2019 with Visual Studio 2019 All other integrations run on Ubuntu 18.04 LTS Some things are tested on multiple platforms, like the base package and the Disk check.","title":"Platforms"},{"location":"meta/ci/#pipelines","text":"","title":"Pipelines"},{"location":"meta/ci/#pull-requests","text":"Every commit to a branch tied to an open pull request triggers a Linux and Windows job . Each runner will test any integration that was changed , with the Windows runner being further restricted to Windows-only integrations. If the base package is modified, jobs will be triggered for every integration, similar to the pipeline for master .","title":"Pull requests"},{"location":"meta/ci/#master","text":"Every commit to the master branch triggers one or more jobs for every integration .","title":"Master"},{"location":"meta/ci/#scripts","text":"Some integrations require additional set up such as the installation of system dependencies. As we only want these extra steps to occur when necessary, there is a stage ran for every job that will detect what needs to be done and execute the appropriate scripts . As integrations may need different set up on different platforms, all scripts live under a directory named after the platform. 
All scripts in the directory will be executed in lexicographical order.","title":"Scripts"},{"location":"meta/ci/#validations","text":"In addition to running tests on our CI, there are also some validations that are run to check for correctness of changes to various components of integrations. If any of these validations fail on your branch, then the CI will fail. In short, each validation is a ddev command, which fails if the component it is validating is not correct. See the ddev documentation and source code for the full docs for each validation. Tip A list of the current validations can be found here .","title":"Validations"},{"location":"meta/ci/#ci-configuration","text":"ddev validate ci This validates that all CI entries for integrations are valid. This includes checking if the integration has the correct codecov config, and has a valid CI entry if it is testable. Tip Run ddev validate ci --fix to resolve most errors.","title":"CI configuration"},{"location":"meta/ci/#agent-requirements","text":"ddev validate agent-reqs This validates that each integration version is in sync with the requirements-agent-release.txt file. It is uncommon for this to fail because the release process is automated.","title":"Agent requirements"},{"location":"meta/ci/#codeowners","text":"ddev validate codeowners This validates that every integration has a codeowner entry . If you fail this validation, add an entry in the codewners file corresponding to any newly added integration. Note: This validation command is only run when contributing to integrations-extras","title":"Codeowners"},{"location":"meta/ci/#default-configuration-files","text":"ddev validate config This verifies that the config specs for all integrations are valid by enforcing our configuration spec schema . The most common failure at this validation stage is some version of File needs to be synced. To resolve this issue, you can run ddev validate config --sync If you see failures regarding formatting or missing parameters, see our config spec documentation for more details on how to construc configuration specs.","title":"Default configuration files"},{"location":"meta/ci/#dashboard-definition-files","text":"ddev validate dashboards This validates that dashboards are formatted correctly. This means that they need to be proper JSON and generated from Datadog's /dashboard API . Tip If you see a failure regarding use of the screen endpoint, consider using our dashboard utility command to generate your dashboard payload.","title":"Dashboard definition files"},{"location":"meta/ci/#dependencies","text":"ddev validate dep This command: Verifies the uniqueness of dependency versions across all checks. Verifies all the dependencies are pinned. Verifies the embedded Python environment defined in the base check and requirements listed in every integration are compatible. This validation only applies if your work introduces new external dependencies.","title":"Dependencies"},{"location":"meta/ci/#manifest-files","text":"ddev validate manifest This validates that the manifest files contain required fields, are formatted correctly, and don't contain common errors. See the Datadog docs for more detailed constraints.","title":"Manifest files"},{"location":"meta/ci/#metadata","text":"ddev validate metadata This checks that every metadata.csv file is formatted correctly. 
See the Datadog docs for more detailed constraints.","title":"Metadata"},{"location":"meta/ci/#readme-files","text":"ddev validate readmes This ensures that every integration's README.md file is formatted correctly. The main purpose of this validation is to ensure that any image linked in the readme exists and that all images are located in an integration's /image directory.","title":"README files"},{"location":"meta/ci/#saved-views-data","text":"ddev validate saved-views This validates that saved views for an integration are formatted correctly and contain required fields, such as \"type\". Tip View example saved views for inspiration and guidance.","title":"Saved views data"},{"location":"meta/ci/#service-check-data","text":"ddev validate service-checks This checks that every service check file is formatted correctly. See the Datadog docs for more specific constraints.","title":"Service check data"},{"location":"meta/ci/#imports","text":"ddev validate imports This verifies that all integrations import the base package in the correct way, such as: from datadog_checks.base.foo import bar Tip See the New Integration Instructions for more examples of how to use the base package.","title":"Imports"},{"location":"meta/ci/#labeler","text":"We use a GitHub Action to automatically add labels to pull requests. Tip If the Labeler CI step fails on your PR, it's probably because your PR is from a fork. Don't worry if this happens- the team can manually add labels for you. The labeler is configured to add the following: Label Condition integration/ any directory at the root that actually contains an integration documentation any Markdown, config specs , manifest.json , or anything in /docs/ dev/testing Codecov or Azure Pipelines config dev/tooling GitLab (see CD ), GitHub Actions , or Stale bot config, or the ddev CLI dependencies any change in shipped dependencies release any base package , dev package , or integration release changelog/no-changelog any release, or if all files don't modify code that is shipped The changelog/ label must be applied manually .","title":"Labeler"},{"location":"meta/ci/#fork","text":"We forked the official action to support the following: actions/labeler!43 actions/labeler!44 a special all: prefix modifier indicating the pattern must match every file","title":"Fork"},{"location":"meta/ci/#docs","text":"","title":"Docs"},{"location":"meta/ci/#stale-bot","text":"We use a GitHub App that is configured to address abandoned issues and pull requests.","title":"Stale bot"},{"location":"meta/config-models/","text":"Config models \u00b6 All integrations use pydantic models as the primary way to validate and interface with configuration. As config spec data types are based on OpenAPI 3, we automatically generate the necessary code. The models reside in a package named config_models located at the root of a check's namespaced package. For example, a new integration named foo : foo \u2502 ... \u251c\u2500\u2500 datadog_checks \u2502 \u2514\u2500\u2500 foo \u2502 \u2514\u2500\u2500 config_models \u2502 \u251c\u2500\u2500 __init__.py \u2502 \u251c\u2500\u2500 defaults.py \u2502 \u251c\u2500\u2500 instance.py \u2502 \u251c\u2500\u2500 shared.py \u2502 \u2514\u2500\u2500 validators.py \u2502 \u2514\u2500\u2500 __init__.py \u2502 ... ... 
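Before walking through the individual files, it may help to see the rough shape of a generated model. The following is a simplified, hypothetical sketch of an instance.py rather than the literal generated code; the field names are illustrative only, since real fields are produced from the integration's config spec:

```python
# instance.py -- simplified, hypothetical sketch of a generated model;
# the real file is generated from the config spec, not written by hand.
from typing import Optional, Sequence

from pydantic import BaseModel


class InstanceConfig(BaseModel):
    class Config:
        allow_mutation = False  # generated models are immutable

    # Illustrative fields only; actual fields mirror the integration's spec.
    server: str
    port: Optional[int]
    tags: Optional[Sequence[str]]
```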
There are 2 possible models: SharedConfig (ID: shared ) that corresponds to the init_config section InstanceConfig (ID: instance ) that corresponds to a check's entry in the instances section All models are defined in .py and are available for import directly under config_models . Default values \u00b6 The default values for optional settings are populated in defaults.py and are derived from the value property of config spec options. The precedence is: the default key the example key, if it appears to represent a real value rather than an illustrative example and the type is a primitive the default value of the type e.g. string -> str() , object -> dict() , etc. Validation \u00b6 The validation of fields for every model occurs in 6 stages. Initial \u00b6 def initialize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called once with the raw config that was supplied by the user. The returned mapping is used as the input config for the subsequent stages. Default value population \u00b6 If a field was not supplied by the user nor during the initialization stage, then its default value is taken from defaults.py . This stage is skipped for required fields. Default field validators \u00b6 At this point pydantic will parse the values and perform validation of types, etc. Custom field validators \u00b6 The contents of validators.py are entirely custom and contain functions to perform extra validation if necessary. def < ID > _ < OPTION_NAME > ( value : Any , * , field : pydantic . fields . ModelField , ** kwargs ) -> Any : ... Such validators are called for the appropriate field of the proper model if the option was supplied by the user. The returned value is used as the new value of the option for the subsequent stages. Pre-defined field validators \u00b6 A new validators key under the value property of config spec options is considered. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and is executed in the defined order. The last returned value is used as the new value of the option for the final stage. Final \u00b6 def finalize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called with the cumulative result of all fields. The returned mapping is used to instantiate the model. Loading \u00b6 A check initialization occurs before a check's first run that loads the config models. Validation errors will thus prevent check execution. Interface \u00b6 The config models package contains a class ConfigMixin from which checks inherit: from datadog_checks.base import AgentCheck from .config_models import ConfigMixin class Check ( AgentCheck , ConfigMixin ): ... It exposes the instantiated InstanceConfig model at self.config and SharedConfig model at self.shared_config . Immutability \u00b6 All generated models are configured as immutable . Additionally, every list is converted to tuple and every dict is converted to immutables.Map . Deprecation \u00b6 Every option marked as deprecated in the config spec will log a warning with information about when it will be removed and what to do. Enforcement \u00b6 A validation command ddev validate models runs in our CI. 
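As a recap of the validation hooks described above, here is a minimal, hypothetical sketch of what a check's validators.py could contain; the option names server and port are purely illustrative and not part of any real spec:

```python
# validators.py -- minimal, hypothetical sketch; option names are illustrative.


def initialize_instance(values, **kwargs):
    # Called once with the raw user config, before default population and type checks.
    if 'server' in values:
        values['server'] = values['server'].strip()
    return values


def instance_port(value, **kwargs):
    # Called only if the user supplied `port`, after pydantic's own validation.
    if not 0 < int(value) < 65536:
        raise ValueError('port must be between 1 and 65535')
    return value


def finalize_instance(values, **kwargs):
    # Called with the cumulative result of all fields, right before the model is instantiated.
    return values
```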
To locally generate the proper files, run ddev validate models [CHECK] --sync .","title":"Config models"},{"location":"meta/config-models/#config-models","text":"All integrations use pydantic models as the primary way to validate and interface with configuration. As config spec data types are based on OpenAPI 3, we automatically generate the necessary code. The models reside in a package named config_models located at the root of a check's namespaced package. For example, a new integration named foo : foo \u2502 ... \u251c\u2500\u2500 datadog_checks \u2502 \u2514\u2500\u2500 foo \u2502 \u2514\u2500\u2500 config_models \u2502 \u251c\u2500\u2500 __init__.py \u2502 \u251c\u2500\u2500 defaults.py \u2502 \u251c\u2500\u2500 instance.py \u2502 \u251c\u2500\u2500 shared.py \u2502 \u2514\u2500\u2500 validators.py \u2502 \u2514\u2500\u2500 __init__.py \u2502 ... ... There are 2 possible models: SharedConfig (ID: shared ) that corresponds to the init_config section InstanceConfig (ID: instance ) that corresponds to a check's entry in the instances section All models are defined in .py and are available for import directly under config_models .","title":"Config models"},{"location":"meta/config-models/#default-values","text":"The default values for optional settings are populated in defaults.py and are derived from the value property of config spec options. The precedence is: the default key the example key, if it appears to represent a real value rather than an illustrative example and the type is a primitive the default value of the type e.g. string -> str() , object -> dict() , etc.","title":"Default values"},{"location":"meta/config-models/#validation","text":"The validation of fields for every model occurs in 6 stages.","title":"Validation"},{"location":"meta/config-models/#initial","text":"def initialize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called once with the raw config that was supplied by the user. The returned mapping is used as the input config for the subsequent stages.","title":"Initial"},{"location":"meta/config-models/#default-value-population","text":"If a field was not supplied by the user nor during the initialization stage, then its default value is taken from defaults.py . This stage is skipped for required fields.","title":"Default value population"},{"location":"meta/config-models/#default-field-validators","text":"At this point pydantic will parse the values and perform validation of types, etc.","title":"Default field validators"},{"location":"meta/config-models/#custom-field-validators","text":"The contents of validators.py are entirely custom and contain functions to perform extra validation if necessary. def < ID > _ < OPTION_NAME > ( value : Any , * , field : pydantic . fields . ModelField , ** kwargs ) -> Any : ... Such validators are called for the appropriate field of the proper model if the option was supplied by the user. The returned value is used as the new value of the option for the subsequent stages.","title":"Custom field validators"},{"location":"meta/config-models/#pre-defined-field-validators","text":"A new validators key under the value property of config spec options is considered. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and is executed in the defined order. 
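On the consuming side, a check that inherits ConfigMixin (see the Interface section) reads the validated settings rather than raw dictionaries. A minimal sketch, assuming a hypothetical port instance option and metric name:

from datadog_checks.base import AgentCheck

from .config_models import ConfigMixin


class Check(AgentCheck, ConfigMixin):
    def check(self, _):
        # self.config is the validated InstanceConfig model and
        # self.shared_config is the validated SharedConfig model.
        self.gauge('foo.port', self.config.port)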
The last returned value is used as the new value of the option for the final stage.","title":"Pre-defined field validators"},{"location":"meta/config-models/#final","text":"def finalize_ < ID > ( values : dict [ str , Any ], ** kwargs ) -> dict [ str , Any ]: ... If such a validator exists in validators.py , then it is called with the cumulative result of all fields. The returned mapping is used to instantiate the model.","title":"Final"},{"location":"meta/config-models/#loading","text":"A check initialization occurs before a check's first run that loads the config models. Validation errors will thus prevent check execution.","title":"Loading"},{"location":"meta/config-models/#interface","text":"The config models package contains a class ConfigMixin from which checks inherit: from datadog_checks.base import AgentCheck from .config_models import ConfigMixin class Check ( AgentCheck , ConfigMixin ): ... It exposes the instantiated InstanceConfig model at self.config and SharedConfig model at self.shared_config .","title":"Interface"},{"location":"meta/config-models/#immutability","text":"All generated models are configured as immutable . Additionally, every list is converted to tuple and every dict is converted to immutables.Map .","title":"Immutability"},{"location":"meta/config-models/#deprecation","text":"Every option marked as deprecated in the config spec will log a warning with information about when it will be removed and what to do.","title":"Deprecation"},{"location":"meta/config-models/#enforcement","text":"A validation command ddev validate models runs in our CI. To locally generate the proper files, run ddev validate models [CHECK] --sync .","title":"Enforcement"},{"location":"meta/config-specs/","text":"Configuration specification \u00b6 Every integration has a specification detailing all the options that influence behavior. These YAML files are located at /assets/configuration/spec.yaml . Producer \u00b6 The producer 's job is to read a specification and: Validate for correctness Populate all unset default fields Resolve any defined templates Output the complete specification as JSON for arbitrary consumers Consumers \u00b6 Consumers may utilize specs in a number of scenarios, such as: rendering example configuration shipped to end users documenting all options in-app & on the docs site form for creating configuration in multiple formats on Integration tiles automatic configuration loading for Checks Agent based and/or in-app validator for user-supplied configuration Schema \u00b6 The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Datadog Agent , etc. version - The released version of what the spec refers to files - A list of all files that influence behavior Files \u00b6 Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) example_name - This is the name of the example file the Agent will ship. If none is provided, the default will be conf.yaml.example . The exception is auto-discovery files, which are also named auto_conf.yaml . options - A list of options ( REQUIRED ) Options \u00b6 Every option has 10 possible attributes: name - This is the name of the option ( REQUIRED ) description - Information about the option ( REQUIRED ) required - Whether or not the option is required for basic functionality. It defaults to false . hidden - Whether or not the option should not be publicly exposed. It defaults to false . 
display_priority - An integer representing the relative visual rank the option should take on compared to other options when publicly exposed. It defaults to 0 , meaning that every option will be displayed in the order defined in the spec. deprecation - If the option is deprecated, a mapping of relevant information. For example: deprecation : Release : 8.0.0 Migration : | do this and that multiple - Whether or not options may be selected multiple times like instances or just once like init_config metadata_tags - A list of tags (like docs:foo ) that can serve for unexpected use cases in the future options - Nested options, indicating that this is a section like instances or logs value - The expected type data There are 2 types of options: those with and without a value . Those with a value attribute are the actual user-controlled settings that influence behavior like username . Those without are expected to be sections and therefore must have an options attribute. An option cannot have both attributes. Options with a value (non-section) also support: secret - Whether or not consumers should treat the option as sensitive information like password . It defaults to false . Info The option vs section logic was chosen instead of going fully typed to avoid deeply nested value s. Values \u00b6 The type system is based on a loose subset of OpenAPI 3 data types . The differences are: Only the minimum and maximum numeric modifiers are supported Only the pattern string modifier is supported The properties object modifier is not a map, but rather a list of maps with a required name attribute. This is so consumers will load objects consistently regardless of language guarantees regarding map key order. Values also support 1 field of our own: example - An example value, only required if the type is boolean . The default is . Templates \u00b6 Every option may reference pre-defined templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class. Override \u00b6 For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : instances/http overrides : timeout.value.example : 42 Example file consumer \u00b6 The example consumer uses each spec to render the example configuration files that are shipped with every Agent and individual Integration release. It respects a few extra option -level attributes: example - A complete example of an option in lieu of a strictly typed value attribute enabled - Whether or not to un-comment the option, overriding the behavior of required display_priority - This is an integer affecting the order in which options are displayed, with higher values indicating higher priority. The default is 0 . It also respects a few extra fields under the value attribute of each option: display_default - This is the default value that will be shown in the header of each option, useful if it differs from the example . You may set it to null explicitly to disable showing this part of the header. compact_example - Whether or not to display complex types like arrays in their most compact representation. It defaults to false . 
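To make the dot accessor shown in the Override section concrete, here is a simplified Python sketch of the idea (not the actual producer implementation; it assumes the resolved template is represented as nested mappings):

def apply_override(resolved_template: dict, dotted_path: str, value) -> None:
    # Walk every key except the last, then assign the final attribute,
    # e.g. apply_override(http_template, 'timeout.value.example', 42).
    *parents, leaf = dotted_path.split('.')
    target = resolved_template
    for key in parents:
        target = target[key]
    target[leaf] = value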
Usage \u00b6 Use the --sync flag of the config validation command to render the example configuration files. Data model consumer \u00b6 The model consumer uses each spec to render the pydantic models that checks use to validate and interface with configuration. The models are shipped with every Agent and individual Integration release. It respects an extra field under the value attribute of each option: default - This is the default value that options will be set to, taking precedence over the example . validators - This refers to an array of pre-defined field validators to use. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and will be executed in the defined order. Usage \u00b6 Use the --sync flag of the model validation command to render the data model files. API \u00b6 datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec \u00b6 __init__ ( self , contents , template_paths = None , source = None , version = None ) special \u00b6 Source code in datadog_checks/dev/tooling/specs/configuration/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Configuration' self . templates = ConfigTemplates ( template_paths )","title":"Config specs"},{"location":"meta/config-specs/#configuration-specification","text":"Every integration has a specification detailing all the options that influence behavior. These YAML files are located at /assets/configuration/spec.yaml .","title":"Configuration specification"},{"location":"meta/config-specs/#producer","text":"The producer 's job is to read a specification and: Validate for correctness Populate all unset default fields Resolve any defined templates Output the complete specification as JSON for arbitrary consumers","title":"Producer"},{"location":"meta/config-specs/#consumers","text":"Consumers may utilize specs in a number of scenarios, such as: rendering example configuration shipped to end users documenting all options in-app & on the docs site form for creating configuration in multiple formats on Integration tiles automatic configuration loading for Checks Agent based and/or in-app validator for user-supplied configuration","title":"Consumers"},{"location":"meta/config-specs/#schema","text":"The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Datadog Agent , etc. version - The released version of what the spec refers to files - A list of all files that influence behavior","title":"Schema"},{"location":"meta/config-specs/#files","text":"Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) example_name - This is the name of the example file the Agent will ship. If none is provided, the default will be conf.yaml.example . The exception is auto-discovery files, which are also named auto_conf.yaml . options - A list of options ( REQUIRED )","title":"Files"},{"location":"meta/config-specs/#options","text":"Every option has 10 possible attributes: name - This is the name of the option ( REQUIRED ) description - Information about the option ( REQUIRED ) required - Whether or not the option is required for basic functionality. It defaults to false . hidden - Whether or not the option should not be publicly exposed. It defaults to false . 
display_priority - An integer representing the relative visual rank the option should take on compared to other options when publicly exposed. It defaults to 0 , meaning that every option will be displayed in the order defined in the spec. deprecation - If the option is deprecated, a mapping of relevant information. For example: deprecation : Release : 8.0.0 Migration : | do this and that multiple - Whether or not options may be selected multiple times like instances or just once like init_config metadata_tags - A list of tags (like docs:foo ) that can serve for unexpected use cases in the future options - Nested options, indicating that this is a section like instances or logs value - The expected type data There are 2 types of options: those with and without a value . Those with a value attribute are the actual user-controlled settings that influence behavior like username . Those without are expected to be sections and therefore must have an options attribute. An option cannot have both attributes. Options with a value (non-section) also support: secret - Whether or not consumers should treat the option as sensitive information like password . It defaults to false . Info The option vs section logic was chosen instead of going fully typed to avoid deeply nested value s.","title":"Options"},{"location":"meta/config-specs/#values","text":"The type system is based on a loose subset of OpenAPI 3 data types . The differences are: Only the minimum and maximum numeric modifiers are supported Only the pattern string modifier is supported The properties object modifier is not a map, but rather a list of maps with a required name attribute. This is so consumers will load objects consistently regardless of language guarantees regarding map key order. Values also support 1 field of our own: example - An example value, only required if the type is boolean . The default is .","title":"Values"},{"location":"meta/config-specs/#templates","text":"Every option may reference pre-defined templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class.","title":"Templates"},{"location":"meta/config-specs/#override","text":"For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : instances/http overrides : timeout.value.example : 42","title":"Override"},{"location":"meta/config-specs/#example-file-consumer","text":"The example consumer uses each spec to render the example configuration files that are shipped with every Agent and individual Integration release. It respects a few extra option -level attributes: example - A complete example of an option in lieu of a strictly typed value attribute enabled - Whether or not to un-comment the option, overriding the behavior of required display_priority - This is an integer affecting the order in which options are displayed, with higher values indicating higher priority. The default is 0 . It also respects a few extra fields under the value attribute of each option: display_default - This is the default value that will be shown in the header of each option, useful if it differs from the example . 
You may set it to null explicitly to disable showing this part of the header. compact_example - Whether or not to display complex types like arrays in their most compact representation. It defaults to false .","title":"Example file consumer"},{"location":"meta/config-specs/#usage","text":"Use the --sync flag of the config validation command to render the example configuration files.","title":"Usage"},{"location":"meta/config-specs/#data-model-consumer","text":"The model consumer uses each spec to render the pydantic models that checks use to validate and interface with configuration. The models are shipped with every Agent and individual Integration release. It respects an extra field under the value attribute of each option: default - This is the default value that options will be set to, taking precedence over the example . validators - This refers to an array of pre-defined field validators to use. Every entry will refer to a relative import path to a field validator under datadog_checks.base.utils.models.validation and will be executed in the defined order.","title":"Data model consumer"},{"location":"meta/config-specs/#usage_1","text":"Use the --sync flag of the model validation command to render the data model files.","title":"Usage"},{"location":"meta/config-specs/#api","text":"","title":"API"},{"location":"meta/config-specs/#datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec","text":"","title":"ConfigSpec"},{"location":"meta/config-specs/#datadog_checks.dev.tooling.specs.configuration.core.ConfigSpec.__init__","text":"Source code in datadog_checks/dev/tooling/specs/configuration/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Configuration' self . templates = ConfigTemplates ( template_paths )","title":"__init__()"},{"location":"meta/docs-specs/","text":"Documentation specification \u00b6 Building on top of the configuration spec implementation, we also incorporate a documentation spec. Similar to configuration specs, these YAML files are located at /assets/documentation/spec.yaml , and referenced in the check's manifest.json file. Producer \u00b6 The producer s job is to read a specification and: Validate for correctness Populate all unset default fields Gather and prioritize other schema for inclusion Resolve any defined templates Normalize links to embedded style Output the complete specification as JSON for arbitrary consumers This spec is dependent on other config files within an integration check, in order of precedence: manifest.json assets/service_checks.json assets/configuration/spec.yaml (included for reference, but unused for now) Consumers \u00b6 Consumers may utilize specs in a number of scenarios, such as: rendering README.md files for git and user documentation rendering HTML files for user documentation on our datadoghq.com site easily updating common components via base template changes creating single-source-of-truth for data such as short_description Schema \u00b6 The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Nagios , etc. version - The released version of what the spec refers to options - Top-level spec options related to the check overall (optional) files - A list of all files that influence behavior Spec Options \u00b6 Every spec has a set of optional options: autodiscovery - Indicates if this check supports autodiscovery. 
Default: false Files \u00b6 Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) render_name - This is the name of the rendered file, and defaults to README.md . Consumers may choose their own output name, or may read from this value. sections - A list of sections ( REQUIRED ) Sections \u00b6 Every section has these possible attributes: name - The title of the section. header_level - Level of indentation. tab - If not null, then the name of the tab, and all sections of the same indent must specify. description - Actual text content for the section. May be parameterized using keyword argument formatter strings, see parameterization for more info. Hyperlinks may be embedded or reference-style. parameters - Mapping of extra parameters for string formatting in the description . prepend_text - Text to insert in front of the description field. Useful for overrides. append_text - Text to append after the description field. Useful for overrides. processor - Reference to a Python function which should be invoked. If the function returns None , the default description carries forward, otherwise the results of the function will be used for the description . Used by the data_collected/service_checks template, for example. hidden - Whether or not the section should be publicly exposed. It defaults to false . sections - Nested sections, this will increase the header_level of embedded sections accordingly. template - See templates below for more. overrides - Override specific attributes within a given template. See overrides for more. Parameters \u00b6 When constructing each text section, the description field will first prepend and append values from prepend_text and append_text , respectively. Next string formatting operations will take place by using a default set of parameters joined with any parameters explicitly defined in the parameter attribute. Default parameters which will be present for all sections and passed as keyword args during string formatting include: name - the formal name of the check all fields from manifest.json objects from service_checks.json Templates \u00b6 Every section may reference pre-defined doc templates using a key called template . The template format looks like path/to/template_file where path/to must point an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class. Overrides \u00b6 Commonly used to update a description of a given template, or to inject specific parameters: sections : - template : setup/installation overrides : description : | The Nagios check is included in the [Datadog Agent][1] package, so you don't need to install anything else on your Nagios servers. [1]: https://docs.datadoghq.com/agent/ For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : setup/configuration overrides : templates.log_collection.hidden : true README file consumer \u00b6 The README example consumer uses the documentation spec to render the README files that are included with every Integration package. Links \u00b6 As a custom with our README.md files, we use reference style links . 
Each section description may have embedded or reference style links, and as part of the Producer step, these will be all normalized to embedded links. This ensures that any consumers can handle them as needed. For the README consumer, it will translate everything to reference style as part of its output stage. Usage \u00b6 Use the --sync flag of the config validation command to render the README files. API \u00b6 datadog_checks.dev.tooling.specs.docs.core.DocsSpec \u00b6 __init__ ( self , contents , template_paths = None , source = None , version = None ) special \u00b6 Source code in datadog_checks/dev/tooling/specs/docs/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Docs' self . templates = DocsTemplates ( template_paths ) normalize_links ( self ) \u00b6 Translate all reference-style links to inline links. Source code in datadog_checks/dev/tooling/specs/docs/core.py def normalize_links ( self ): \"\"\"Translate all reference-style links to inline links.\"\"\" # Markdown doc reference: https://www.markdownguide.org/basic-syntax/#links for fidx , file in enumerate ( self . data [ 'files' ], 1 ): sections = deque ( enumerate ( file [ 'sections' ], 1 )) while sections : sidx , section = sections . popleft () section [ 'prepend_text' ] = self . _normalize ( section [ 'prepend_text' ], fidx , sidx ) section [ 'description' ] = self . _normalize ( section [ 'description' ], fidx , sidx ) section [ 'append_text' ] = self . _normalize ( section [ 'append_text' ], fidx , sidx ) if 'sections' in section : nested_sections = [ ( f ' { sidx } . { subidx } ' , subsection ) for subidx , subsection in enumerate ( section [ 'sections' ], 1 ) ] # extend left backwards for correct order of sections sections . extendleft ( nested_sections [:: - 1 ]) validate ( self ) \u00b6 Source code in datadog_checks/dev/tooling/specs/docs/core.py def validate ( self ): spec_validator ( self . data , self ) if self . errors : return self . normalize_links () rendering: heading_level: 3 selection: members: - init - load","title":"Docs specs"},{"location":"meta/docs-specs/#documentation-specification","text":"Building on top of the configuration spec implementation, we also incorporate a documentation spec. 
Similar to configuration specs, these YAML files are located at /assets/documentation/spec.yaml , and referenced in the check's manifest.json file.","title":"Documentation specification"},{"location":"meta/docs-specs/#producer","text":"The producer's job is to read a specification and: Validate for correctness Populate all unset default fields Gather and prioritize other schema for inclusion Resolve any defined templates Normalize links to embedded style Output the complete specification as JSON for arbitrary consumers This spec is dependent on other config files within an integration check, in order of precedence: manifest.json assets/service_checks.json assets/configuration/spec.yaml (included for reference, but unused for now)","title":"Producer"},{"location":"meta/docs-specs/#consumers","text":"Consumers may utilize specs in a number of scenarios, such as: rendering README.md files for git and user documentation rendering HTML files for user documentation on our datadoghq.com site easily updating common components via base template changes creating a single source of truth for data such as short_description","title":"Consumers"},{"location":"meta/docs-specs/#schema","text":"The root of every spec is a map with 3 keys: name - The display name of what the spec refers to e.g. Postgres , Nagios , etc. version - The released version of what the spec refers to options - Top-level spec options related to the check overall (optional) files - A list of all files that influence behavior","title":"Schema"},{"location":"meta/docs-specs/#spec-options","text":"Every spec has a set of optional options: autodiscovery - Indicates if this check supports autodiscovery. Default: false","title":"Spec Options"},{"location":"meta/docs-specs/#files","text":"Every file has 3 possible attributes: name - This is the name of the file the Agent will look for ( REQUIRED ) render_name - This is the name of the rendered file, and defaults to README.md . Consumers may choose their own output name, or may read from this value. sections - A list of sections ( REQUIRED )","title":"Files"},{"location":"meta/docs-specs/#sections","text":"Every section has these possible attributes: name - The title of the section. header_level - Level of indentation. tab - If not null, the name of the tab; all sections at the same indentation level must then also specify one. description - Actual text content for the section. May be parameterized using keyword argument formatter strings, see parameterization for more info. Hyperlinks may be embedded or reference-style. parameters - Mapping of extra parameters for string formatting in the description . prepend_text - Text to insert in front of the description field. Useful for overrides. append_text - Text to append after the description field. Useful for overrides. processor - Reference to a Python function which should be invoked. If the function returns None , the default description carries forward, otherwise the results of the function will be used for the description . Used by the data_collected/service_checks template, for example. hidden - Whether or not the section should be hidden from public exposure. It defaults to false . sections - Nested sections, this will increase the header_level of embedded sections accordingly. template - See templates below for more. overrides - Override specific attributes within a given template.
See overrides for more.","title":"Sections"},{"location":"meta/docs-specs/#parameters","text":"When constructing each text section, values from prepend_text and append_text are first prepended and appended to the description field, respectively. Next, string formatting takes place using a default set of parameters joined with any parameters explicitly defined in the parameters attribute. Default parameters which will be present for all sections and passed as keyword args during string formatting include: name - the formal name of the check all fields from manifest.json objects from service_checks.json","title":"Parameters"},{"location":"meta/docs-specs/#templates","text":"Every section may reference pre-defined doc templates using a key called template . The template format looks like path/to/template_file where path/to must point to an existing directory relative to a template directory and template_file must have the file extension .yaml or .yml . You can use custom templates that will take precedence over the pre-defined templates by using the template_paths parameter of the ConfigSpec class.","title":"Templates"},{"location":"meta/docs-specs/#overrides","text":"Commonly used to update a description of a given template, or to inject specific parameters: sections : - template : setup/installation overrides : description : | The Nagios check is included in the [Datadog Agent][1] package, so you don't need to install anything else on your Nagios servers. [1]: https://docs.datadoghq.com/agent/ For occasions when deeply nested default template values need to be overridden, there is the ability to redefine attributes via a . (dot) accessor. options : - template : setup/configuration overrides : templates.log_collection.hidden : true","title":"Overrides"},{"location":"meta/docs-specs/#readme-file-consumer","text":"The README example consumer uses the documentation spec to render the README files that are included with every Integration package.","title":"README file consumer"},{"location":"meta/docs-specs/#links","text":"As is custom with our README.md files, we use reference style links . Each section description may have embedded or reference style links, and as part of the Producer step, these will all be normalized to embedded links. This ensures that any consumers can handle them as needed. The README consumer translates everything back to reference style as part of its output stage.","title":"Links"},{"location":"meta/docs-specs/#usage","text":"Use the --sync flag of the config validation command to render the README files.","title":"Usage"},{"location":"meta/docs-specs/#api","text":"","title":"API"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec","text":"","title":"DocsSpec"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.__init__","text":"Source code in datadog_checks/dev/tooling/specs/docs/core.py def __init__ ( self , contents , template_paths = None , source = None , version = None ): super () . __init__ ( contents , template_paths , source , version ) self . spec_type = 'Docs' self . templates = DocsTemplates ( template_paths )","title":"__init__()"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.normalize_links","text":"Translate all reference-style links to inline links.
Source code in datadog_checks/dev/tooling/specs/docs/core.py def normalize_links ( self ): \"\"\"Translate all reference-style links to inline links.\"\"\" # Markdown doc reference: https://www.markdownguide.org/basic-syntax/#links for fidx , file in enumerate ( self . data [ 'files' ], 1 ): sections = deque ( enumerate ( file [ 'sections' ], 1 )) while sections : sidx , section = sections . popleft () section [ 'prepend_text' ] = self . _normalize ( section [ 'prepend_text' ], fidx , sidx ) section [ 'description' ] = self . _normalize ( section [ 'description' ], fidx , sidx ) section [ 'append_text' ] = self . _normalize ( section [ 'append_text' ], fidx , sidx ) if 'sections' in section : nested_sections = [ ( f ' { sidx } . { subidx } ' , subsection ) for subidx , subsection in enumerate ( section [ 'sections' ], 1 ) ] # extend left backwards for correct order of sections sections . extendleft ( nested_sections [:: - 1 ])","title":"normalize_links()"},{"location":"meta/docs-specs/#datadog_checks.dev.tooling.specs.docs.core.DocsSpec.validate","text":"Source code in datadog_checks/dev/tooling/specs/docs/core.py def validate ( self ): spec_validator ( self . data , self ) if self . errors : return self . normalize_links () rendering: heading_level: 3 selection: members: - init - load","title":"validate()"},{"location":"meta/docs/","text":"Documentation \u00b6 Generation \u00b6 Our docs are configured to be rendered by the static site generator MkDocs with the beautiful Material for MkDocs theme. Plugins \u00b6 We use a select few MkDocs plugins to achieve the following: minify HTML ( :octicons-octoface-24: ) display the date of the last Git modification of every page ( :octicons-octoface-24: ) automatically generate docs based on code and docstrings ( :octicons-octoface-24: ) export the site as a PDF ( :octicons-octoface-24: ) Extensions \u00b6 We also depend on a few Python-Markdown extensions to achieve the following: support for emojis, collapsible elements, code highlighting, and other advanced features courtesy of the PyMdown extension suite ( :octicons-octoface-24: ) ability to inline SVG icons from Material , FontAwesome , and Octicons ( :octicons-octoface-24: ) allow arbitrary scripts to modify MkDocs input files ( :octicons-octoface-24: ) automatically generate reference docs for Click -based command line interfaces ( :octicons-octoface-24: ) References \u00b6 All references are automatically available to all pages. Abbreviations \u00b6 These allow for the expansion of text on hover, useful for acronyms and definitions. For example, if you add the following to the list of abbreviations : *[CERN]: European Organization for Nuclear Research then anywhere you type CERN the organization's full name will appear on hover. 
External links \u00b6 All links to external resources should be added to the list of external links rather than defined on a per-page basis, for many reasons: it keeps the Markdown content compact and thus easy to read and modify the ability to re-use a link, even if you foresee no immediate use elsewhere easy automation of stale link detection when links to external resources change, the last date of Git modification displayed on pages will not Scripts \u00b6 We use some scripts to dynamically modify pages before being processed by other extensions and MkDocs itself, to achieve the following: add references to the bottom of every page render the status of various aspects of integrations enumerate all the dependencies that are shipped with the Datadog Agent Build \u00b6 We configure a tox environment called docs that provides all the dependencies necessary to build the documentation. To build and view the documentation in your browser, run the serve command (the first invocation may take a few extra moments): ddev docs serve By default, live reloading is enabled so any modification will be reflected in near-real time. Note: In order to export the site as a PDF, you can use the --pdf flag, but you will need some external dependencies . Deploy \u00b6 Our CI deploys the documentation to GitHub Pages if any changes occur on commits to the master branch. Danger Never make documentation non-deterministic as it will trigger deploys for every single commit. For example, say you want to display the valid values of a CLI option and the enumeration is represented as a set . Formatting the sequence directly will produce inconsistent results because sets do not guarantee order like dictionaries do, so you must sort it first.","title":"Docs"},{"location":"meta/docs/#documentation","text":"","title":"Documentation"},{"location":"meta/docs/#generation","text":"Our docs are configured to be rendered by the static site generator MkDocs with the beautiful Material for MkDocs theme.","title":"Generation"},{"location":"meta/docs/#plugins","text":"We use a select few MkDocs plugins to achieve the following: minify HTML ( :octicons-octoface-24: ) display the date of the last Git modification of every page ( :octicons-octoface-24: ) automatically generate docs based on code and docstrings ( :octicons-octoface-24: ) export the site as a PDF ( :octicons-octoface-24: )","title":"Plugins"},{"location":"meta/docs/#extensions","text":"We also depend on a few Python-Markdown extensions to achieve the following: support for emojis, collapsible elements, code highlighting, and other advanced features courtesy of the PyMdown extension suite ( :octicons-octoface-24: ) ability to inline SVG icons from Material , FontAwesome , and Octicons ( :octicons-octoface-24: ) allow arbitrary scripts to modify MkDocs input files ( :octicons-octoface-24: ) automatically generate reference docs for Click -based command line interfaces ( :octicons-octoface-24: )","title":"Extensions"},{"location":"meta/docs/#references","text":"All references are automatically available to all pages.","title":"References"},{"location":"meta/docs/#abbreviations","text":"These allow for the expansion of text on hover, useful for acronyms and definitions. 
For example, if you add the following to the list of abbreviations : *[CERN]: European Organization for Nuclear Research then anywhere you type CERN the organization's full name will appear on hover.","title":"Abbreviations"},{"location":"meta/docs/#external-links","text":"All links to external resources should be added to the list of external links rather than defined on a per-page basis, for many reasons: it keeps the Markdown content compact and thus easy to read and modify the ability to re-use a link, even if you foresee no immediate use elsewhere easy automation of stale link detection when links to external resources change, the last date of Git modification displayed on pages will not","title":"External links"},{"location":"meta/docs/#scripts","text":"We use some scripts to dynamically modify pages before being processed by other extensions and MkDocs itself, to achieve the following: add references to the bottom of every page render the status of various aspects of integrations enumerate all the dependencies that are shipped with the Datadog Agent","title":"Scripts"},{"location":"meta/docs/#build","text":"We configure a tox environment called docs that provides all the dependencies necessary to build the documentation. To build and view the documentation in your browser, run the serve command (the first invocation may take a few extra moments): ddev docs serve By default, live reloading is enabled so any modification will be reflected in near-real time. Note: In order to export the site as a PDF, you can use the --pdf flag, but you will need some external dependencies .","title":"Build"},{"location":"meta/docs/#deploy","text":"Our CI deploys the documentation to GitHub Pages if any changes occur on commits to the master branch. Danger Never make documentation non-deterministic as it will trigger deploys for every single commit. For example, say you want to display the valid values of a CLI option and the enumeration is represented as a set . 
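A small sketch of the pitfall and the fix (the option values are hypothetical):

SUPPORTED_FORMATS = {'json', 'yaml', 'toml'}

# Non-deterministic: set iteration order can differ between builds,
# so the rendered page changes and triggers a deploy on every commit.
choices_doc = ', '.join(SUPPORTED_FORMATS)

# Deterministic: sorting first yields identical output for every build.
choices_doc = ', '.join(sorted(SUPPORTED_FORMATS))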
Formatting the sequence directly will produce inconsistent results because sets do not guarantee order like dictionaries do, so you must sort it first.","title":"Deploy"},{"location":"meta/status/","text":"Status \u00b6 Dashboards \u00b6 78.91% Completed 116/147 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_active_directory azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cockroachdb confluent_platform consul consul_connect containerd coredns couch couchbase cri crio databricks directory disk dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly jmeter journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller nvidia_jetson oom_kill openldap openshift openstack openstack_controller oracle otel pan_firewall pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid statsd system_core systemd tcp_check tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere yarn zk Logs support \u00b6 92.73% Completed 102/110 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer php_fpm postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log yarn zk Recommended monitors \u00b6 17.48% Completed 25/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd 
linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Config specs \u00b6 94.41% Completed 135/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Docs specs \u00b6 0.70% Completed 1/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk E2E tests \u00b6 77.86% Completed 109/140 active_directory 
activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Config validation \u00b6 36.50% Completed 50/137 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_scheduler kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Metadata submission \u00b6 30.71% Completed 43/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce 
marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Process signatures \u00b6 30.56% Completed 44/144 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Agent 8 check signatures \u00b6 50.34% Completed 73/145 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk Default saved views (for integrations with logs) \u00b6 44.66% 
Completed 46/103 active_directory activemq activemq_xml aerospike airflow ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium clickhouse confluent_platform consul coredns couch couchbase druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_scheduler kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb win32_event_log yarn zk","title":"Status"},{"location":"meta/status/#status","text":"","title":"Status"},{"location":"meta/status/#dashboards","text":"78.91% Completed 116/147 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_active_directory azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cockroachdb confluent_platform consul consul_connect containerd coredns couch couchbase cri crio databricks directory disk dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly jmeter journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller nvidia_jetson oom_kill openldap openshift openstack openstack_controller oracle otel pan_firewall pgbouncer php_fpm postfix postgres powerdns_recursor presto process proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid statsd system_core systemd tcp_check tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere yarn zk","title":"Dashboards"},{"location":"meta/status/#logs-support","text":"92.73% Completed 102/110 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer php_fpm postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat 
twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log yarn zk","title":"Logs support"},{"location":"meta/status/#recommended-monitors","text":"17.48% Completed 25/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Recommended monitors"},{"location":"meta/status/#config-specs","text":"94.41% Completed 135/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Config specs"},{"location":"meta/status/#docs-specs","text":"0.70% Completed 1/143 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink 
fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Docs specs"},{"location":"meta/status/#e2e-tests","text":"77.86% Completed 109/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"E2E tests"},{"location":"meta/status/#config-validation","text":"36.50% Completed 50/137 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_scheduler kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process 
proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Config validation"},{"location":"meta/status/#metadata-submission","text":"30.71% Completed 43/140 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Metadata submission"},{"location":"meta/status/#process-signatures","text":"30.56% Completed 44/144 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Process signatures"},{"location":"meta/status/#agent-8-check-signatures","text":"50.34% Completed 73/145 active_directory activemq activemq_xml aerospike airflow amazon_msk ambari apache aspdotnet azure_iot_edge btrfs cacti cassandra 
cassandra_nodetool ceph cilium cisco_aci clickhouse cloud_foundry_api cockroachdb confluent_platform consul coredns couch couchbase crio directory disk dns_check dotnetclr druid ecs_fargate eks_fargate elastic envoy etcd exchange_server external_dns flink fluentd gearmand gitlab gitlab_runner glusterfs go_expvar gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq http_check hyperv ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_apiserver_metrics kube_controller_manager kube_dns kube_metrics_server kube_proxy kube_scheduler kubelet kubernetes_state kyototycoon lighttpd linkerd linux_proc_extras mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios network nfsstat nginx nginx_ingress_controller openldap openmetrics openstack openstack_controller oracle pdh_check pgbouncer php_fpm postfix postgres powerdns_recursor presto process prometheus proxysql rabbitmq redisdb rethinkdb riak riakcs sap_hana scylla sidekiq snmp snowflake solr sonarqube spark sqlserver squid ssh_check statsd supervisord system_core system_swap tcp_check teamcity tenable tls tokumx tomcat twemproxy twistlock varnish vault vertica voltdb vsphere win32_event_log windows_service wmi_check yarn zk","title":"Agent 8 check signatures"},{"location":"meta/status/#default-saved-views-for-integrations-with-logs","text":"44.66% Completed 46/103 active_directory activemq activemq_xml aerospike airflow ambari apache aspdotnet azure_iot_edge cacti cassandra cassandra_nodetool ceph cilium clickhouse confluent_platform consul coredns couch couchbase druid ecs_fargate eks_fargate elastic envoy etcd exchange_server flink fluentd gearmand gitlab gitlab_runner glusterfs gunicorn haproxy harbor hazelcast hdfs_datanode hdfs_namenode hive hivemq ibm_db2 ibm_mq ibm_was ignite iis istio jboss_wildfly journald kafka kafka_consumer kong kube_scheduler kyototycoon lighttpd linkerd mapr mapreduce marathon marklogic mcache mesos_master mesos_slave mongo mysql nagios nfsstat nginx nginx_ingress_controller openldap openstack openstack_controller pgbouncer postfix postgres powerdns_recursor presto proxysql rabbitmq redisdb rethinkdb riak scylla sidekiq solr sonarqube spark sqlserver squid statsd supervisord teamcity tenable tomcat twemproxy twistlock varnish vault vertica voltdb win32_event_log yarn zk","title":"Default saved views (for integrations with logs)"},{"location":"process/integration-release/","text":"Integration release \u00b6 Each Agent integration has its own release cycle. Many integrations are actively developed and released often while some are rarely touched (usually indicating feature-completeness). Versioning \u00b6 All releases adhere to Semantic Versioning . Tags in the form - are added to the Git repository. Therefore, it's possible to checkout and build the code for a certain version of a specific check. Setup \u00b6 Configure your GitHub auth. Identify changes \u00b6 Note If you already know which integration you'd like to release, skip this section. To see all checks that need to be released, run ddev release show ready . Steps \u00b6 Checkout and pull the most recent version of the master branch. git checkout master git pull Important Not using the latest version of master may cause errors in the build pipeline . Review which PRs were merged in between the latest release and the master branch. ddev release show changes You should ensure that PR titles and changelog labels are correct. 
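As a quick recap of the review steps above, the commands might be run as the following shell session; `my_check` is a placeholder integration name, not something prescribed by this guide:

```bash
git checkout master
git pull
# list every check that has unreleased changes
ddev release show ready
# review the PRs merged since the last release of one check
ddev release show changes my_check
```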
Create a release branch from master (suggested naming format is /release- ). This has the purpose of opening a PR so others can review the changelog. Important It is critical the branch name is not in the form /- because one of our Gitlab jobs is triggered whenever a Git reference matches that pattern, see !3843 & !3980 . Make the release. ddev release make You may need to touch your Yubikey multiple times. This will automatically: update the version in /datadog_checks//__about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes Push your branch to GitHub and create a pull request. Update the title of the PR to something like [Release] Bumped version to . Ask for a review in Slack. Merge the pull request after approval. PyPI \u00b6 If you released datadog_checks_base or datadog_checks_dev then these must be uploaded to PyPI for use by integrations-extras . This is automatically handled by two GitHub Action jobs: release-base.yml and release-dev.yml . In case you need to do it manually: ddev release upload datadog_checks_[base|dev] Metadata \u00b6 You need to run certain jobs if any changes modified integration metadata. See the Declarative Integration Pipeline wiki. Bulk releases \u00b6 To create a release for every integration that has changed, use all as the integration name in the ddev release make step above. ddev release make all You may also pass a comma-separated list of checks to skip using the --exclude option, e.g.: ddev release make all --exclude datadog_checks_dev Warning There is a known GitHub limitation where if an issue has too many labels (100), its state cannot be modified. If you cannot merge the pull request: Run the remove-labels command After merging, manually add back the changelog/no-changelog label Betas \u00b6 Creating pre-releases is the same workflow except you do not open a pull request but rather release directly from a branch. In the ddev release make step set --version to [major|minor|patch],[rc|alpha|beta] . For example, if the current version of an integration is 1.1.3 , the following command will bump it to 1.2.0-rc.1 : ddev release make --version minor,rc After pushing the release commits to GitHub, run: ddev release tag This manually triggers the build pipeline . To increment the version, omit the first part, e.g.: ddev release make --version rc New integrations \u00b6 To bump a new integration to 1.0.0 if it is not already there, run: ddev release make --new To ensure this for all integrations, run: ddev release make all --new If a release was created, run: ddev agent requirements Troubleshooting \u00b6 If you encounter errors when signing with your Yubikey, ensure you ran gpg --import .gpg.pub . If the build pipeline failed, it is likely that you modified a file in the pull request without re-signing. To resolve this, you'll need to bootstrap metadata for every integration: Checkout and pull the most recent version of the master branch. git checkout master git pull Sign everything. ddev release make all --sign-only You may need to touch your Yubikey multiple times. Push your branch to GitHub. Manually trigger a build. git tag bootstrap-1.0.0 -m bootstrap-1.0.0 The tag name is irrelevant, it just needs to look like an integration release. Gitlab doesn't sync deleted tags, so any subsequent manual trigger tags will need to increment the version number. Delete the branch and tag, locally and on GitHub. 
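Put together, the bootstrap-and-rebuild procedure above might look like the following sketch; the branch and tag names are only illustrative, and it assumes pushing the tag is what triggers the build:

```bash
git checkout master
git pull
# re-sign in-toto metadata for every integration
ddev release make all --sign-only
git push origin <your-branch>
# any tag that looks like an integration release triggers a build
git tag bootstrap-1.0.0 -m bootstrap-1.0.0
git push origin bootstrap-1.0.0
# once the build is green, clean up the tag locally and on GitHub
git tag -d bootstrap-1.0.0
git push origin :refs/tags/bootstrap-1.0.0
```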
Releasers \u00b6 For whom it may concern, the following is a list of GPG public key fingerprints known to correspond to developers who, at the time of writing (28-02-2020), can trigger a build by signing in-toto metadata . Christine Chen 57CE 2495 EA48 D456 B9C4 BA4F 66E8 2239 9141 D9D3 36C0 82E7 38C7 B4A1 E169 11C0 D633 59C4 875A 1A9A Paul Coignet 024E 42FE 76AD F19F 5D57 7503 07E5 2EA3 88E4 08FD 1286 0553 D1DC 93A7 2CD1 6956 2D98 DCE7 FBFF C9C2 Dave Coleman 8278 C406 C1BB F1F2 DFBB 5AD6 0AE7 E246 4F8F D375 98A5 37CD CCA2 8DFF B35B 0551 5D50 0742 90F6 422F Paola Ducolin EAC5 F27E C6B1 A814 1222 1942 C4E1 549E 937E F5A2 A40A DD71 41EB C767 BBFB E0B8 9128 2E2F E536 C858 Mike Garabedian F90C 0097 67F2 4B27 9DC2 C83D A227 6601 6CB4 CF1D 2669 6E67 28D2 0CB0 C1E0 D2BE 6643 5756 8398 9306 Thomas Herv\u00e9 59DB 2532 75A5 BD4E 55C7 C5AA 0678 55A2 8E90 3B3B E2BD 994F 95C0 BC0B B923 1D21 F752 1EC8 F485 90D0 Ofek Lev C295 CF63 B355 DFEB 3316 02F7 F426 A944 35BE 6F99 D009 8861 8057 D2F4 D855 5A62 B472 442C B7D3 AF42 Florimond Manca B023 B02A 0331 9CD8 D19A 4328 83ED 89A4 5548 48FC 0992 11D9 AA67 D21E 7098 7B59 7C7D CB06 C9F2 0C13 Greg Marabout Demazure 01CC 90D7 F047 93D4 30DF 9C7B 825B 84BD 1EE8 E57C C719 8925 CAE5 11DE 7FC2 EB15 A9B3 5A96 7570 B459 Julia Simon 4A54 09A2 3361 109C 047C C76A DC8A 42C2 8B95 0123 129A 26CF A726 3C85 98A6 94B0 8659 1366 CBA1 BF3C Florian Veaux 3109 1C85 5D78 7789 93E5 0348 9BFE 5299 D02F 83E9 7A73 0C5E 48B0 6986 1045 CF8B 8B2D 16D6 5DE4 C95E Alexandre Yang FBC6 3AE0 9D0C A9B4 584C 9D7F 4291 A11A 36EA 52CD F8D9 181D 9309 F8A4 957D 636A 27F8 F48B 18AE 91AA","title":"Integration release"},{"location":"process/integration-release/#integration-release","text":"Each Agent integration has its own release cycle. Many integrations are actively developed and released often while some are rarely touched (usually indicating feature-completeness).","title":"Integration release"},{"location":"process/integration-release/#versioning","text":"All releases adhere to Semantic Versioning . Tags in the form - are added to the Git repository. Therefore, it's possible to checkout and build the code for a certain version of a specific check.","title":"Versioning"},{"location":"process/integration-release/#setup","text":"Configure your GitHub auth.","title":"Setup"},{"location":"process/integration-release/#identify-changes","text":"Note If you already know which integration you'd like to release, skip this section. To see all checks that need to be released, run ddev release show ready .","title":"Identify changes"},{"location":"process/integration-release/#steps","text":"Checkout and pull the most recent version of the master branch. git checkout master git pull Important Not using the latest version of master may cause errors in the build pipeline . Review which PRs were merged in between the latest release and the master branch. ddev release show changes You should ensure that PR titles and changelog labels are correct. Create a release branch from master (suggested naming format is /release- ). This has the purpose of opening a PR so others can review the changelog. Important It is critical the branch name is not in the form /- because one of our Gitlab jobs is triggered whenever a Git reference matches that pattern, see !3843 & !3980 . Make the release. ddev release make You may need to touch your Yubikey multiple times. 
This will automatically: update the version in /datadog_checks//__about__.py update the changelog update the requirements-agent-release.txt file update in-toto metadata commit the above changes Push your branch to GitHub and create a pull request. Update the title of the PR to something like [Release] Bumped version to . Ask for a review in Slack. Merge the pull request after approval.","title":"Steps"},{"location":"process/integration-release/#pypi","text":"If you released datadog_checks_base or datadog_checks_dev then these must be uploaded to PyPI for use by integrations-extras . This is automatically handled by two GitHub Action jobs: release-base.yml and release-dev.yml . In case you need to do it manually: ddev release upload datadog_checks_[base|dev]","title":"PyPI"},{"location":"process/integration-release/#metadata","text":"You need to run certain jobs if any changes modified integration metadata. See the Declarative Integration Pipeline wiki.","title":"Metadata"},{"location":"process/integration-release/#bulk-releases","text":"To create a release for every integration that has changed, use all as the integration name in the ddev release make step above. ddev release make all You may also pass a comma-separated list of checks to skip using the --exclude option, e.g.: ddev release make all --exclude datadog_checks_dev Warning There is a known GitHub limitation where if an issue has too many labels (100), its state cannot be modified. If you cannot merge the pull request: Run the remove-labels command After merging, manually add back the changelog/no-changelog label","title":"Bulk releases"},{"location":"process/integration-release/#betas","text":"Creating pre-releases is the same workflow except you do not open a pull request but rather release directly from a branch. In the ddev release make step set --version to [major|minor|patch],[rc|alpha|beta] . For example, if the current version of an integration is 1.1.3 , the following command will bump it to 1.2.0-rc.1 : ddev release make --version minor,rc After pushing the release commits to GitHub, run: ddev release tag This manually triggers the build pipeline . To increment the version, omit the first part, e.g.: ddev release make --version rc","title":"Betas"},{"location":"process/integration-release/#new-integrations","text":"To bump a new integration to 1.0.0 if it is not already there, run: ddev release make --new To ensure this for all integrations, run: ddev release make all --new If a release was created, run: ddev agent requirements","title":"New integrations"},{"location":"process/integration-release/#troubleshooting","text":"If you encounter errors when signing with your Yubikey, ensure you ran gpg --import .gpg.pub . If the build pipeline failed, it is likely that you modified a file in the pull request without re-signing. To resolve this, you'll need to bootstrap metadata for every integration: Checkout and pull the most recent version of the master branch. git checkout master git pull Sign everything. ddev release make all --sign-only You may need to touch your Yubikey multiple times. Push your branch to GitHub. Manually trigger a build. git tag bootstrap-1.0.0 -m bootstrap-1.0.0 The tag name is irrelevant, it just needs to look like an integration release. Gitlab doesn't sync deleted tags, so any subsequent manual trigger tags will need to increment the version number. 
Delete the branch and tag, locally and on GitHub.","title":"Troubleshooting"},{"location":"process/integration-release/#releasers","text":"For whom it may concern, the following is a list of GPG public key fingerprints known to correspond to developers who, at the time of writing (28-02-2020), can trigger a build by signing in-toto metadata . Christine Chen 57CE 2495 EA48 D456 B9C4 BA4F 66E8 2239 9141 D9D3 36C0 82E7 38C7 B4A1 E169 11C0 D633 59C4 875A 1A9A Paul Coignet 024E 42FE 76AD F19F 5D57 7503 07E5 2EA3 88E4 08FD 1286 0553 D1DC 93A7 2CD1 6956 2D98 DCE7 FBFF C9C2 Dave Coleman 8278 C406 C1BB F1F2 DFBB 5AD6 0AE7 E246 4F8F D375 98A5 37CD CCA2 8DFF B35B 0551 5D50 0742 90F6 422F Paola Ducolin EAC5 F27E C6B1 A814 1222 1942 C4E1 549E 937E F5A2 A40A DD71 41EB C767 BBFB E0B8 9128 2E2F E536 C858 Mike Garabedian F90C 0097 67F2 4B27 9DC2 C83D A227 6601 6CB4 CF1D 2669 6E67 28D2 0CB0 C1E0 D2BE 6643 5756 8398 9306 Thomas Herv\u00e9 59DB 2532 75A5 BD4E 55C7 C5AA 0678 55A2 8E90 3B3B E2BD 994F 95C0 BC0B B923 1D21 F752 1EC8 F485 90D0 Ofek Lev C295 CF63 B355 DFEB 3316 02F7 F426 A944 35BE 6F99 D009 8861 8057 D2F4 D855 5A62 B472 442C B7D3 AF42 Florimond Manca B023 B02A 0331 9CD8 D19A 4328 83ED 89A4 5548 48FC 0992 11D9 AA67 D21E 7098 7B59 7C7D CB06 C9F2 0C13 Greg Marabout Demazure 01CC 90D7 F047 93D4 30DF 9C7B 825B 84BD 1EE8 E57C C719 8925 CAE5 11DE 7FC2 EB15 A9B3 5A96 7570 B459 Julia Simon 4A54 09A2 3361 109C 047C C76A DC8A 42C2 8B95 0123 129A 26CF A726 3C85 98A6 94B0 8659 1366 CBA1 BF3C Florian Veaux 3109 1C85 5D78 7789 93E5 0348 9BFE 5299 D02F 83E9 7A73 0C5E 48B0 6986 1045 CF8B 8B2D 16D6 5DE4 C95E Alexandre Yang FBC6 3AE0 9D0C A9B4 584C 9D7F 4291 A11A 36EA 52CD F8D9 181D 9309 F8A4 957D 636A 27F8 F48B 18AE 91AA","title":"Releasers"},{"location":"process/agent-release/post-release/","text":"Post release \u00b6 Finalize \u00b6 On the day of the final stable release, tag the branch with ..0 . After the main Agent release manager confirms successful deployment to a few targets, create a branch based on master and run: ddev agent changelog ddev agent integrations See more options for ddev agent changelog and ddev agent integrations . Run the following commands to update the contents: ddev agent changelog -w -f to update the existing AGENT_CHANGELOG file ddev agent integrations -w -f to update the existing AGENT_INTEGRATIONS file. ddev agent integrations-changelog -w to add Agent version to existing CHANGELOG.md releases of integrations. Create a pull request and wait for approval before merging. Patches \u00b6 Important Only critical fixes are included in patches. See definition for critical fixes . Releases after the final Agent release should be reserved for critical issues only. Cherry-picking commits and releases for the patch release is mostly similar to the process for preparing release candidates . However, it's possible that from the time code freeze ended and a bugfix is needed, the integration has other non-critical commits or was released. The next section will describe the process for preparing the patch release candidates. Multiple check releases between bugfix release \u00b6 Given the effort of QA-ing the Agent release, any new changes should be carefully selected and included for the patch. Follow the following steps to add patch release: Cherry-pick the bugfix commit to the release branch . Release the integration on the release branch. Make a pull request with integration release , then merge it to the release branch. Important Remember to trigger the release pipeline and build the wheel. 
You can do so by tagging the release : `ddev release tag ` Note: only release PRs merged to master automatically build a wheel. Then pull the latest release branch so your branch has both the bugfix commit and release commit. Tag the branch with the new bumped version ..-rc.1 . When the patch release is ready, follow the same steps to finalize the release . Also manually update the changelog of the integrations that were released on the release branch, see example .","title":"Post release"},{"location":"process/agent-release/post-release/#post-release","text":"","title":"Post release"},{"location":"process/agent-release/post-release/#finalize","text":"On the day of the final stable release, tag the branch with ..0 . After the main Agent release manager confirms successful deployment to a few targets, create a branch based on master and run: ddev agent changelog ddev agent integrations See more options for ddev agent changelog and ddev agent integrations . Run the following commands to update the contents: ddev agent changelog -w -f to update the existing AGENT_CHANGELOG file ddev agent integrations -w -f to update the existing AGENT_INTEGRATIONS file. ddev agent integrations-changelog -w to add Agent version to existing CHANGELOG.md releases of integrations. Create a pull request and wait for approval before merging.","title":"Finalize"},{"location":"process/agent-release/post-release/#patches","text":"Important Only critical fixes are included in patches. See definition for critical fixes . Releases after the final Agent release should be reserved for critical issues only. Cherry-picking commits and releases for the patch release is mostly similar to the process for preparing release candidates . However, it's possible that from the time code freeze ended and a bugfix is needed, the integration has other non-critical commits or was released. The next section will describe the process for preparing the patch release candidates.","title":"Patches"},{"location":"process/agent-release/post-release/#multiple-check-releases-between-bugfix-release","text":"Given the effort of QA-ing the Agent release, any new changes should be carefully selected and included for the patch. Follow the following steps to add patch release: Cherry-pick the bugfix commit to the release branch . Release the integration on the release branch. Make a pull request with integration release , then merge it to the release branch. Important Remember to trigger the release pipeline and build the wheel. You can do so by tagging the release : `ddev release tag ` Note: only release PRs merged to master automatically build a wheel. Then pull the latest release branch so your branch has both the bugfix commit and release commit. Tag the branch with the new bumped version ..-rc.1 . When the patch release is ready, follow the same steps to finalize the release . Also manually update the changelog of the integrations that were released on the release branch, see example .","title":"Multiple check releases between bugfix release"},{"location":"process/agent-release/pre-release/","text":"Pre release \u00b6 A new minor version of the Agent is released every 6 weeks (approximately). Each release ships a snapshot of integrations-core . Setup \u00b6 Ensure that you have configured the following: GitHub credentials Trello credentials Trello team mappings Before Freeze \u00b6 Update style dependencies to latest versions (except if comments say otherwise) via PR. Example: ISORT_DEP , BLACK_DEP , etc. 
Check that the master , py2 and base_check builds are green. Freeze \u00b6 At midnight (EDT/EST) on the Friday before QA week we freeze, at which point the release manager will release all integrations with pending changes then branch off. Release \u00b6 Make a pull request to release any new integrations , then merge it and pull master Make a pull request to release all changed integrations , then merge it and pull master Get 2+ thorough reviews on the changelogs. Entries should have appropriate SemVer levels (e.g. Changed entries must refer to breaking changes only). See also PR guidelines . Consider x-posting the PR to Agent teams that have integrations in integrations-core , so they can check relevant changelogs too. Important Update PyPI if you released datadog_checks_base or datadog_checks_dev . Branch \u00b6 Create a branch based on master named after the highest version of the Agent being released in the form ..x Push the branch to GitHub Tag \u00b6 Run: git tag ..0-rc.1 -m ..0-rc.1 git push origin ..0-rc.1 QA week \u00b6 We test all changes to integrations that were introduced since the last release. Create items \u00b6 Create an item for every change in our board using the Trello subcommand called testable . For example: ddev release trello testable 7.17.1 7.18.0-rc.1 or if the tag is not ready yet: ddev release trello testable 7.17.1 origin/master would select all commits that were merged between the Git references. The command will display each change and prompt you to assign a team or skip. Purely documentation changes are automatically skipped. Cards are automatically assigned if $trello_users_$team table is configured . Release candidates \u00b6 The main Agent release manager will increment and build a new rc every day a bug fix needs to be tested until all QA is complete. Before each build is triggered: Merge any fixes that have been approved, then pull master Release all changed integrations with the exception of datadog_checks_dev For each fix merged, you must cherry-pick to the branch : The commit to master itself The release commit, so the shipped versions match the individually released integrations After all fixes have been cherry-picked: Push the changes to GitHub Tag with the appropriate rc number even if there were no changes Communication \u00b6 The Agent Release Manager will post a daily status for the entire release cycle. Reply in the thread with any pending PRs meant for the next RC and update the spreadsheet PRs included in Agent RCs . Logs \u00b6 Each release candidate is deployed in a staging environment. We observe the WARN or ERROR level logs filtered with the facets Service:datadog-agent and index:main and LogMessage to see if any unexpected or frequent errors start occurring that was not caught during QA. Release week \u00b6 After QA week ends the code freeze is lifted, even if there are items yet to be tested. The release manager will continue the same process outlined above. Notify the Agent Release Manager when code freeze ends.","title":"Pre release"},{"location":"process/agent-release/pre-release/#pre-release","text":"A new minor version of the Agent is released every 6 weeks (approximately). 
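For illustration, the Branch, Tag, and Create items steps above could be chained as follows for a hypothetical 7.18.0 release; the version numbers are only the examples already used in the text:

```bash
# branch off master, named after the highest Agent version being released
git checkout -b 7.18.x master
git push origin 7.18.x
# tag and push the first release candidate
git tag 7.18.0-rc.1 -m 7.18.0-rc.1
git push origin 7.18.0-rc.1
# create one Trello card per testable change since the last release
ddev release trello testable 7.17.1 7.18.0-rc.1
```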
Each release ships a snapshot of integrations-core .","title":"Pre release"},{"location":"process/agent-release/pre-release/#setup","text":"Ensure that you have configured the following: GitHub credentials Trello credentials Trello team mappings","title":"Setup"},{"location":"process/agent-release/pre-release/#before-freeze","text":"Update style dependencies to latest versions (except if comments say otherwise) via PR. Example: ISORT_DEP , BLACK_DEP , etc. Check that the master , py2 and base_check builds are green.","title":"Before Freeze"},{"location":"process/agent-release/pre-release/#freeze","text":"At midnight (EDT/EST) on the Friday before QA week we freeze, at which point the release manager will release all integrations with pending changes then branch off.","title":"Freeze"},{"location":"process/agent-release/pre-release/#release","text":"Make a pull request to release any new integrations , then merge it and pull master Make a pull request to release all changed integrations , then merge it and pull master Get 2+ thorough reviews on the changelogs. Entries should have appropriate SemVer levels (e.g. Changed entries must refer to breaking changes only). See also PR guidelines . Consider x-posting the PR to Agent teams that have integrations in integrations-core , so they can check relevant changelogs too. Important Update PyPI if you released datadog_checks_base or datadog_checks_dev .","title":"Release"},{"location":"process/agent-release/pre-release/#branch","text":"Create a branch based on master named after the highest version of the Agent being released in the form ..x Push the branch to GitHub","title":"Branch"},{"location":"process/agent-release/pre-release/#tag","text":"Run: git tag ..0-rc.1 -m ..0-rc.1 git push origin ..0-rc.1","title":"Tag"},{"location":"process/agent-release/pre-release/#qa-week","text":"We test all changes to integrations that were introduced since the last release.","title":"QA week"},{"location":"process/agent-release/pre-release/#create-items","text":"Create an item for every change in our board using the Trello subcommand called testable . For example: ddev release trello testable 7.17.1 7.18.0-rc.1 or if the tag is not ready yet: ddev release trello testable 7.17.1 origin/master would select all commits that were merged between the Git references. The command will display each change and prompt you to assign a team or skip. Purely documentation changes are automatically skipped. Cards are automatically assigned if $trello_users_$team table is configured .","title":"Create items"},{"location":"process/agent-release/pre-release/#release-candidates","text":"The main Agent release manager will increment and build a new rc every day a bug fix needs to be tested until all QA is complete. Before each build is triggered: Merge any fixes that have been approved, then pull master Release all changed integrations with the exception of datadog_checks_dev For each fix merged, you must cherry-pick to the branch : The commit to master itself The release commit, so the shipped versions match the individually released integrations After all fixes have been cherry-picked: Push the changes to GitHub Tag with the appropriate rc number even if there were no changes","title":"Release candidates"},{"location":"process/agent-release/pre-release/#communication","text":"The Agent Release Manager will post a daily status for the entire release cycle. 
Reply in the thread with any pending PRs meant for the next RC and update the spreadsheet PRs included in Agent RCs .","title":"Communication"},{"location":"process/agent-release/pre-release/#logs","text":"Each release candidate is deployed in a staging environment. We observe the WARN or ERROR level logs filtered with the facets Service:datadog-agent and index:main and LogMessage to see if any unexpected or frequent errors start occurring that was not caught during QA.","title":"Logs"},{"location":"process/agent-release/pre-release/#release-week","text":"After QA week ends the code freeze is lifted, even if there are items yet to be tested. The release manager will continue the same process outlined above. Notify the Agent Release Manager when code freeze ends.","title":"Release week"},{"location":"tutorials/memory-profiling/","text":"Memory profiling \u00b6","title":"Memory profiling"},{"location":"tutorials/memory-profiling/#memory-profiling","text":"","title":"Memory profiling"},{"location":"tutorials/jmx/integration/","text":"JMX integration \u00b6 Tutorial for starting a JMX integration Step 1: Create a JMX integration scaffolding \u00b6 ddev create --type jmx MyJMXIntegration JMX integration contains specific init configs and instance configs: init_config : is_jmx : true # tells the Agent that the integration is a JMX type of integration collect_default_metrics : true # if true, metrics declared in `metrics.yaml` are collected instances : - host : # JMX hostname port : # JMX port ... Other init and instance configs can be found on JMX integration page Step 2: Define metrics you want to collect \u00b6 Select what metrics you want to collect from JMX. Available metrics can be usually found on official documentation of the service you want to monitor. You can also use tools like VisualVM , JConsole or jmxterm to explore the available JMX beans and their descriptions. Step 3: Define metrics filters \u00b6 Edit the metrics.yaml to define the filters for collecting metrics. The metrics filters format details can be found on JMX integration doc JMXFetch test cases also help understanding how metrics filters work and provide many examples. Example of metrics.yaml jmx_metrics : - include : domain : org.apache.activemq destinationType : Queue attribute : AverageEnqueueTime : alias : activemq.queue.avg_enqueue_time metric_type : gauge ConsumerCount : alias : activemq.queue.consumer_count metric_type : gauge Testing \u00b6 Using ddev tool , you can test against the JMX service by providing a dd_environment in tests/conftest.py like this one: @pytest . fixture ( scope = \"session\" ) def dd_environment (): compose_file = os . path . join ( HERE , 'compose' , 'docker-compose.yaml' ) with docker_run ( compose_file , conditions = [ # Kafka Broker CheckDockerLogs ( 'broker' , 'Monitored service is now ready' ), ], ): yield CHECK_CONFIG , { 'use_jmx' : True } And a e2e test like: @pytest . mark . e2e def test ( dd_agent_check ): instance = {} aggregator = dd_agent_check ( instance ) for metric in ACTIVEMQ_E2E_METRICS + JVM_E2E_METRICS : aggregator . assert_metric ( metric ) aggregator . assert_all_metrics_covered () aggregator . 
assert_metrics_using_metadata ( get_metadata_metrics (), exclude = JVM_E2E_METRICS ) Real examples of: JMX dd_environment JMX e2e test","title":"JMX integration"},{"location":"tutorials/jmx/integration/#jmx-integration","text":"Tutorial for starting a JMX integration","title":"JMX integration"},{"location":"tutorials/jmx/integration/#step-1-create-a-jmx-integration-scaffolding","text":"ddev create --type jmx MyJMXIntegration JMX integration contains specific init configs and instance configs: init_config : is_jmx : true # tells the Agent that the integration is a JMX type of integration collect_default_metrics : true # if true, metrics declared in `metrics.yaml` are collected instances : - host : # JMX hostname port : # JMX port ... Other init and instance configs can be found on JMX integration page","title":"Step 1: Create a JMX integration scaffolding"},{"location":"tutorials/jmx/integration/#step-2-define-metrics-you-want-to-collect","text":"Select what metrics you want to collect from JMX. Available metrics can be usually found on official documentation of the service you want to monitor. You can also use tools like VisualVM , JConsole or jmxterm to explore the available JMX beans and their descriptions.","title":"Step 2: Define metrics you want to collect"},{"location":"tutorials/jmx/integration/#step-3-define-metrics-filters","text":"Edit the metrics.yaml to define the filters for collecting metrics. The metrics filters format details can be found on JMX integration doc JMXFetch test cases also help understanding how metrics filters work and provide many examples. Example of metrics.yaml jmx_metrics : - include : domain : org.apache.activemq destinationType : Queue attribute : AverageEnqueueTime : alias : activemq.queue.avg_enqueue_time metric_type : gauge ConsumerCount : alias : activemq.queue.consumer_count metric_type : gauge","title":"Step 3: Define metrics filters"},{"location":"tutorials/jmx/integration/#testing","text":"Using ddev tool , you can test against the JMX service by providing a dd_environment in tests/conftest.py like this one: @pytest . fixture ( scope = \"session\" ) def dd_environment (): compose_file = os . path . join ( HERE , 'compose' , 'docker-compose.yaml' ) with docker_run ( compose_file , conditions = [ # Kafka Broker CheckDockerLogs ( 'broker' , 'Monitored service is now ready' ), ], ): yield CHECK_CONFIG , { 'use_jmx' : True } And a e2e test like: @pytest . mark . e2e def test ( dd_agent_check ): instance = {} aggregator = dd_agent_check ( instance ) for metric in ACTIVEMQ_E2E_METRICS + JVM_E2E_METRICS : aggregator . assert_metric ( metric ) aggregator . assert_all_metrics_covered () aggregator . assert_metrics_using_metadata ( get_metadata_metrics (), exclude = JVM_E2E_METRICS ) Real examples of: JMX dd_environment JMX e2e test","title":"Testing"},{"location":"tutorials/jmx/tools/","text":"JMX Tools \u00b6 List JMX beans using JMXTerm \u00b6 curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost: domains beans Example output: $ curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar $ java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost:1616 Welcome to JMX terminal. Type \"help\" for available commands. 
$>domains #following domains are available JMImplementation com.sun.management io.fabric8.insight java.lang java.nio java.util.logging jmx4perl jolokia org.apache.activemq $>beans #domain = JMImplementation: JMImplementation:type=MBeanServerDelegate #domain = com.sun.management: com.sun.management:type=DiagnosticCommand com.sun.management:type=HotSpotDiagnostic #domain = io.fabric8.insight: io.fabric8.insight:type=LogQuery #domain = java.lang: java.lang:name=Code Cache,type=MemoryPool java.lang:name=CodeCacheManager,type=MemoryManager java.lang:name=Compressed Class Space,type=MemoryPool java.lang:name=Metaspace Manager,type=MemoryManager java.lang:name=Metaspace,type=MemoryPool java.lang:name=PS Eden Space,type=MemoryPool java.lang:name=PS MarkSweep,type=GarbageCollector java.lang:name=PS Old Gen,type=MemoryPool java.lang:name=PS Scavenge,type=GarbageCollector java.lang:name=PS Survivor Space,type=MemoryPool java.lang:type=ClassLoading java.lang:type=Compilation java.lang:type=Memory java.lang:type=OperatingSystem java.lang:type=Runtime java.lang:type=Threading [...] List JMX beans using JMXTerm with extra jars \u00b6 In the example below, the extra jar is jboss-client.jar . curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -cp /wildfly-17.0.1.Final/bin/client/jboss-client.jar:/tmp/jmxterm-1.0.1-uber.jar org.cyclopsgroup.jmxterm.boot.CliMain --url service:jmx:remote+http://localhost:9990 -u datadog -p pa$$word domains beans","title":"JMX Tools"},{"location":"tutorials/jmx/tools/#jmx-tools","text":"","title":"JMX Tools"},{"location":"tutorials/jmx/tools/#list-jmx-beans-using-jmxterm","text":"curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost: domains beans Example output: $ curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar $ java -jar /tmp/jmxterm-1.0.1-uber.jar -l localhost:1616 Welcome to JMX terminal. Type \"help\" for available commands. $>domains #following domains are available JMImplementation com.sun.management io.fabric8.insight java.lang java.nio java.util.logging jmx4perl jolokia org.apache.activemq $>beans #domain = JMImplementation: JMImplementation:type=MBeanServerDelegate #domain = com.sun.management: com.sun.management:type=DiagnosticCommand com.sun.management:type=HotSpotDiagnostic #domain = io.fabric8.insight: io.fabric8.insight:type=LogQuery #domain = java.lang: java.lang:name=Code Cache,type=MemoryPool java.lang:name=CodeCacheManager,type=MemoryManager java.lang:name=Compressed Class Space,type=MemoryPool java.lang:name=Metaspace Manager,type=MemoryManager java.lang:name=Metaspace,type=MemoryPool java.lang:name=PS Eden Space,type=MemoryPool java.lang:name=PS MarkSweep,type=GarbageCollector java.lang:name=PS Old Gen,type=MemoryPool java.lang:name=PS Scavenge,type=GarbageCollector java.lang:name=PS Survivor Space,type=MemoryPool java.lang:type=ClassLoading java.lang:type=Compilation java.lang:type=Memory java.lang:type=OperatingSystem java.lang:type=Runtime java.lang:type=Threading [...]","title":"List JMX beans using JMXTerm"},{"location":"tutorials/jmx/tools/#list-jmx-beans-using-jmxterm-with-extra-jars","text":"In the example below, the extra jar is jboss-client.jar . 
curl -L https://github.com/jiaqi/jmxterm/releases/download/v1.0.1/jmxterm-1.0.1-uber.jar -o /tmp/jmxterm-1.0.1-uber.jar java -cp /wildfly-17.0.1.Final/bin/client/jboss-client.jar:/tmp/jmxterm-1.0.1-uber.jar org.cyclopsgroup.jmxterm.boot.CliMain --url service:jmx:remote+http://localhost:9990 -u datadog -p pa$$word domains beans","title":"List JMX beans using JMXTerm with extra jars"},{"location":"tutorials/snmp/how-to/","text":"SNMP How-To \u00b6 Simulate SNMP devices \u00b6 SNMP is a protocol for gathering metrics from network devices, but automated testing of the integration would not be practical nor reliable if we used actual devices. Our approach is to use a simulated SNMP device that responds to SNMP queries using simulation data . This simulated device is brought up as a Docker container when starting the SNMP test environment using: ddev env start snmp [ ... ] Test SNMP profiles locally \u00b6 Once the environment is up and running, you can modify the instance configuration to test profiles that support simulated metrics. The following is an example of an instance configured to use the Cisco Nexus profile. init_config : profiles : cisco_nexus : definition_file : cisco-nexus.yaml instances : - community_string : cisco_nexus # (1.) ip_address : # (2.) profile : cisco_nexus name : localhost port : 1161 The community_string must match the corresponding device .snmprec file name. For example, myprofile.snmprec gives community_string: myprofile . This also applies to walk files : myprofile.snmpwalk gives community_string: myprofile . To find the IP address of the SNMP container, run: docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dd-snmp Run SNMP queries \u00b6 With the test environment is up and running, we can issue SNMP queries to the simulated device using a command line SNMP client. Prerequisites \u00b6 Make sure you have the Net-SNMP tools installed on your machine. These should come pre-installed by default on Linux and macOS. If necessary, you can download them on the Net-SNMP website . Available commands \u00b6 The Net-SNMP tools provide a number of commands to interact with SNMP devices. The most commonly used commands are: snmpget : to issue an SNMP GET query. snmpgetnext : to issue an SNMP GETNEXT query. snmpwalk : to query an entire OID sub-tree at once. snmptable : to query rows in an SNMP table. Examples \u00b6 GET query \u00b6 To query a specific OID from a device, we can use the snmpget command. For example, the following command will query sysDescr OID of an SNMP device, which returns its human-readable description: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 system.sysDescr.0 SNMPv2-MIB::sysDescr.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 SNMPv2-MIB::sysORUpTime.1 = Timeticks: (9) 0:00:00.09 Let's break this command down: snmpget : this command sends an SNMP GET request, and can be used to query the value of an OID. Here, we are requesting the system.sysDescr.0 OID. -v 2c : instructs your SNMP client to send the request using SNMP version 2c. See SNMP Versions . -c public : instructs the SNMP client to send the community string public along with our request. (This is a form of authentication provided by SNMP v2. See SNMP Versions .) 127.0.0.1:1161 : this is the host and port where the simulated SNMP agent is available at. (Confirm the port used by the ddev environment by inspecting the Docker port mapping via $ docker ps .) 
system.sysDescr.0 : this is the OID that the client should request. In practice this can refer to either a fully-resolved OID (e.g. 1.3.6.1.4.1[...] ), or a label (e.g. sysDescr.0 ). -IR : this option allows us to use labels for OIDs that aren't in the generic 1.3.6.1.2.1.* sub-tree (see: The OID tree ). TL;DR: always use this option when working with OIDs coming from vendor-specific MIBs. Tip If the above command fails, try using the explicit OID like so: $ snmpget -v 2c -c public -IR 127 .0.0.1:1161 iso.3.6.1.2.1.1.1.0 Table query \u00b6 For tables, use the snmptable command, which will output the rows in the table in a tabular format. Its arguments and options are similar to snmpget . $ snmptable -v 2c -c public -IR -Os 127 .0.0.1:1161 hrStorageTable SNMP table: hrStorageTable hrStorageIndex hrStorageType hrStorageDescr hrStorageAllocationUnits hrStorageSize hrStorageUsed hrStorageAllocationFailures 1 hrStorageRam Physical memory 1024 Bytes 2046940 1969964 ? 3 hrStorageVirtualMemory Virtual memory 1024 Bytes 3095512 1969964 ? 6 hrStorageOther Memory buffers 1024 Bytes 2046940 73580 ? 7 hrStorageOther Cached memory 1024 Bytes 1577648 1577648 ? 8 hrStorageOther Shared memory 1024 Bytes 2940 2940 ? 10 hrStorageVirtualMemory Swap space 1024 Bytes 1048572 0 ? 33 hrStorageFixedDisk /dev 4096 Bytes 16384 0 ? 36 hrStorageFixedDisk /sys/fs/cgroup 4096 Bytes 255867 0 ? 52 hrStorageFixedDisk /etc/resolv.conf 4096 Bytes 16448139 6493059 ? 53 hrStorageFixedDisk /etc/hostname 4096 Bytes 16448139 6493059 ? 54 hrStorageFixedDisk /etc/hosts 4096 Bytes 16448139 6493059 ? 55 hrStorageFixedDisk /dev/shm 4096 Bytes 16384 0 ? 61 hrStorageFixedDisk /proc/kcore 4096 Bytes 16384 0 ? 62 hrStorageFixedDisk /proc/keys 4096 Bytes 16384 0 ? 63 hrStorageFixedDisk /proc/timer_list 4096 Bytes 16384 0 ? 64 hrStorageFixedDisk /proc/sched_debug 4096 Bytes 16384 0 ? 65 hrStorageFixedDisk /sys/firmware 4096 Bytes 255867 0 ? (In this case, we added the -Os option which prints only the last symbolic element and reduces the output of hrStorageTypes .) Walk query \u00b6 A walk query can be used to query all OIDs in a given sub-tree . The snmpwalk command can be used to perform a walk query. To facilitate usage of walk files for debugging, the following options are recommended: -ObentU . Here's what each option does: b : do not break OID indexes down. e : print enums numerically (for example, 24 instead of softwareLoopback(24) ). n : print OIDs numerically (for example, .1.3.6.1.2.1.2.2.1.1.1 instead of IF-MIB::ifIndex.1 ). t : print timeticks numerically (for example, 4226041 instead of Timeticks: (4226041) 11:44:20.41 ). U : don't print units. 
For example, the following command gets a walk of the 1.3.6.1.2.1.1 ( system ) sub-tree: $ snmpwalk -v 2c -c public -ObentU 127 .0.0.1:1161 1 .3.6.1.2.1.1 .1.3.6.1.2.1.1.1.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 .1.3.6.1.2.1.1.3.0 = 4226041 .1.3.6.1.2.1.1.4.0 = STRING: root@localhost .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 .1.3.6.1.2.1.1.6.0 = STRING: Unknown .1.3.6.1.2.1.1.8.0 = 9 .1.3.6.1.2.1.1.9.1.2.1 = OID: .1.3.6.1.6.3.11.3.1.1 .1.3.6.1.2.1.1.9.1.2.2 = OID: .1.3.6.1.6.3.15.2.1.1 .1.3.6.1.2.1.1.9.1.2.3 = OID: .1.3.6.1.6.3.10.3.1.1 .1.3.6.1.2.1.1.9.1.2.4 = OID: .1.3.6.1.6.3.1 .1.3.6.1.2.1.1.9.1.2.5 = OID: .1.3.6.1.2.1.49 .1.3.6.1.2.1.1.9.1.2.6 = OID: .1.3.6.1.2.1.4 .1.3.6.1.2.1.1.9.1.2.7 = OID: .1.3.6.1.2.1.50 .1.3.6.1.2.1.1.9.1.2.8 = OID: .1.3.6.1.6.3.16.2.2.1 .1.3.6.1.2.1.1.9.1.2.9 = OID: .1.3.6.1.6.3.13.3.1.3 .1.3.6.1.2.1.1.9.1.2.10 = OID: .1.3.6.1.2.1.92 .1.3.6.1.2.1.1.9.1.3.1 = STRING: The MIB for Message Processing and Dispatching. .1.3.6.1.2.1.1.9.1.3.2 = STRING: The management information definitions for the SNMP User-based Security Model. .1.3.6.1.2.1.1.9.1.3.3 = STRING: The SNMP Management Architecture MIB. .1.3.6.1.2.1.1.9.1.3.4 = STRING: The MIB module for SNMPv2 entities .1.3.6.1.2.1.1.9.1.3.5 = STRING: The MIB module for managing TCP implementations .1.3.6.1.2.1.1.9.1.3.6 = STRING: The MIB module for managing IP and ICMP implementations .1.3.6.1.2.1.1.9.1.3.7 = STRING: The MIB module for managing UDP implementations .1.3.6.1.2.1.1.9.1.3.8 = STRING: View-based Access Control Model for SNMP. .1.3.6.1.2.1.1.9.1.3.9 = STRING: The MIB modules for managing SNMP Notification, plus filtering. .1.3.6.1.2.1.1.9.1.3.10 = STRING: The MIB module for logging SNMP Notifications. .1.3.6.1.2.1.1.9.1.4.1 = 9 .1.3.6.1.2.1.1.9.1.4.2 = 9 .1.3.6.1.2.1.1.9.1.4.3 = 9 .1.3.6.1.2.1.1.9.1.4.4 = 9 .1.3.6.1.2.1.1.9.1.4.5 = 9 .1.3.6.1.2.1.1.9.1.4.6 = 9 .1.3.6.1.2.1.1.9.1.4.7 = 9 .1.3.6.1.2.1.1.9.1.4.8 = 9 .1.3.6.1.2.1.1.9.1.4.9 = 9 .1.3.6.1.2.1.1.9.1.4.10 = 9 As you can see, all OIDs that the device has available in the .1.3.6.1.2.1.1.* sub-tree are returned. In particular, one can recognize: sysObjectID ( .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 ) sysUpTime ( .1.3.6.1.2.1.1.3.0 = 4226041 ) sysName ( .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 ). 
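A walk like the one above can also be captured straight into a file for later use as simulation data, as described further down; a minimal sketch, where `myprofile` is a placeholder profile name:

```bash
# save the device walk to a .snmpwalk file, then add it to the test data directory
snmpwalk -v 2c -c public -ObentU 127.0.0.1:1161 1.3.6.1.2.1.1 > myprofile.snmpwalk
# the file name (without extension) then becomes the community_string
# to use in the instance configuration: community_string: myprofile
```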
Here is another example that queries the entire contents of ifTable (the table in IF-MIB that contains information about network interfaces): snmpwalk -v 2c -c public -OentU 127.0.0.1:1161 1.3.6.1.2.1.2.2 .1.3.6.1.2.1.2.2.1.1.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.1.90 = INTEGER: 90 .1.3.6.1.2.1.2.2.1.2.1 = STRING: lo .1.3.6.1.2.1.2.2.1.2.90 = STRING: eth0 .1.3.6.1.2.1.2.2.1.3.1 = INTEGER: 24 .1.3.6.1.2.1.2.2.1.3.90 = INTEGER: 6 .1.3.6.1.2.1.2.2.1.4.1 = INTEGER: 65536 .1.3.6.1.2.1.2.2.1.4.90 = INTEGER: 1500 .1.3.6.1.2.1.2.2.1.5.1 = Gauge32: 10000000 .1.3.6.1.2.1.2.2.1.5.90 = Gauge32: 4294967295 .1.3.6.1.2.1.2.2.1.6.1 = STRING: .1.3.6.1.2.1.2.2.1.6.90 = STRING: 2:42:ac:11:0:2 .1.3.6.1.2.1.2.2.1.7.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.7.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.9.1 = 0 .1.3.6.1.2.1.2.2.1.9.90 = 0 .1.3.6.1.2.1.2.2.1.10.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.10.90 = Counter32: 2928 .1.3.6.1.2.1.2.2.1.11.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.11.90 = Counter32: 40 .1.3.6.1.2.1.2.2.1.12.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.12.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.16.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.16.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.17.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.17.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.21.1 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.21.90 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.22.1 = OID: .0.0 .1.3.6.1.2.1.2.2.1.22.90 = OID: .0.0 Generate table simulation data \u00b6 To generate simulation data for tables automatically, use the mib2dev.py tool shipped with snmpsim . This tool will be renamed as snmpsim-record-mibs in the upcoming 1.0 release of the library. First, install snmpsim: pip install snmpsim Then run the tool, specifying the MIB with the start and stop OIDs (which can correspond to .e.g the first and last columns in the table respectively). For example: mib2dev.py --mib-module = --start-oid = 1 .3.6.1.4.1.674.10892.1.400.20 --stop-oid = 1 .3.6.1.4.1.674.10892.1.600.12 > /path/to/mytable.snmprec The following command generates 4 rows for the IF-MIB:ifTable (1.3.6.1.2.1.2.2) : mib2dev.py --mib-module = IF-MIB --start-oid = 1 .3.6.1.2.1.2.2 --stop-oid = 1 .3.6.1.2.1.2.3 --table-size = 4 > /path/to/mytable.snmprec Known issues \u00b6 mib2dev has a known issue with IF-MIB::ifPhysAddress , that is expected to contain an hexadecimal string, but mib2dev fills it with a string. 
To fix this, provide a valid hextring when prompted on the command line: # Synthesizing row #1 of table 1.3.6.1.2.1.2.2.1 *** Inconsistent value: Display format eval failure: b 'driving kept zombies quaintly forward zombies' : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' caused by : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' *** See constraints and suggest a better one for : # Table IF-MIB::ifTable # Row IF-MIB::ifEntry # Index IF-MIB::ifIndex (type InterfaceIndex) # Column IF-MIB::ifPhysAddress (type PhysAddress) # Value ['driving kept zombies quaintly forward zombies'] ? 001122334455 Generate simulation data from a walk \u00b6 As an alternative to .snmprec files , it is possible to use a walk as simulation data . This is especially useful when debugging live devices, since you can export the device walk and use this real data locally. To do so, paste the output of a walk query into a .snmpwalk file, and add this file to the test data directory. Then, pass the name of the walk file as the community_string . For more information, see Test SNMP profiles locally . Find where MIBs are installed on your machine \u00b6 See the Using and loading MIBs Net-SNMP tutorial. Browse locally installed MIBs \u00b6 Since community resources that list MIBs and OIDs are best effort, the MIB you are investigating may not be present or may not be available in its the latest version. In that case, you can use the snmptranslate CLI tool to output similar information for MIBs installed on your system. This tool is part of Net-SNMP - see SNMP queries prerequisites . Steps Run $ snmptranslate -m -Tz -On to get a complete list of OIDs in the MIB along with their labels. Redirect to a file for nicer formatting as needed. Example: $ snmptranslate -m IF-MIB -Tz -On > out.log $ cat out.log \"org\" \"1.3\" \"dod\" \"1.3.6\" \"internet\" \"1.3.6.1\" \"directory\" \"1.3.6.1.1\" \"mgmt\" \"1.3.6.1.2\" \"mib-2\" \"1.3.6.1.2.1\" \"system\" \"1.3.6.1.2.1.1\" \"sysDescr\" \"1.3.6.1.2.1.1.1\" \"sysObjectID\" \"1.3.6.1.2.1.1.2\" \"sysUpTime\" \"1.3.6.1.2.1.1.3\" \"sysContact\" \"1.3.6.1.2.1.1.4\" \"sysName\" \"1.3.6.1.2.1.1.5\" \"sysLocation\" \"1.3.6.1.2.1.1.6\" [...] Tip Use the -M option to specify the directory where snmptranslate should look for MIBs. Useful if you want to inspect a MIB you've just downloaded but not moved to the default MIB directory. Tip Use -Tp for an alternative tree-like formatting.","title":"SNMP How-To"},{"location":"tutorials/snmp/how-to/#snmp-how-to","text":"","title":"SNMP How-To"},{"location":"tutorials/snmp/how-to/#simulate-snmp-devices","text":"SNMP is a protocol for gathering metrics from network devices, but automated testing of the integration would not be practical nor reliable if we used actual devices. Our approach is to use a simulated SNMP device that responds to SNMP queries using simulation data . This simulated device is brought up as a Docker container when starting the SNMP test environment using: ddev env start snmp [ ... ]","title":"Simulate SNMP devices"},{"location":"tutorials/snmp/how-to/#test-snmp-profiles-locally","text":"Once the environment is up and running, you can modify the instance configuration to test profiles that support simulated metrics. The following is an example of an instance configured to use the Cisco Nexus profile. init_config : profiles : cisco_nexus : definition_file : cisco-nexus.yaml instances : - community_string : cisco_nexus # (1.) ip_address : # (2.) 
profile : cisco_nexus name : localhost port : 1161 The community_string must match the corresponding device .snmprec file name. For example, myprofile.snmprec gives community_string: myprofile . This also applies to walk files : myprofile.snmpwalk gives community_string: myprofile . To find the IP address of the SNMP container, run: docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dd-snmp","title":"Test SNMP profiles locally"},{"location":"tutorials/snmp/how-to/#run-snmp-queries","text":"With the test environment up and running, we can issue SNMP queries to the simulated device using a command line SNMP client.","title":"Run SNMP queries"},{"location":"tutorials/snmp/how-to/#prerequisites","text":"Make sure you have the Net-SNMP tools installed on your machine. These should come pre-installed by default on Linux and macOS. If necessary, you can download them on the Net-SNMP website .","title":"Prerequisites"},{"location":"tutorials/snmp/how-to/#available-commands","text":"The Net-SNMP tools provide a number of commands to interact with SNMP devices. The most commonly used commands are: snmpget : to issue an SNMP GET query. snmpgetnext : to issue an SNMP GETNEXT query. snmpwalk : to query an entire OID sub-tree at once. snmptable : to query rows in an SNMP table.","title":"Available commands"},{"location":"tutorials/snmp/how-to/#examples","text":"","title":"Examples"},{"location":"tutorials/snmp/how-to/#get-query","text":"To query a specific OID from a device, we can use the snmpget command. For example, the following command will query the sysDescr OID of an SNMP device, which returns its human-readable description: $ snmpget -v 2c -c public -IR 127.0.0.1:1161 system.sysDescr.0 SNMPv2-MIB::sysDescr.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 SNMPv2-MIB::sysORUpTime.1 = Timeticks: (9) 0:00:00.09 Let's break this command down: snmpget : this command sends an SNMP GET request, and can be used to query the value of an OID. Here, we are requesting the system.sysDescr.0 OID. -v 2c : instructs your SNMP client to send the request using SNMP version 2c. See SNMP Versions . -c public : instructs the SNMP client to send the community string public along with our request. (This is a form of authentication provided by SNMP v2. See SNMP Versions .) 127.0.0.1:1161 : this is the host and port where the simulated SNMP agent is available. (Confirm the port used by the ddev environment by inspecting the Docker port mapping via $ docker ps .) system.sysDescr.0 : this is the OID that the client should request. In practice this can refer to either a fully-resolved OID (e.g. 1.3.6.1.4.1[...] ), or a label (e.g. sysDescr.0 ). -IR : this option allows us to use labels for OIDs that aren't in the generic 1.3.6.1.2.1.* sub-tree (see: The OID tree ). TL;DR: always use this option when working with OIDs coming from vendor-specific MIBs. Tip If the above command fails, try using the explicit OID like so: $ snmpget -v 2c -c public -IR 127.0.0.1:1161 iso.3.6.1.2.1.1.1.0","title":"GET query"},{"location":"tutorials/snmp/how-to/#table-query","text":"For tables, use the snmptable command, which will output the rows in the table in a tabular format. Its arguments and options are similar to snmpget .
$ snmptable -v 2c -c public -IR -Os 127 .0.0.1:1161 hrStorageTable SNMP table: hrStorageTable hrStorageIndex hrStorageType hrStorageDescr hrStorageAllocationUnits hrStorageSize hrStorageUsed hrStorageAllocationFailures 1 hrStorageRam Physical memory 1024 Bytes 2046940 1969964 ? 3 hrStorageVirtualMemory Virtual memory 1024 Bytes 3095512 1969964 ? 6 hrStorageOther Memory buffers 1024 Bytes 2046940 73580 ? 7 hrStorageOther Cached memory 1024 Bytes 1577648 1577648 ? 8 hrStorageOther Shared memory 1024 Bytes 2940 2940 ? 10 hrStorageVirtualMemory Swap space 1024 Bytes 1048572 0 ? 33 hrStorageFixedDisk /dev 4096 Bytes 16384 0 ? 36 hrStorageFixedDisk /sys/fs/cgroup 4096 Bytes 255867 0 ? 52 hrStorageFixedDisk /etc/resolv.conf 4096 Bytes 16448139 6493059 ? 53 hrStorageFixedDisk /etc/hostname 4096 Bytes 16448139 6493059 ? 54 hrStorageFixedDisk /etc/hosts 4096 Bytes 16448139 6493059 ? 55 hrStorageFixedDisk /dev/shm 4096 Bytes 16384 0 ? 61 hrStorageFixedDisk /proc/kcore 4096 Bytes 16384 0 ? 62 hrStorageFixedDisk /proc/keys 4096 Bytes 16384 0 ? 63 hrStorageFixedDisk /proc/timer_list 4096 Bytes 16384 0 ? 64 hrStorageFixedDisk /proc/sched_debug 4096 Bytes 16384 0 ? 65 hrStorageFixedDisk /sys/firmware 4096 Bytes 255867 0 ? (In this case, we added the -Os option which prints only the last symbolic element and reduces the output of hrStorageTypes .)","title":"Table query"},{"location":"tutorials/snmp/how-to/#walk-query","text":"A walk query can be used to query all OIDs in a given sub-tree . The snmpwalk command can be used to perform a walk query. To facilitate usage of walk files for debugging, the following options are recommended: -ObentU . Here's what each option does: b : do not break OID indexes down. e : print enums numerically (for example, 24 instead of softwareLoopback(24) ). n : print OIDs numerically (for example, .1.3.6.1.2.1.2.2.1.1.1 instead of IF-MIB::ifIndex.1 ). t : print timeticks numerically (for example, 4226041 instead of Timeticks: (4226041) 11:44:20.41 ). U : don't print units. For example, the following command gets a walk of the 1.3.6.1.2.1.1 ( system ) sub-tree: $ snmpwalk -v 2c -c public -ObentU 127 .0.0.1:1161 1 .3.6.1.2.1.1 .1.3.6.1.2.1.1.1.0 = STRING: Linux 41ba948911b9 4.9.87-linuxkit-aufs #1 SMP Wed Mar 14 15:12:16 UTC 2018 x86_64 .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 .1.3.6.1.2.1.1.3.0 = 4226041 .1.3.6.1.2.1.1.4.0 = STRING: root@localhost .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 .1.3.6.1.2.1.1.6.0 = STRING: Unknown .1.3.6.1.2.1.1.8.0 = 9 .1.3.6.1.2.1.1.9.1.2.1 = OID: .1.3.6.1.6.3.11.3.1.1 .1.3.6.1.2.1.1.9.1.2.2 = OID: .1.3.6.1.6.3.15.2.1.1 .1.3.6.1.2.1.1.9.1.2.3 = OID: .1.3.6.1.6.3.10.3.1.1 .1.3.6.1.2.1.1.9.1.2.4 = OID: .1.3.6.1.6.3.1 .1.3.6.1.2.1.1.9.1.2.5 = OID: .1.3.6.1.2.1.49 .1.3.6.1.2.1.1.9.1.2.6 = OID: .1.3.6.1.2.1.4 .1.3.6.1.2.1.1.9.1.2.7 = OID: .1.3.6.1.2.1.50 .1.3.6.1.2.1.1.9.1.2.8 = OID: .1.3.6.1.6.3.16.2.2.1 .1.3.6.1.2.1.1.9.1.2.9 = OID: .1.3.6.1.6.3.13.3.1.3 .1.3.6.1.2.1.1.9.1.2.10 = OID: .1.3.6.1.2.1.92 .1.3.6.1.2.1.1.9.1.3.1 = STRING: The MIB for Message Processing and Dispatching. .1.3.6.1.2.1.1.9.1.3.2 = STRING: The management information definitions for the SNMP User-based Security Model. .1.3.6.1.2.1.1.9.1.3.3 = STRING: The SNMP Management Architecture MIB. 
.1.3.6.1.2.1.1.9.1.3.4 = STRING: The MIB module for SNMPv2 entities .1.3.6.1.2.1.1.9.1.3.5 = STRING: The MIB module for managing TCP implementations .1.3.6.1.2.1.1.9.1.3.6 = STRING: The MIB module for managing IP and ICMP implementations .1.3.6.1.2.1.1.9.1.3.7 = STRING: The MIB module for managing UDP implementations .1.3.6.1.2.1.1.9.1.3.8 = STRING: View-based Access Control Model for SNMP. .1.3.6.1.2.1.1.9.1.3.9 = STRING: The MIB modules for managing SNMP Notification, plus filtering. .1.3.6.1.2.1.1.9.1.3.10 = STRING: The MIB module for logging SNMP Notifications. .1.3.6.1.2.1.1.9.1.4.1 = 9 .1.3.6.1.2.1.1.9.1.4.2 = 9 .1.3.6.1.2.1.1.9.1.4.3 = 9 .1.3.6.1.2.1.1.9.1.4.4 = 9 .1.3.6.1.2.1.1.9.1.4.5 = 9 .1.3.6.1.2.1.1.9.1.4.6 = 9 .1.3.6.1.2.1.1.9.1.4.7 = 9 .1.3.6.1.2.1.1.9.1.4.8 = 9 .1.3.6.1.2.1.1.9.1.4.9 = 9 .1.3.6.1.2.1.1.9.1.4.10 = 9 As you can see, all OIDs that the device has available in the .1.3.6.1.2.1.1.* sub-tree are returned. In particular, one can recognize: sysObjectID ( .1.3.6.1.2.1.1.2.0 = OID: .1.3.6.1.4.1.8072.3.2.10 ) sysUpTime ( .1.3.6.1.2.1.1.3.0 = 4226041 ) sysName ( .1.3.6.1.2.1.1.5.0 = STRING: 41ba948911b9 ). Here is another example that queries the entire contents of ifTable (the table in IF-MIB that contains information about network interfaces): snmpwalk -v 2c -c public -OentU 127.0.0.1:1161 1.3.6.1.2.1.2.2 .1.3.6.1.2.1.2.2.1.1.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.1.90 = INTEGER: 90 .1.3.6.1.2.1.2.2.1.2.1 = STRING: lo .1.3.6.1.2.1.2.2.1.2.90 = STRING: eth0 .1.3.6.1.2.1.2.2.1.3.1 = INTEGER: 24 .1.3.6.1.2.1.2.2.1.3.90 = INTEGER: 6 .1.3.6.1.2.1.2.2.1.4.1 = INTEGER: 65536 .1.3.6.1.2.1.2.2.1.4.90 = INTEGER: 1500 .1.3.6.1.2.1.2.2.1.5.1 = Gauge32: 10000000 .1.3.6.1.2.1.2.2.1.5.90 = Gauge32: 4294967295 .1.3.6.1.2.1.2.2.1.6.1 = STRING: .1.3.6.1.2.1.2.2.1.6.90 = STRING: 2:42:ac:11:0:2 .1.3.6.1.2.1.2.2.1.7.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.7.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.1 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.8.90 = INTEGER: 1 .1.3.6.1.2.1.2.2.1.9.1 = 0 .1.3.6.1.2.1.2.2.1.9.90 = 0 .1.3.6.1.2.1.2.2.1.10.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.10.90 = Counter32: 2928 .1.3.6.1.2.1.2.2.1.11.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.11.90 = Counter32: 40 .1.3.6.1.2.1.2.2.1.12.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.12.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.13.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.14.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.15.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.16.1 = Counter32: 5300203 .1.3.6.1.2.1.2.2.1.16.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.17.1 = Counter32: 63808 .1.3.6.1.2.1.2.2.1.17.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.18.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.19.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.1 = Counter32: 0 .1.3.6.1.2.1.2.2.1.20.90 = Counter32: 0 .1.3.6.1.2.1.2.2.1.21.1 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.21.90 = Gauge32: 0 .1.3.6.1.2.1.2.2.1.22.1 = OID: .0.0 .1.3.6.1.2.1.2.2.1.22.90 = OID: .0.0","title":"Walk query"},{"location":"tutorials/snmp/how-to/#generate-table-simulation-data","text":"To generate simulation data for tables automatically, use the mib2dev.py tool shipped with snmpsim . This tool will be renamed as snmpsim-record-mibs in the upcoming 1.0 release of the library. First, install snmpsim: pip install snmpsim Then run the tool, specifying the MIB with the start and stop OIDs (which can correspond to .e.g the first and last columns in the table respectively). 
For example: mib2dev.py --mib-module = --start-oid = 1.3.6.1.4.1.674.10892.1.400.20 --stop-oid = 1.3.6.1.4.1.674.10892.1.600.12 > /path/to/mytable.snmprec The following command generates 4 rows for the IF-MIB::ifTable (1.3.6.1.2.1.2.2) : mib2dev.py --mib-module = IF-MIB --start-oid = 1.3.6.1.2.1.2.2 --stop-oid = 1.3.6.1.2.1.2.3 --table-size = 4 > /path/to/mytable.snmprec","title":"Generate table simulation data"},{"location":"tutorials/snmp/how-to/#known-issues","text":"mib2dev has a known issue with IF-MIB::ifPhysAddress , which is expected to contain a hexadecimal string, but mib2dev fills it with a plain string. To fix this, provide a valid hex string when prompted on the command line: # Synthesizing row #1 of table 1.3.6.1.2.1.2.2.1 *** Inconsistent value: Display format eval failure: b 'driving kept zombies quaintly forward zombies' : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' caused by : invalid literal for int () with base 16 : 'driving kept zombies quaintly forward zombies' *** See constraints and suggest a better one for : # Table IF-MIB::ifTable # Row IF-MIB::ifEntry # Index IF-MIB::ifIndex (type InterfaceIndex) # Column IF-MIB::ifPhysAddress (type PhysAddress) # Value ['driving kept zombies quaintly forward zombies'] ? 001122334455","title":"Known issues"},{"location":"tutorials/snmp/how-to/#generate-simulation-data-from-a-walk","text":"As an alternative to .snmprec files , it is possible to use a walk as simulation data . This is especially useful when debugging live devices, since you can export the device walk and use this real data locally. To do so, paste the output of a walk query into a .snmpwalk file, and add this file to the test data directory. Then, pass the name of the walk file as the community_string . For more information, see Test SNMP profiles locally .","title":"Generate simulation data from a walk"},{"location":"tutorials/snmp/how-to/#find-where-mibs-are-installed-on-your-machine","text":"See the Using and loading MIBs Net-SNMP tutorial.","title":"Find where MIBs are installed on your machine"},{"location":"tutorials/snmp/how-to/#browse-locally-installed-mibs","text":"Since community resources that list MIBs and OIDs are best effort, the MIB you are investigating may not be present or may not be available in its latest version. In that case, you can use the snmptranslate CLI tool to output similar information for MIBs installed on your system. This tool is part of Net-SNMP - see SNMP queries prerequisites . Steps Run $ snmptranslate -m -Tz -On to get a complete list of OIDs in the MIB along with their labels. Redirect to a file for nicer formatting as needed. Example: $ snmptranslate -m IF-MIB -Tz -On > out.log $ cat out.log \"org\" \"1.3\" \"dod\" \"1.3.6\" \"internet\" \"1.3.6.1\" \"directory\" \"1.3.6.1.1\" \"mgmt\" \"1.3.6.1.2\" \"mib-2\" \"1.3.6.1.2.1\" \"system\" \"1.3.6.1.2.1.1\" \"sysDescr\" \"1.3.6.1.2.1.1.1\" \"sysObjectID\" \"1.3.6.1.2.1.1.2\" \"sysUpTime\" \"1.3.6.1.2.1.1.3\" \"sysContact\" \"1.3.6.1.2.1.1.4\" \"sysName\" \"1.3.6.1.2.1.1.5\" \"sysLocation\" \"1.3.6.1.2.1.1.6\" [...] Tip Use the -M option to specify the directory where snmptranslate should look for MIBs. Useful if you want to inspect a MIB you've just downloaded but not moved to the default MIB directory.
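For example, assuming you downloaded MY-MIB into /tmp/mibs (both names hypothetical), the + prefix appends that directory to the default search path: $ snmptranslate -M +/tmp/mibs -m MY-MIB -Tz -On > out.log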
Tip Use -Tp for an alternative tree-like formatting.","title":"Browse locally installed MIBs"},{"location":"tutorials/snmp/introduction/","text":"Introduction to SNMP \u00b6 In this introduction, we'll cover general information about the SNMP protocol, including key concepts such as OIDs and MIBs. If you're already familiar with the SNMP protocol, feel free to skip to the next page. What is SNMP? \u00b6 Overview \u00b6 SNMP (Simple Network Management Protocol) is a protocol for monitoring network devices . It uses UDP and supports both a request/response model (commands and queries) and a notification model (traps, informs). In the request/response model, the SNMP manager (eg. the Datadog Agent) issues an SNMP command ( GET , GETNEXT , BULK ) to an SNMP agent (eg. a network device). SNMP was born in the 1980s, so it has been around for a long time. While more modern alternatives like NETCONF and OpenConfig have been gaining attention, a large amount of network devices still use SNMP as their primary monitoring interface. SNMP versions \u00b6 The SNMP protocol exists in 3 versions: v1 (legacy), v2c , and v3 . The main differences between v1/v2c and v3 are the authentication mechanism and transport layer, as summarized below. Version Authentication Transport layer v1/v2c Password (the community string ) Plain text only v3 Username/password Support for packet signing and encryption OIDs \u00b6 What is an OID? \u00b6 Identifiers for queryable quantities An OID , also known as an Object Identifier , is an identifier for a quantity (\"object\") that can be retrieved from an SNMP device. Such quantities may include uptime, temperature, network traffic, etc (quantities available will vary across devices). To make them processable by machines, OIDs are represented as dot-separated sequences of numbers, e.g. 1.3.6.1.2.1.1.1 . Global definition OIDs are globally defined , which means they have the same meaning regardless of the device that processes the SNMP query. For example, querying the 1.3.6.1.2.1.1.1 OID (also known as sysDescr ) on any SNMP agent will make it return the system description. (More on the OID/label mapping can be found in the MIBs section below.) Not all OIDs contain metrics data OIDs can refer to various types of objects, such as strings, numbers, tables, etc. In particular, this means that only a fraction of OIDs refer to numerical quantities that can actually be sent as metrics to Datadog. However, non-numerical OIDs can also be useful, especially for tagging. The OID tree \u00b6 OIDs are structured in a tree-like fashion. Each number in the OID represents a node in the tree. The wildcard notation is often used to refer to a sub-tree of OIDs, e.g. 1.3.6.1.2.* . It so happens that there are two main OID sub-trees: a sub-tree for general-purpose OIDs, and a sub-tree for vendor-specific OIDs. Generic OIDs \u00b6 Located under the sub-tree: 1.3.6.1.2.1.* (a.k.a. SNMPv2-MIB or mib-2 ). These OIDs are applicable to all kinds of network devices (although all devices may not expose all OIDs in this sub-tree). For example, 1.3.6.1.2.1.1.1 corresponds to sysDescr , which contains a free-form, human-readable description of the device. Vendor-specific OIDs \u00b6 Located under the sub-tree: 1.3.6.1.4.1.* (a.k.a. enterprises ). These OIDs are defined and managed by network device vendors themselves. Each vendor is assigned its own enterprise sub-tree in the form of 1.3.6.1.4.1..* . For example: 1.3.6.1.4.1.2.* is the sub-tree for IBM-specific OIDs. 
1.3.6.1.4.1.9.* is the sub-tree for Cisco-specific OIDs. The full list of vendor sub-trees can be found here: SNMP OID 1.3.6.1.4.1 . Notable OIDs \u00b6 OID Label Description 1.3.6.1.2.1.2 sysObjectId An OID whose value is an OID that represents the device make and model (yes, it's a bit meta). 1.3.6.1.2.1.1.1 sysDescr A human-readable, free-form description of the device. 1.3.6.1.2.1.1.3 sysUpTimeInstance The device uptime. MIBs \u00b6 What is an MIB? \u00b6 OIDs are grouped in modules called MIBs (Management Information Base). An MIB describes the hierarchy of a given set of OIDs. (This is somewhat analogous to a dictionary that contains the definitions for each word in a spoken language.) For example, the IF-MIB describes the hierarchy of OIDs within the sub-tree 1.3.6.1.2.1.2.* . These OIDs contain metrics about the network interfaces available on the device. (Note how its location under the 1.3.6.1.2.* sub-tree indicates that it is a generic MIB, available on most network devices.) As part of the description of OIDs, an MIB defines a human-readable label for each OID. For example, IF-MIB describes the OID 1.3.6.1.2.1.1 and assigns it the label sysDescr . The operation that consists in finding the OID from a label is called OID resolution . Tools and resources \u00b6 The following resources can be useful when working with MIBs: MIB Discovery : a search engine for OIDs. Use it to find what an OID corresponds to, which MIB it comes from, what label it is known as, etc. Circitor MIB files repository : a repository and search engine where one can download actual .mib files. SNMP Labs MIB repository : alternate repo of many common MIBs. Note : this site hosts the underlying MIBs which the pysnmp-mibs library (used by the SNMP Python check) actually validates against. Double check any MIB you get from an alternate source with what is in this repo. Learn more \u00b6 For other high-level overviews of SNMP, see: How SNMP Works (Youtube) SNMP (Wikipedia) Tutorials: Internet Management and SNMP (YouTube) (In-depth videos about SNMP architecture, MIBs, protocol data structures, security models, monitoring code examples, etc.)","title":"Introduction to SNMP"},{"location":"tutorials/snmp/introduction/#introduction-to-snmp","text":"In this introduction, we'll cover general information about the SNMP protocol, including key concepts such as OIDs and MIBs. If you're already familiar with the SNMP protocol, feel free to skip to the next page.","title":"Introduction to SNMP"},{"location":"tutorials/snmp/introduction/#what-is-snmp","text":"","title":"What is SNMP?"},{"location":"tutorials/snmp/introduction/#overview","text":"SNMP (Simple Network Management Protocol) is a protocol for monitoring network devices . It uses UDP and supports both a request/response model (commands and queries) and a notification model (traps, informs). In the request/response model, the SNMP manager (eg. the Datadog Agent) issues an SNMP command ( GET , GETNEXT , BULK ) to an SNMP agent (eg. a network device). SNMP was born in the 1980s, so it has been around for a long time. While more modern alternatives like NETCONF and OpenConfig have been gaining attention, a large amount of network devices still use SNMP as their primary monitoring interface.","title":"Overview"},{"location":"tutorials/snmp/introduction/#snmp-versions","text":"The SNMP protocol exists in 3 versions: v1 (legacy), v2c , and v3 . The main differences between v1/v2c and v3 are the authentication mechanism and transport layer, as summarized below. 
Version Authentication Transport layer v1/v2c Password (the community string ) Plain text only v3 Username/password Support for packet signing and encryption","title":"SNMP versions"},{"location":"tutorials/snmp/introduction/#oids","text":"","title":"OIDs"},{"location":"tutorials/snmp/introduction/#what-is-an-oid","text":"Identifiers for queryable quantities An OID , also known as an Object Identifier , is an identifier for a quantity (\"object\") that can be retrieved from an SNMP device. Such quantities may include uptime, temperature, network traffic, etc (quantities available will vary across devices). To make them processable by machines, OIDs are represented as dot-separated sequences of numbers, e.g. 1.3.6.1.2.1.1.1 . Global definition OIDs are globally defined , which means they have the same meaning regardless of the device that processes the SNMP query. For example, querying the 1.3.6.1.2.1.1.1 OID (also known as sysDescr ) on any SNMP agent will make it return the system description. (More on the OID/label mapping can be found in the MIBs section below.) Not all OIDs contain metrics data OIDs can refer to various types of objects, such as strings, numbers, tables, etc. In particular, this means that only a fraction of OIDs refer to numerical quantities that can actually be sent as metrics to Datadog. However, non-numerical OIDs can also be useful, especially for tagging.","title":"What is an OID?"},{"location":"tutorials/snmp/introduction/#the-oid-tree","text":"OIDs are structured in a tree-like fashion. Each number in the OID represents a node in the tree. The wildcard notation is often used to refer to a sub-tree of OIDs, e.g. 1.3.6.1.2.* . It so happens that there are two main OID sub-trees: a sub-tree for general-purpose OIDs, and a sub-tree for vendor-specific OIDs.","title":"The OID tree"},{"location":"tutorials/snmp/introduction/#generic-oids","text":"Located under the sub-tree: 1.3.6.1.2.1.* (a.k.a. SNMPv2-MIB or mib-2 ). These OIDs are applicable to all kinds of network devices (although all devices may not expose all OIDs in this sub-tree). For example, 1.3.6.1.2.1.1.1 corresponds to sysDescr , which contains a free-form, human-readable description of the device.","title":"Generic OIDs"},{"location":"tutorials/snmp/introduction/#vendor-specific-oids","text":"Located under the sub-tree: 1.3.6.1.4.1.* (a.k.a. enterprises ). These OIDs are defined and managed by network device vendors themselves. Each vendor is assigned its own enterprise sub-tree in the form of 1.3.6.1.4.1..* . For example: 1.3.6.1.4.1.2.* is the sub-tree for IBM-specific OIDs. 1.3.6.1.4.1.9.* is the sub-tree for Cisco-specific OIDs. The full list of vendor sub-trees can be found here: SNMP OID 1.3.6.1.4.1 .","title":"Vendor-specific OIDs"},{"location":"tutorials/snmp/introduction/#notable-oids","text":"OID Label Description 1.3.6.1.2.1.2 sysObjectId An OID whose value is an OID that represents the device make and model (yes, it's a bit meta). 1.3.6.1.2.1.1.1 sysDescr A human-readable, free-form description of the device. 1.3.6.1.2.1.1.3 sysUpTimeInstance The device uptime.","title":"Notable OIDs"},{"location":"tutorials/snmp/introduction/#mibs","text":"","title":"MIBs"},{"location":"tutorials/snmp/introduction/#what-is-an-mib","text":"OIDs are grouped in modules called MIBs (Management Information Base). An MIB describes the hierarchy of a given set of OIDs. (This is somewhat analogous to a dictionary that contains the definitions for each word in a spoken language.) 
For example, the IF-MIB describes the hierarchy of OIDs within the sub-tree 1.3.6.1.2.1.2.* . These OIDs contain metrics about the network interfaces available on the device. (Note how its location under the 1.3.6.1.2.* sub-tree indicates that it is a generic MIB, available on most network devices.) As part of the description of OIDs, an MIB defines a human-readable label for each OID. For example, IF-MIB describes the OID 1.3.6.1.2.1.1 and assigns it the label sysDescr . The operation that consists in finding the OID from a label is called OID resolution .","title":"What is an MIB?"},{"location":"tutorials/snmp/introduction/#tools-and-resources","text":"The following resources can be useful when working with MIBs: MIB Discovery : a search engine for OIDs. Use it to find what an OID corresponds to, which MIB it comes from, what label it is known as, etc. Circitor MIB files repository : a repository and search engine where one can download actual .mib files. SNMP Labs MIB repository : alternate repo of many common MIBs. Note : this site hosts the underlying MIBs which the pysnmp-mibs library (used by the SNMP Python check) actually validates against. Double check any MIB you get from an alternate source with what is in this repo.","title":"Tools and resources"},{"location":"tutorials/snmp/introduction/#learn-more","text":"For other high-level overviews of SNMP, see: How SNMP Works (Youtube) SNMP (Wikipedia) Tutorials: Internet Management and SNMP (YouTube) (In-depth videos about SNMP architecture, MIBs, protocol data structures, security models, monitoring code examples, etc.)","title":"Learn more"},{"location":"tutorials/snmp/profile-format/","text":"Profile Format Reference \u00b6 Overview \u00b6 SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. An SNMP profile is materialised as a YAML file with the following structure: sysobjectid : # extends: # metrics : # # metric_tags: # Fields \u00b6 sysobjectid \u00b6 (Required) The sysobjectid field is used to match profiles against devices during device autodiscovery. It can refer to a fully-defined OID for a specific device make and model: sysobjectid : 1.3.6.1.4.1.232.9.4.10 or a wildcard pattern to address multiple device models: sysobjectid : 1.3.6.1.131.12.4.* or a list of fully-defined OID / wildcard patterns: sysobjectid : - 1.3.6.1.131.12.4.* - 1.3.6.1.4.1.232.9.4.10 extends \u00b6 (Optional) This field can be used to include metrics and metric tags from other so-called base profiles . Base profiles can derive from other base profiles to build a hierarchy of reusable profile mixins. Important All device profiles should extend from the _base.yaml profile, which defines items that should be collected for all devices. Example: extends : - _base.yaml - _generic-if.yaml # Include basic metrics from IF-MIB. metrics \u00b6 (Required) Entries in the metrics field define which metrics will be collected by the profile. They can reference either a single OID (a.k.a symbol ), or an SNMP table. Symbol metrics \u00b6 An SNMP symbol is an object with a scalar type (i.e. Counter32 , Integer32 , OctetString , etc). In a MIB file, a symbol can be recognized as an OBJECT-TYPE node with a scalar SYNTAX , placed under an OBJECT IDENTIFIER node (which is often the root OID of the MIB): EXAMPLE-MIB DEFINITIONS ::= BEGIN -- ... example OBJECT IDENTIFIER ::= { mib-2 7 } exampleSymbol OBJECT-TYPE SYNTAX Counter32 -- ... 
::= { example 1 } In profiles, symbol metrics can be specified as entries that specify the MIB and symbol fields: metrics : # Example for the above dummy MIB and symbol: - MIB : EXAMPLE-MIB symbol : OID : 1.3.5.1.2.1.7.1 name : exampleSymbol # More realistic examples: - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.1.2 name : clusterHealth - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Warning Symbol metrics from the same MIB must still be listed as separate metrics entries, as shown above. For example, this is not valid syntax: metrics : - MIB : ISILON-MIB symbol : - OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Table metrics \u00b6 An SNMP table is an object that is composed of multiple entries (\"rows\"), where each entry contains values for a set of symbols (\"columns\"). In a MIB file, tables can be recognized by the presence of SEQUENCE OF : exampleTable OBJECT-TYPE SYNTAX SEQUENCE OF exampleEntry -- ... ::= { example 10 } exampleEntry OBJECT-TYPE -- ... ::= { exampleTable 1 } exampleColumn1 OBJECT-TYPE -- ... ::= { exampleEntry 1 } exampleColumn2 OBJECT-TYPE -- ... ::= { exampleEntry 2 } -- ... In profiles, tables can be specified as entries containing the MIB , table and symbols fields: metrics : # Example for the dummy table above: - MIB : EXAMPLE-MIB table : # Identification of the table which metrics come from. OID : 1.3.6.1.4.1.10 name : exampleTable symbols : # List of symbols ('columns') to retrieve. # Same format as for a single OID. # Each row in the table will emit these metrics. - OID : 1.3.6.1.4.1.10.1.1 name : exampleColumn1 - OID : 1.3.6.1.4.1.10.1.2 name : exampleColumn2 # ... # More realistic example: - MIB : CISCO-PROCESS-MIB table : # Each row in this table contains information about a CPU unit of the device. OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed # ... Table metrics tagging \u00b6 It is possible to add tags to metrics retrieved from a table in three ways: Using a column within the same table \u00b6 metrics : - MIB : IF-MIB table : OID : 1.3.6.1.2.1.2.2 name : ifTable symbols : - OID : 1.3.6.1.2.1.2.2.1.14 name : ifInErrors # ... metric_tags : # Add an 'interface' tag to each metric of each row, # whose value is obtained from the 'ifDescr' column of the row. # This allows querying metrics by interface, e.g. 'interface:eth0'. - tag : interface column : OID : 1.3.6.1.2.1.2.2.1.2 name : ifDescr Using a column from a different table with identical indexes \u00b6 metrics : - MIB : CISCO-IF-EXTENSION-MIB forced_type : monotonic_count table : OID : 1.3.6.1.4.1.9.9.276.1.1.2 name : cieIfInterfaceTable symbols : - OID : 1.3.6.1.4.1.9.9.276.1.1.2.1.1 name : cieIfResetCount metric_tags : - MIB : IF-MIB column : OID : 1.3.6.1.2.1.31.1.1.1.1 name : ifName table : ifXTable tag : interface Using a column from a different table with different indexes \u00b6 metrics : - MIB : CPI-UNITY-MIB table : OID : 1.3.6.1.4.1.30932.1.10.1.3.110 name : cpiPduBranchTable symbols : - OID : 1.3.6.1.4.1.30932.1.10.1.3.110.1.3 name : cpiPduBranchCurrent metric_tags : - column : OID : 1.3.6.1.4.1.30932.1.10.1.2.10.1.3 name : cpiPduName table : cpiPduTable index_transform : - start : 1 end : 7 tag : pdu_name If the external table has different indexes, use index_transform to select a subset of the full index.
index_transform is a list of start / end ranges to extract from the current table index to match the external table index. start and end are inclusive. External table indexes must be a subset of the indexes of the current table, or same indexes in a different order. Example In the example above, the index of cpiPduBranchTable looks like 1.6.0.36.155.53.3.246 , the first digit is the cpiPduBranchId index and the rest is the cpiPduBranchMac index. The index of cpiPduTable looks like 6.0.36.155.53.3.246 and represents cpiPduMac (equivalent to cpiPduBranchMac ). By using the index_transform with start 1 and end 7, we extract 6.0.36.155.53.3.246 from 1.6.0.36.155.53.3.246 ( cpiPduBranchTable full index), and then use it to match 6.0.36.155.53.3.246 ( cpiPduTable full index). index_transform can be more complex, the following definition will extract 2.3.5.6.7 from 1.2.3.4.5.6.7 . index_transform : - start : 1 end : 2 - start : 4 end : 6 Using an index \u00b6 Important: \" index \" refers to one digit of the index part of the row OID. Example, if the column OID is 1.2.3.1.2 and the row OID is 1.2.3.1.2.7.8.9 , the full index is 7.8.9 . In this example, when using index: 1 , we will refer to 7 , index: 2 will refer to 8 , and so on. metrics : - MIB : CISCO-PROCESS-MIB table : OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed metric_tags : # This tagging method is more complex, so let's walk through an example... # # In CISCO-PROCESS-MIB, we can see that entries in the `cpmCPUTotalTable` are indexed by `cpmCPUTotalIndex`, # which corresponds to some sort of CPU position for each row in the table: # # cpmCPUTotalEntry OBJECT-TYPE # -- ... # INDEX { cpmCPUTotalIndex } # <-- See? # # We want to tag metrics in this table by this CPU position. # # To do this, we look up the position of this OID in `INDEX`. Here we see it's in 1st position. # So we can reference it here using `index: 1`. # (If there were two OIDs in `INDEX`, and we wanted to use the one in 2nd position, then we would have used `index: 2`.) # # NOTE: currently only indexes that refer to a column in the same table are supported. - tag : cpu index : 1 Mapping index to tag string value \u00b6 You can use the following syntax to map indexes to tag string values. In the example below, the submitted metrics will be snmp.ipSystemStatsHCInReceives with tags like ipversion:ipv6 . metrics : - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives metric_tags : - index : 1 tag : ipversion mapping : 0 : unknown 1 : ipv4 2 : ipv6 3 : ipv4z 4 : ipv6z 16 : dns See meaning of index as used here in Using an index section. Tagging tips \u00b6 Note General guidelines on Datadog tagging also apply to table metric tags. In particular, be mindful of the kind of value contained in the columns used a tag sources. E.g. avoid using a DisplayString (an arbitrarily long human-readable text description) or unbounded sources (timestamps, IDs...) as tag values. Good candidates for tag values include short strings, enums, or integer indexes. Metric type inference \u00b6 By default, the Datadog metric type of a symbol will be inferred from the SNMP type (i.e. 
the MIB SYNTAX ): SNMP type Inferred metric type Counter32 rate Counter64 rate Gauge32 gauge Integer gauge Integer32 gauge CounterBasedGauge64 gauge Opaque gauge SNMP types not listed in this table are submitted as gauge by default. Forced metric types \u00b6 Sometimes the inferred type may not be what you want. Typically, OIDs that represent \"total number of X\" are defined as Counter32 in MIBs, but you probably want to submit them as monotonic_count instead of as a rate . For such cases, you can define a forced_type . Possible values and their effect are listed below. Forced type Description gauge Submit as a gauge. rate Submit as a rate. percent Multiply by 100 and submit as a rate. monotonic_count Submit as a monotonic count. monotonic_count_and_rate Submit 2 copies of the metric: one as a monotonic count, and one as a rate (suffixed with .rate ). flag_stream Submit each flag of a flag stream as an individual metric with value 0 or 1 . See Flag Stream section . This works on both symbol and table metrics: metrics : # On a symbol: - MIB : TCP-MIB forced_type : monotonic_count symbol : OID : 1.3.6.1.2.1.6.5 name : tcpActiveOpens # On a table: - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives - OID : 1.3.6.1.2.1.4.31.1.1.6 name : ipSystemStatsHCInOctets Note When used on a table metrics entry, forced_type is applied to all symbols in the entry. So, if a table contains symbols of varying types, you should use multiple metrics entries: one for symbols with inferred metric types, and one for each forced_type . For example: metrics : - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable # No `forced_type` specified => metric types will be inferred. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.2 name : ltmPoolStatServerPktsIn - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.4 name : ltmPoolStatServerPktsOut # ... - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable forced_type : monotonic_count # All these symbols will be submitted as monotonic counts. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.7 name : ltmPoolStatServerTotConns - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.23 name : ltmPoolStatConnqServiced # ... Flag stream \u00b6 When the value is a flag stream like 010101 , you can use forced_type: flag_stream to submit each flag as an individual metric with value 0 or 1 . Two options are required when using flag_stream : options.placement : position of the flag in the flag stream (1-based indexing, first element is placement 1). options.metric_suffix : suffix appended to the metric name for a specific flag, usually matching the name of the flag. Example: metrics : - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 4 metric_suffix : OnLine - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 5 metric_suffix : ReplaceBattery This example will submit two metrics snmp.upsBasicStateOutputState.OnLine and snmp.upsBasicStateOutputState.ReplaceBattery with value 0 or 1 . Example of flag_stream usage in a profile . Extract value \u00b6 If the metric value to be submitted comes from an OID with a string value and needs to be extracted from it, you can use the extract value feature.
extract_value is a regex pattern with one capture group like (\\d+)C , where the capture group is (\\d+) . Example use cases and their respective regex patterns: stripping the C unit from a temperature value: (\\d+)C stripping the USD unit from a currency value: USD(\\d+) stripping the F unit from a temperature value with spaces between the metric and the unit: (\\d+) *F Example: Scalar Metric Example: metrics : - MIB : MY-MIB symbol : OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' Table Column Metric Example: metrics : - MIB : MY-MIB table : OID : 1.2.3.4.5.6 name : myTable symbols : - OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' # ... In the examples above, the OID value is an SNMP OctetString value 22C and we want 22 to be submitted as the value for snmp.temperature . metric_tags \u00b6 (Optional) This field is used to apply tags to all metrics collected by the profile. It has the same meaning as the instance-level config option (see conf.yaml.example ). Several collection methods are supported, as illustrated below: metric_tags : - OID : 1.3.6.1.2.1.1.5.0 symbol : sysName tag : snmp_host - # With regular expression matching OID : 1.3.6.1.2.1.1.5.0 symbol : sysName match : (.*)-(.*) tags : device_type : \\1 host : \\2","title":"Profile Format Reference"},{"location":"tutorials/snmp/profile-format/#profile-format-reference","text":"","title":"Profile Format Reference"},{"location":"tutorials/snmp/profile-format/#overview","text":"SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. An SNMP profile is materialised as a YAML file with the following structure: sysobjectid : # extends: # metrics : # # metric_tags: # ","title":"Overview"},{"location":"tutorials/snmp/profile-format/#fields","text":"","title":"Fields"},{"location":"tutorials/snmp/profile-format/#sysobjectid","text":"(Required) The sysobjectid field is used to match profiles against devices during device autodiscovery. It can refer to a fully-defined OID for a specific device make and model: sysobjectid : 1.3.6.1.4.1.232.9.4.10 or a wildcard pattern to address multiple device models: sysobjectid : 1.3.6.1.131.12.4.* or a list of fully-defined OID / wildcard patterns: sysobjectid : - 1.3.6.1.131.12.4.* - 1.3.6.1.4.1.232.9.4.10","title":"sysobjectid"},{"location":"tutorials/snmp/profile-format/#extends","text":"(Optional) This field can be used to include metrics and metric tags from other so-called base profiles . Base profiles can derive from other base profiles to build a hierarchy of reusable profile mixins. Important All device profiles should extend from the _base.yaml profile, which defines items that should be collected for all devices. Example: extends : - _base.yaml - _generic-if.yaml # Include basic metrics from IF-MIB.","title":"extends"},{"location":"tutorials/snmp/profile-format/#metrics","text":"(Required) Entries in the metrics field define which metrics will be collected by the profile. They can reference either a single OID (a.k.a. symbol ), or an SNMP table.","title":"metrics"},{"location":"tutorials/snmp/profile-format/#symbol-metrics","text":"An SNMP symbol is an object with a scalar type (i.e. Counter32 , Integer32 , OctetString , etc). In a MIB file, a symbol can be recognized as an OBJECT-TYPE node with a scalar SYNTAX , placed under an OBJECT IDENTIFIER node (which is often the root OID of the MIB): EXAMPLE-MIB DEFINITIONS ::= BEGIN -- ...
example OBJECT IDENTIFIER ::= { mib-2 7 } exampleSymbol OBJECT-TYPE SYNTAX Counter32 -- ... ::= { example 1 } In profiles, symbol metrics can be specified as entries that specify the MIB and symbol fields: metrics : # Example for the above dummy MIB and symbol: - MIB : EXAMPLE-MIB symbol : OID : 1.3.5.1.2.1.7.1 name : exampleSymbol # More realistic examples: - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.1.2 name : clusterHealth - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - MIB : ISILON-MIB symbol : OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes Warning Symbol metrics from the same MIB must still be listed as separate metrics entries, as shown above. For example, this is not valid syntax: metrics : - MIB : ISILON-MIB symbol : - OID : 1.3.6.1.4.1.12124.1.2.1.1 name : clusterIfsInBytes - OID : 1.3.6.1.4.1.12124.1.2.1.3 name : clusterIfsOutBytes","title":"Symbol metrics"},{"location":"tutorials/snmp/profile-format/#table-metrics","text":"An SNMP table is an object that is composed of multiple entries (\"rows\"), where each entry contains values a set of symbols (\"columns\"). In a MIB file, tables be recognized by the presence of SEQUENCE OF : exampleTable OBJECT-TYPE SYNTAX SEQUENCE OF exampleEntry -- ... ::= { example 10 } exampleEntry OBJECT-TYPE -- ... ::= { exampleTable 1 } exampleColumn1 OBJECT-TYPE -- ... ::= { exampleEntry 1 } exampleColumn2 OBJECT-TYPE -- ... ::= { exampleEntry 2 } -- ... In profiles, tables can be specified as entries containing the MIB , table and symbols fields: metrics : # Example for the dummy table above: - MIB : EXAMPLE-MIB table : # Identification of the table which metrics come from. OID : 1.3.6.1.4.1.10 name : exampleTable symbols : # List of symbols ('columns') to retrieve. # Same format as for a single OID. # Each row in the table will emit these metrics. - OID : 1.3.6.1.4.1.10.1.1 name : exampleColumn1 - OID : 1.3.6.1.4.1.10.1.2 name : exampleColumn2 # ... # More realistic example: - MIB : CISCO-PROCESS-MIB table : # Each row in this table contains information about a CPU unit of the device. OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed # ...","title":"Table metrics"},{"location":"tutorials/snmp/profile-format/#table-metrics-tagging","text":"It is possible to add tags to metrics retrieved from a table in three ways:","title":"Table metrics tagging"},{"location":"tutorials/snmp/profile-format/#using-a-column-within-the-same-table","text":"metrics : - MIB : IF-MIB table : OID : 1.3.6.1.2.1.2.2 name : ifTable symbols : - OID : 1.3.6.1.2.1.2.2.1.14 name : ifInErrors # ... metric_tags : # Add an 'interface' tag to each metric of each row, # whose value is obtained from the 'ifDescr' column of the row. # This allows querying metrics by interface, e.g. 'interface:eth0'. 
- tag : interface column : OID : 1.3.6.1.2.1.2.2.1.2 name : ifDescr","title":"Using a column within the same table"},{"location":"tutorials/snmp/profile-format/#using-a-column-from-a-different-table-with-identical-indexes","text":"metrics : - MIB : CISCO-IF-EXTENSION-MIB forced_type : monotonic_count table : OID : 1.3.6.1.4.1.9.9.276.1.1.2 name : cieIfInterfaceTable symbols : - OID : 1.3.6.1.4.1.9.9.276.1.1.2.1.1 name : cieIfResetCount metric_tags : - MIB : IF-MIB column : OID : 1.3.6.1.2.1.31.1.1.1.1 name : ifName table : ifXTable tag : interface","title":"Using a column from a different table with identical indexes"},{"location":"tutorials/snmp/profile-format/#using-a-column-from-a-different-table-with-different-indexes","text":"metrics : - MIB : CPI-UNITY-MIB table : OID : 1.3.6.1.4.1.30932.1.10.1.3.110 name : cpiPduBranchTable symbols : - OID : 1.3.6.1.4.1.30932.1.10.1.3.110.1.3 name : cpiPduBranchCurrent metric_tags : - column : OID : 1.3.6.1.4.1.30932.1.10.1.2.10.1.3 name : cpiPduName table : cpiPduTable index_transform : - start : 1 end : 7 tag : pdu_name If the external table has different indexes, use index_transform to select a subset of the full index. index_transform is a list of start / end ranges to extract from the current table index to match the external table index. start and end are inclusive. External table indexes must be a subset of the indexes of the current table, or same indexes in a different order. Example In the example above, the index of cpiPduBranchTable looks like 1.6.0.36.155.53.3.246 , the first digit is the cpiPduBranchId index and the rest is the cpiPduBranchMac index. The index of cpiPduTable looks like 6.0.36.155.53.3.246 and represents cpiPduMac (equivalent to cpiPduBranchMac ). By using the index_transform with start 1 and end 7, we extract 6.0.36.155.53.3.246 from 1.6.0.36.155.53.3.246 ( cpiPduBranchTable full index), and then use it to match 6.0.36.155.53.3.246 ( cpiPduTable full index). index_transform can be more complex, the following definition will extract 2.3.5.6.7 from 1.2.3.4.5.6.7 . index_transform : - start : 1 end : 2 - start : 4 end : 6","title":"Using a column from a different table with different indexes"},{"location":"tutorials/snmp/profile-format/#using-an-index","text":"Important: \" index \" refers to one digit of the index part of the row OID. Example, if the column OID is 1.2.3.1.2 and the row OID is 1.2.3.1.2.7.8.9 , the full index is 7.8.9 . In this example, when using index: 1 , we will refer to 7 , index: 2 will refer to 8 , and so on. metrics : - MIB : CISCO-PROCESS-MIB table : OID : 1.3.6.1.4.1.9.9.109.1.1.1 name : cpmCPUTotalTable symbols : - OID : 1.3.6.1.4.1.9.9.109.1.1.1.1.12 name : cpmCPUMemoryUsed metric_tags : # This tagging method is more complex, so let's walk through an example... # # In CISCO-PROCESS-MIB, we can see that entries in the `cpmCPUTotalTable` are indexed by `cpmCPUTotalIndex`, # which corresponds to some sort of CPU position for each row in the table: # # cpmCPUTotalEntry OBJECT-TYPE # -- ... # INDEX { cpmCPUTotalIndex } # <-- See? # # We want to tag metrics in this table by this CPU position. # # To do this, we look up the position of this OID in `INDEX`. Here we see it's in 1st position. # So we can reference it here using `index: 1`. # (If there were two OIDs in `INDEX`, and we wanted to use the one in 2nd position, then we would have used `index: 2`.) # # NOTE: currently only indexes that refer to a column in the same table are supported. 
- tag : cpu index : 1","title":"Using an index"},{"location":"tutorials/snmp/profile-format/#mapping-index-to-tag-string-value","text":"You can use the following syntax to map indexes to tag string values. In the example below, the submitted metrics will be snmp.ipSystemStatsHCInReceives with tags like ipversion:ipv6 . metrics : - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives metric_tags : - index : 1 tag : ipversion mapping : 0 : unknown 1 : ipv4 2 : ipv6 3 : ipv4z 4 : ipv6z 16 : dns See meaning of index as used here in Using an index section.","title":"Mapping index to tag string value"},{"location":"tutorials/snmp/profile-format/#tagging-tips","text":"Note General guidelines on Datadog tagging also apply to table metric tags. In particular, be mindful of the kind of value contained in the columns used a tag sources. E.g. avoid using a DisplayString (an arbitrarily long human-readable text description) or unbounded sources (timestamps, IDs...) as tag values. Good candidates for tag values include short strings, enums, or integer indexes.","title":"Tagging tips"},{"location":"tutorials/snmp/profile-format/#metric-type-inference","text":"By default, the Datadog metric type of a symbol will be inferred from the SNMP type (i.e. the MIB SYNTAX ): SNMP type Inferred metric type Counter32 rate Counter64 rate Gauge32 gauge Integer gauge Integer32 gauge CounterBasedGauge64 gauge Opaque gauge SNMP types not listed in this table are submitted as gauge by default.","title":"Metric type inference"},{"location":"tutorials/snmp/profile-format/#forced-metric-types","text":"Sometimes the inferred type may not be what you want. Typically, OIDs that represent \"total number of X\" are defined as Counter32 in MIBs, but you probably want to submit them monotonic_count instead of a rate . For such cases, you can define a forced_type . Possible values and their effect are listed below. Forced type Description gauge Submit as a gauge. rate Submit as a rate. percent Multiply by 100 and submit as a rate. monotonic_count Submit as a monotonic count. monotonic_count_and_rate Submit 2 copies of the metric: one as a monotonic count, and one as a rate (suffixed with .rate ). flag_stream Submit each flag of a flag stream as individual metric with value 0 or 1 . See Flag Stream section . This works on both symbol and table metrics: metrics : # On a symbol: - MIB : TCP-MIB forced_type : monotonic_count symbol : OID : 1.3.6.1.2.1.6.5 name : tcpActiveOpens # On a table: - MIB : IP-MIB table : OID : 1.3.6.1.2.1.4.31.1 name : ipSystemStatsTable forced_type : monotonic_count symbols : - OID : 1.3.6.1.2.1.4.31.1.1.4 name : ipSystemStatsHCInReceives - OID : 1.3.6.1.2.1.4.31.1.1.6 name : ipSystemStatsHCInOctets Note When used on a table metrics entry, forced_type is applied to all symbols in the entry. So, if a table contains symbols of varying types, you should use multiple metrics entries: one for symbols with inferred metric types, and one for each forced_type . For example: metrics : - MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable # No `forced_type` specified => metric types will be inferred. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.2 name : ltmPoolStatServerPktsIn - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.4 name : ltmPoolStatServerPktsOut # ... 
- MIB : F5-BIGIP-LOCAL-MIB table : OID : 1.3.6.1.4.1.3375.2.2.5.2.3 name : ltmPoolStatTable forced_type : monotonic_count # All these symbols will be submitted as monotonic counts. symbols : - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.7 name : ltmPoolStatServerTotConns - OID : 1.3.6.1.4.1.3375.2.2.5.2.3.1.23 name : ltmPoolStatConnqServiced # ...","title":"Forced metric types"},{"location":"tutorials/snmp/profile-format/#flag-stream","text":"When the value is a flag stream like 010101 , you can use forced_type: flag_stream to submit each flag as individual metric with value 0 or 1 . Two options are required when using flag_stream : options.placement : position of the flag in the flag stream (1-based indexing, first element is placement 1). options.metric_suffix : suffix appended to the metric name for a specific flag, usually matching the name of the flag. Example: metrics : - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 4 metric_suffix : OnLine - MIB : PowerNet-MIB symbol : OID : 1.3.6.1.4.1.318.1.1.1.11.1.1.0 name : upsBasicStateOutputState forced_type : flag_stream options : placement : 5 metric_suffix : ReplaceBattery This example will submit two metrics snmp.upsBasicStateOutputState.OnLine and snmp.upsBasicStateOutputState.ReplaceBattery with value 0 or 1 . Example of flag_stream usage in a profile .","title":"Flag stream"},{"location":"tutorials/snmp/profile-format/#extract-value","text":"If the metric value to be submitted is from a OID with string value and needs to be extracted from it, you can use extract value feature. extract_value is a regex pattern with one capture group like (\\d+)C , where the capture group is (\\d+) . Example use cases respective regex patterns: stripping the C unit from a temperature value: (\\d+)C stripping the USD unit from a currency value: USD(\\d+) stripping the F unit from a temperature value with spaces between the metric and the unit: (\\d+) *F Example: Scalar Metric Example: metrics : - MIB : MY-MIB symbol : OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' Table Column Metric Example: metrics : - MIB : MY-MIB table : OID : 1.2.3.4.5.6 name : myTable symbols : - OID : 1.2.3.4.5.6.7 name : temperature extract_value : '(\\d+)C' # ... In the examples above, the OID value is a snmp OctetString value 22C and we want 22 to be submitted as value for snmp.temperature .","title":"Extract value"},{"location":"tutorials/snmp/profile-format/#metric_tags","text":"(Optional) This field is used to apply tags to all metrics collected by the profile. It has the same meaning than the instance-level config option (see conf.yaml.example ). Several collection methods are supported, as illustrated below: metric_tags : - OID : 1.3.6.1.2.1.1.5.0 symbol : sysName tag : snmp_host - # With regular expression matching OID : 1.3.6.1.2.1.1.5.0 symbol : sysName match : (.*)-(.*) tags : device_type : \\1 host : \\2","title":"metric_tags"},{"location":"tutorials/snmp/profiles/","text":"Build an SNMP Profile \u00b6 SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. This tutorial will walk you through the steps of building a basic SNMP profile that collects OID metrics from HP iLO4 devices. Feel free to read the Introduction to SNMP if you need a refresher on SNMP concepts such as OIDs and MIBs. Ready? Let's get started! 
Research \u00b6 The first step to building an SNMP profile is doing some basic research about the device, and which metrics we want to collect. General device information \u00b6 Generally, you'll want to search the web and find out about the following: Device name, manufacturer, and device sysobjectid . Understand what the device does, and what it is used for. (Which metrics are relevant varies between routers, switches, bridges, etc. See Networking hardware .) E.g. from the HP iLO Wikipedia page , we can see that iLO4 devices are used by system administrators for remote management of embedded servers. Available versions of the device, and which ones we target. E.g. HP iLO devices exist in multiple versions (version 3, version 4...). Here, we are specifically targeting HP iLO4. Supported MIBs and OIDs (often available in official documentation), and associated MIB files. E.g. we can see that HP provides a MIB package for iLO devices here . Metrics selection \u00b6 Now that we have gathered some basic information about the device and its SNMP interfaces, we should decide which metrics we want to collect. (Devices often expose thousands of metrics through SNMP. We certainly don't want to collect them all.) Devices typically expose thousands of OIDs that can span dozens of MIBs, so this can feel daunting at first. Remember, never give up! Some guidelines to help you in this process: 10-40 metrics is a good amount already. Explore base profiles to see which ones could be applicable to the device. Explore manufacturer-specific MIB files looking for metrics such as: General health: status gauges... Network traffic: bytes in/out, errors in/out, ... CPU and memory usage. Temperature: temperature sensors, thermal condition, ... Power supply. Storage. Field-replaceable units ( FRU ). ... Implementation \u00b6 It might be tempting to gather as many metrics as possible, and only then start building the profile and writing tests. But we recommend you start small . This will allow you to quickly gain confidence in the various components of the SNMP development workflow: Editing profile files. Writing tests. Building and using simulation data. Add a profile file \u00b6 Add a .yaml file for the profile with the sysobjectid and a metric (you'll be able to add more later). For example: sysobjectid : 1.3.6.1.4.1.232.9.4.10 metrics : - MIB : CPQHLTH-MIB symbol : OID : 1.3.6.1.4.1.232.6.2.8.1.0 name : cpqHeSysUtilLifeTime Tip sysobjectid can also be a wildcard pattern to match a sub-tree of devices, e.g. 1.3.6.1.131.12.4.* . Generate a profile file from a collection of MIBs \u00b6 You can use ddev to create a profile from a list of MIBs. $ ddev meta snmp generate-profile-from-mibs --help This script requires a list of ASN1 MIB files as input arguments, and copies to the clipboard a list of metrics that can be used to create a profile. Options \u00b6 -f, --filters is an option to provide the path to a YAML file containing a collection of MIB names and their list of node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine This will include the system , interfaces and ip nodes from RFC1213-MIB , no nodes from CISCO-SYSLOG-MIB , and the snmpEngine node from SNMP-FRAMEWORK-MIB . Note that each MIB:node_name corresponds to exactly one OID. However, some MIBs report legacy nodes that are overwritten. To resolve this, edit the MIB by removing the legacy values manually before loading it with this profile generator. 
If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define one or more indexes, as columns within the same table, or columns from a different table and even a different MIB. The index value can be used to tag table's metrics. This is defined in the INDEX field in row nodes. As an example, entPhysicalContainsTable in ENTITY-MIB is as follows: entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } <== this is the index definition ::= { entPhysicalContainsTable 1 } or its JSON dump, where INDEX is replaced by indices : \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Indexes can be replaced by another MIB symbol that is more human friendly. You might prefer to see the interface name versus its numerical table index. This can be achieved using metric_tag_aliases . Add unit tests \u00b6 Add a unit test in test_profiles.py to verify that the metric is successfully collected by the integration when the profile is enabled. (These unit tests are mostly used to prevent regressions and will help with maintenance.) For example: def test_hp_ilo4 ( aggregator ): run_profile_check ( 'hp_ilo4' ) common_tags = common . CHECK_TAGS + [ 'snmp_profile:hp-ilo4' ] aggregator . assert_metric ( 'snmp.cpqHeSysUtilLifeTime' , metric_type = aggregator . MONOTONIC_COUNT , tags = common_tags , count = 1 ) aggregator . assert_all_metrics_covered () We don't have simulation data yet, so the test should fail. Let's make sure it does: $ ddev test -k test_hp_ilo4 snmp:py38 [...] ======================================= FAILURES ======================================== _____________________________________ test_hp_ilo4 ______________________________________ tests/test_profiles.py:1464: in test_hp_ilo4 aggregator.assert_metric('snmp.cpqHeSysUtilLifeTime', metric_type=aggregator.GAUGE, tags=common.CHECK_TAGS, count=1) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:253: in assert_metric self._assert(condition, msg=msg, expected_stub=expected_metric, submitted_elements=self._metrics) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:295: in _assert assert condition, new_msg E AssertionError: Needed exactly 1 candidates for 'snmp.cpqHeSysUtilLifeTime', got 0 [...] Good. Now, onto adding simulation data. 
Add simulation data \u00b6 Add a .snmprec file named after the community_string , which is the value we gave to run_profile_check() : $ touch snmp/tests/compose/data/hp_ilo4.snmprec Add lines to the .snmprec file to specify the sysobjectid and the OID listed in the profile: 1.3.6.1.2.1.1.2.0|6|1.3.6.1.4.1.232.9.4.10 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Run the test again, and make sure it passes this time: $ ddev test -k test_hp_ilo4 snmp:py38 [...] tests/test_profiles.py::test_hp_ilo4 PASSED [100%] =================================================== 1 passed, 107 deselected in 9.87s ==================================================== ________________________________________________________________ summary _________________________________________________________________ py38: commands succeeded congratulations :) Rinse and repeat \u00b6 We have now covered the basic workflow \u2014 add metrics, expand tests, add simulation data. You can now go ahead and add more metrics to the profile! Next steps \u00b6 Congratulations! You should now be able to write a basic SNMP profile. We kept this tutorial as simple as possible, but profiles offer many more options to collect metrics from SNMP devices. To learn more about what can be done in profiles, read the Profile format reference . To learn more about .snmprec files, see the Simulation data format reference .","title":"Build an SNMP Profile"},{"location":"tutorials/snmp/profiles/#build-an-snmp-profile","text":"SNMP profiles are our way of providing out-of-the-box monitoring for certain makes and models of network devices. This tutorial will walk you through the steps of building a basic SNMP profile that collects OID metrics from HP iLO4 devices. Feel free to read the Introduction to SNMP if you need a refresher on SNMP concepts such as OIDs and MIBs. Ready? Let's get started!","title":"Build an SNMP Profile"},{"location":"tutorials/snmp/profiles/#research","text":"The first step to building an SNMP profile is doing some basic research about the device, and which metrics we want to collect.","title":"Research"},{"location":"tutorials/snmp/profiles/#general-device-information","text":"Generally, you'll want to search the web and find out about the following: Device name, manufacturer, and device sysobjectid . Understand what the device does, and what it is used for. (Which metrics are relevant varies between routers, switches, bridges, etc. See Networking hardware .) E.g. from the HP iLO Wikipedia page , we can see that iLO4 devices are used by system administrators for remote management of embedded servers. Available versions of the device, and which ones we target. E.g. HP iLO devices exist in multiple versions (version 3, version 4...). Here, we are specifically targetting HP iLO4. Supported MIBs and OIDs (often available in official documentation), and associated MIB files. E.g. we can see that HP provides a MIB package for iLO devices here .","title":"General device information"},{"location":"tutorials/snmp/profiles/#metrics-selection","text":"Now that we have gathered some basic information about the device and its SNMP interfaces, we should decide which metrics we want to collect. (Devices often expose thousands of metrics through SNMP. We certainly don't want to collect them all.) Devices typically expose thousands of OIDs that can span dozens of MIB, so this can feel daunting at first. Remember, never give up! Some guidelines to help you in this process: 10-40 metrics is a good amount already. 
Explore base profiles to see which ones could be applicable to the device. Explore manufacturer-specific MIB files looking for metrics such as: General health: status gauges... Network traffic: bytes in/out, errors in/out, ... CPU and memory usage. Temperature: temperature sensors, thermal condition, ... Power supply. Storage. Field-replaceable units ( FRU ). ...","title":"Metrics selection"},{"location":"tutorials/snmp/profiles/#implementation","text":"It might be tempting to gather as many metrics as possible, and only then start building the profile and writing tests. But we recommend you start small . This will allow you to quickly gain confidence on the various components of the SNMP development workflow: Editing profile files. Writing tests. Building and using simulation data.","title":"Implementation"},{"location":"tutorials/snmp/profiles/#add-a-profile-file","text":"Add a .yaml file for the profile with the sysobjectid and a metric (you'll be able to add more later). For example: sysobjectid : 1.3.6.1.4.1.232.9.4.10 metrics : - MIB : CPQHLTH-MIB symbol : OID : 1.3.6.1.4.1.232.6.2.8.1.0 name : cpqHeSysUtilLifeTime Tip sysobjectid can also be a wildcard pattern to match a sub-tree of devices, eg 1.3.6.1.131.12.4.* .","title":"Add a profile file"},{"location":"tutorials/snmp/profiles/#generate-a-profile-file-from-a-collection-of-mibs","text":"You can use ddev to create a profile from a list of mibs. $ ddev meta snmp generate-profile-from-mibs --help This script requires a list of ASN1 MIB files as input argument, and copies to the clipboard a list of metrics that can be used to create a profile.","title":"Generate a profile file from a collection of MIBs"},{"location":"tutorials/snmp/profiles/#options","text":"-f, --filters is an option to provide the path to a YAML file containing a collection of MIB names and their list of node names to be included. For example: RFC1213-MIB : - system - interfaces - ip CISCO-SYSLOG-MIB : [] SNMP-FRAMEWORK-MIB : - snmpEngine Will include system , interfaces and ip nodes from RFC1213-MIB , no node fro, CISCO-SYSLOG-MIB and node snmpEngine from SNMP-FRAMEWORK-MIB . Note that each MIB:node_name correspond to exactly one and only one OID. However, some MIBs report legacy nodes that are overwritten. To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded. If a MIB is not fully supported, it can be listed with an empty node list, as CISCO-SYSLOG-MIB in the example. -a, --aliases is an option to provide the path to a YAML file containing a list of aliases to be used as metric tags for tables, in the following format: aliases : - from : MIB : ENTITY-MIB name : entPhysicalIndex to : MIB : ENTITY-MIB name : entPhysicalName MIBs tables most of the time define one or more indexes, as columns within the same table, or columns from a different table and even a different MIB. The index value can be used to tag table's metrics. This is defined in the INDEX field in row nodes. 
As an example, entPhysicalContainsTable in ENTITY-MIB is as follows: entPhysicalContainsEntry OBJECT-TYPE SYNTAX EntPhysicalContainsEntry MAX-ACCESS not-accessible STATUS current DESCRIPTION \"A single container/'containee' relationship.\" INDEX { entPhysicalIndex, entPhysicalChildIndex } <== this is the index definition ::= { entPhysicalContainsTable 1 } or its JSON dump, where INDEX is replaced by indices : \"entPhysicalContainsEntry\" : { \"name\" : \"entPhysicalContainsEntry\" , \"oid\" : \"1.3.6.1.2.1.47.1.3.3.1\" , \"nodetype\" : \"row\" , \"class\" : \"objecttype\" , \"maxaccess\" : \"not-accessible\" , \"indices\" : [ { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalIndex\" , \"implied\" : 0 }, { \"module\" : \"ENTITY-MIB\" , \"object\" : \"entPhysicalChildIndex\" , \"implied\" : 0 } ], \"status\" : \"current\" , \"description\" : \"A single container/'containee' relationship.\" }, Indexes can be replaced by another MIB symbol that is more human friendly. You might prefer to see the interface name versus its numerical table index. This can be achieved using metric_tag_aliases .","title":"Options"},{"location":"tutorials/snmp/profiles/#add-unit-tests","text":"Add a unit test in test_profiles.py to verify that the metric is successfully collected by the integration when the profile is enabled. (These unit tests are mostly used to prevent regressions and will help with maintenance.) For example: def test_hp_ilo4 ( aggregator ): run_profile_check ( 'hp_ilo4' ) common_tags = common . CHECK_TAGS + [ 'snmp_profile:hp-ilo4' ] aggregator . assert_metric ( 'snmp.cpqHeSysUtilLifeTime' , metric_type = aggregator . MONOTONIC_COUNT , tags = common_tags , count = 1 ) aggregator . assert_all_metrics_covered () We don't have simulation data yet, so the test should fail. Let's make sure it does: $ ddev test -k test_hp_ilo4 snmp:py38 [...] ======================================= FAILURES ======================================== _____________________________________ test_hp_ilo4 ______________________________________ tests/test_profiles.py:1464: in test_hp_ilo4 aggregator.assert_metric('snmp.cpqHeSysUtilLifeTime', metric_type=aggregator.GAUGE, tags=common.CHECK_TAGS, count=1) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:253: in assert_metric self._assert(condition, msg=msg, expected_stub=expected_metric, submitted_elements=self._metrics) ../datadog_checks_base/datadog_checks/base/stubs/aggregator.py:295: in _assert assert condition, new_msg E AssertionError: Needed exactly 1 candidates for 'snmp.cpqHeSysUtilLifeTime', got 0 [...] Good. Now, onto adding simulation data.","title":"Add unit tests"},{"location":"tutorials/snmp/profiles/#add-simulation-data","text":"Add a .snmprec file named after the community_string , which is the value we gave to run_profile_check() : $ touch snmp/tests/compose/data/hp_ilo4.snmprec Add lines to the .snmprec file to specify the sysobjectid and the OID listed in the profile: 1.3.6.1.2.1.1.2.0|6|1.3.6.1.4.1.232.9.4.10 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Run the test again, and make sure it passes this time: $ ddev test -k test_hp_ilo4 snmp:py38 [...] 
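As a sketch of how the two option files described above could be combined for this ENTITY-MIB example (the file names filters.yaml and aliases.yaml are hypothetical, and listing the table node directly in the filter is an assumption), the files would be passed to the generator with -f and -a respectively:

```yaml
# Two separate YAML files, shown together for brevity.
#
# filters.yaml (hypothetical name) -- passed with -f/--filters:
# load only the entPhysicalContainsTable node from ENTITY-MIB
ENTITY-MIB:
  - entPhysicalContainsTable

# aliases.yaml (hypothetical name) -- passed with -a/--aliases:
# tag table metrics with entPhysicalName instead of the numeric entPhysicalIndex
aliases:
  - from:
      MIB: ENTITY-MIB
      name: entPhysicalIndex
    to:
      MIB: ENTITY-MIB
      name: entPhysicalName
```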
tests/test_profiles.py::test_hp_ilo4 PASSED [100%] =================================================== 1 passed, 107 deselected in 9.87s ==================================================== ________________________________________________________________ summary _________________________________________________________________ py38: commands succeeded congratulations :)","title":"Add simulation data"},{"location":"tutorials/snmp/profiles/#rinse-and-repeat","text":"We have now covered the basic workflow \u2014 add metrics, expand tests, add simulation data. You can now go ahead and add more metrics to the profile!","title":"Rinse and repeat"},{"location":"tutorials/snmp/profiles/#next-steps","text":"Congratulations! You should now be able to write a basic SNMP profile. We kept this tutorial as simple as possible, but profiles offer many more options to collect metrics from SNMP devices. To learn more about what can be done in profiles, read the Profile format reference . To learn more about .snmprec files, see the Simulation data format reference .","title":"Next steps"},{"location":"tutorials/snmp/sim-format/","text":"Simulation Data Format Reference \u00b6 Conventions \u00b6 Simulation data for profiles is contained in .snmprec files located in the tests directory . Simulation files must be named after the SNMP community string used in the profile unit tests. For example: cisco-nexus.snmprec . File contents \u00b6 Each line in a .snmprec file corresponds to a value for an OID. Lines must be formatted as follows: <OID>|<type>|<value> For the list of supported types, see the snmpsim simulation data file format documentation. Warning Due to a limitation of snmpsim , contents of .snmprec files must be sorted in lexicographic order . Use $ sort -V /path/to/profile.snmprec to sort lines from the terminal. Symbols \u00b6 For symbol metrics , add a single line corresponding to the symbol OID. For example: 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200 Tables \u00b6 Tip Adding simulation data for tables can be particularly tedious. This section documents the manual process, but automatic generation is possible \u2014 see How to generate table simulation data . For table metrics , add one copy of the metric per row, appending the index to the OID. For example, to simulate 3 rows in the table 1.3.6.1.4.1.6.13 that has OIDs 1.3.6.1.4.1.6.13.1.6 and 1.3.6.1.4.1.6.13.1.8 , you could write: 1.3.6.1.4.1.6.13.1.6.0|2|1051200 1.3.6.1.4.1.6.13.1.6.1|2|1446 1.3.6.1.4.1.6.13.1.6.2|2|23 1.3.6.1.4.1.6.13.1.8.0|2|165 1.3.6.1.4.1.6.13.1.8.1|2|976 1.3.6.1.4.1.6.13.1.8.2|2|0 Note If the table uses table metric tags , you may need to add additional OID simulation data for those tags.","title":"Simulation Data Format Reference"},{"location":"tutorials/snmp/sim-format/#simulation-data-format-reference","text":"","title":"Simulation Data Format Reference"},{"location":"tutorials/snmp/sim-format/#conventions","text":"Simulation data for profiles is contained in .snmprec files located in the tests directory . Simulation files must be named after the SNMP community string used in the profile unit tests. For example: cisco-nexus.snmprec .","title":"Conventions"},{"location":"tutorials/snmp/sim-format/#file-contents","text":"Each line in a .snmprec file corresponds to a value for an OID. Lines must be formatted as follows: <OID>|<type>|<value> For the list of supported types, see the snmpsim simulation data file format documentation. Warning Due to a limitation of snmpsim , contents of .snmprec files must be sorted in lexicographic order . 
Use $ sort -V /path/to/profile.snmprec to sort lines from the terminal.","title":"File contents"},{"location":"tutorials/snmp/sim-format/#symbols","text":"For symbol metrics , add a single line corresponding to the symbol OID. For example: 1.3.6.1.4.1.232.6.2.8.1.0|2|1051200","title":"Symbols"},{"location":"tutorials/snmp/sim-format/#tables","text":"Tip Adding simulation data for tables can be particularly tedious. This section documents the manual process, but automatic generation is possible \u2014 see How to generate table simulation data . For table metrics , add one copy of the metric per row, appending the index to the OID. For example, to simulate 3 rows in the table 1.3.6.1.4.1.6.13 that has OIDs 1.3.6.1.4.1.6.13.1.6 and 1.3.6.1.4.1.6.13.1.8 , you could write: 1.3.6.1.4.1.6.13.1.6.0|2|1051200 1.3.6.1.4.1.6.13.1.6.1|2|1446 1.3.6.1.4.1.6.13.1.6.2|2|23 1.3.6.1.4.1.6.13.1.8.0|2|165 1.3.6.1.4.1.6.13.1.8.1|2|976 1.3.6.1.4.1.6.13.1.8.2|2|0 Note If the table uses table metric tags , you may need to add additional OID simulation data for those tags.","title":"Tables"},{"location":"tutorials/snmp/tools/","text":"Tools \u00b6 Using tcpdump with SNMP \u00b6 The tcpdump command shows the exact request and response content of SNMP GET , GETNEXT and other SNMP calls. In a shell run tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 -nn : turn off host and protocol name resolution (to avoid generating DNS packets) -i INTERFACE : listen on INTERFACE (default: lowest numbered interface) -T snmp : type/protocol, snmp in our case In another separate shell run snmpwalk or snmpget : snmpwalk -O n -v2c -c localhost:1161 1.3.6 After you've run snmpwalk , you'll see results like this from tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 tcpdump: listening on lo0, link-type NULL (BSD loopback), capture size 262144 bytes 17:25:43.639639 IP (tos 0x0, ttl 64, id 29570, offset 0, flags [none], proto UDP (17), length 76, bad cksum 0 (->91d)!) 127.0.0.1.59540 > 127.0.0.1.1161: { SNMPv2c C=\"cisco-nexus\" { GetRequest(28) R=1921760388 .1.3.6.1.2.1.1.2.0 } } 17:25:43.645088 IP (tos 0x0, ttl 64, id 26543, offset 0, flags [none], proto UDP (17), length 88, bad cksum 0 (->14e4)!) 127.0.0.1.1161 > 127.0.0.1.59540: { SNMPv2c C=\"cisco-nexus\" { GetResponse(40) R=1921760388 .1.3.6.1.2.1.1.2.0=.1.3.6.1.4.1.9.12.3.1.3.1.2 } } From the Docker Agent container \u00b6 If you want to run snmpget , snmpwalk , and tcpdump from the Docker Agent container you can install them by running the following commands (in the container): apt update apt install -y snmp tcpdump","title":"Tools"},{"location":"tutorials/snmp/tools/#tools","text":"","title":"Tools"},{"location":"tutorials/snmp/tools/#using-tcpdump-with-snmp","text":"The tcpdump command shows the exact request and response content of SNMP GET , GETNEXT and other SNMP calls. 
In a shell run tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 -nn : turn off host and protocol name resolution (to avoid generating DNS packets) -i INTERFACE : listen on INTERFACE (default: lowest numbered interface) -T snmp : type/protocol, snmp in our case In another separate shell run snmpwalk or snmpget : snmpwalk -O n -v2c -c localhost:1161 1.3.6 After you've run snmpwalk , you'll see results like this from tcpdump : tcpdump -vv -nni lo0 -T snmp host localhost and port 161 tcpdump: listening on lo0, link-type NULL (BSD loopback), capture size 262144 bytes 17:25:43.639639 IP (tos 0x0, ttl 64, id 29570, offset 0, flags [none], proto UDP (17), length 76, bad cksum 0 (->91d)!) 127.0.0.1.59540 > 127.0.0.1.1161: { SNMPv2c C=\"cisco-nexus\" { GetRequest(28) R=1921760388 .1.3.6.1.2.1.1.2.0 } } 17:25:43.645088 IP (tos 0x0, ttl 64, id 26543, offset 0, flags [none], proto UDP (17), length 88, bad cksum 0 (->14e4)!) 127.0.0.1.1161 > 127.0.0.1.59540: { SNMPv2c C=\"cisco-nexus\" { GetResponse(40) R=1921760388 .1.3.6.1.2.1.1.2.0=.1.3.6.1.4.1.9.12.3.1.3.1.2 } }","title":"Using tcpdump with SNMP"},{"location":"tutorials/snmp/tools/#from-the-docker-agent-container","text":"If you want to run snmpget , snmpwalk , and tcpdump from the Docker Agent container you can install them by running the following commands (in the container): apt update apt install -y snmp tcpdump","title":"From the Docker Agent container"}]} \ No newline at end of file