From 5b62251f9dd2cdf94cfdcb2b7552702f51c0a1ab Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Fri, 3 Jan 2020 11:36:48 -0500 Subject: [PATCH 1/9] Moves server and common files from legacy to np for infra --- .../infra/common/color_palette.test.ts | 38 + x-pack/plugins/infra/common/color_palette.ts | 56 + .../infra/common/ecs_allowed_list.test.ts | 42 + .../plugins/infra/common/ecs_allowed_list.ts | 67 + x-pack/plugins/infra/common/errors/index.ts | 7 + x-pack/plugins/infra/common/errors/metrics.ts | 10 + .../infra/common/graphql/root/index.ts | 7 + .../infra/common/graphql/root/schema.gql.ts | 18 + .../graphql/shared/fragments.gql_query.ts | 81 + .../infra/common/graphql/shared/index.ts | 8 + .../infra/common/graphql/shared/schema.gql.ts | 38 + x-pack/plugins/infra/common/graphql/types.ts | 1109 ++++++++++++ x-pack/plugins/infra/common/http_api/index.ts | 10 + .../common/http_api/inventory_meta_api.ts | 28 + .../common/http_api/log_analysis/index.ts | 8 + .../http_api/log_analysis/results/index.ts | 7 + .../log_analysis/results/log_entry_rate.ts | 77 + .../http_api/log_analysis/validation/index.ts | 7 + .../validation/log_entry_rate_indices.ts | 61 + .../common/http_api/log_entries/common.ts | 12 + .../common/http_api/log_entries/index.ts | 9 + .../infra/common/http_api/log_entries/item.ts | 32 + .../common/http_api/log_entries/summary.ts | 37 + .../log_entries/summary_highlights.ts | 46 + .../infra/common/http_api/metadata_api.ts | 93 + .../common/http_api/metrics_explorer/index.ts | 96 ++ .../infra/common/http_api/node_details_api.ts | 55 + .../infra/common/http_api/shared/errors.ts | 23 + .../infra/common/http_api/shared/index.ts | 9 + .../http_api/shared/metric_statistics.ts | 15 + .../common/http_api/shared/time_range.ts | 14 + .../infra/common/http_api/snapshot_api.ts | 70 + .../common/inventory_models/aws_ec2/index.ts | 30 + .../inventory_models/aws_ec2/layout.tsx | 130 ++ .../inventory_models/aws_ec2/metrics/index.ts | 28 + 
.../aws_ec2/metrics/snapshot/cpu.ts | 27 + .../metrics/snapshot/disk_io_read_bytes.ts | 15 + .../metrics/snapshot/disk_io_write_bytes.ts | 15 + .../aws_ec2/metrics/snapshot/rx.ts | 15 + .../aws_ec2/metrics/snapshot/tx.ts | 15 + .../metrics/tsvb/aws_ec2_cpu_utilization.ts | 37 + .../metrics/tsvb/aws_ec2_diskio_bytes.ts | 41 + .../metrics/tsvb/aws_ec2_network_traffic.ts | 41 + .../aws_ec2/toolbar_items.tsx | 37 + .../common/inventory_models/aws_rds/index.ts | 35 + .../inventory_models/aws_rds/layout.tsx | 183 ++ .../inventory_models/aws_rds/metrics/index.ts | 38 + .../aws_rds/metrics/snapshot/cpu.ts | 27 + .../snapshot/rds_active_transactions.ts | 15 + .../metrics/snapshot/rds_connections.ts | 15 + .../aws_rds/metrics/snapshot/rds_latency.ts | 15 + .../metrics/snapshot/rds_queries_executed.ts | 15 + .../tsvb/aws_rds_active_transactions.ts | 36 + .../metrics/tsvb/aws_rds_connections.ts | 25 + .../aws_rds/metrics/tsvb/aws_rds_cpu_total.ts | 37 + .../aws_rds/metrics/tsvb/aws_rds_latency.ts | 68 + .../metrics/tsvb/aws_rds_queries_executed.ts | 24 + .../aws_rds/toolbar_items.tsx | 36 + .../common/inventory_models/aws_s3/index.ts | 35 + .../common/inventory_models/aws_s3/layout.tsx | 146 ++ .../inventory_models/aws_s3/metrics/index.ts | 38 + .../aws_s3/metrics/snapshot/s3_bucket_size.ts | 15 + .../metrics/snapshot/s3_download_bytes.ts | 15 + .../metrics/snapshot/s3_number_of_objects.ts | 15 + .../metrics/snapshot/s3_total_requests.ts | 15 + .../metrics/snapshot/s3_upload_bytes.ts | 15 + .../aws_s3/metrics/tsvb/aws_s3_bucket_size.ts | 26 + .../metrics/tsvb/aws_s3_download_bytes.ts | 25 + .../metrics/tsvb/aws_s3_number_of_objects.ts | 26 + .../metrics/tsvb/aws_s3_total_requests.ts | 26 + .../metrics/tsvb/aws_s3_upload_bytes.ts | 26 + .../inventory_models/aws_s3/toolbar_items.tsx | 32 + .../common/inventory_models/aws_sqs/index.ts | 35 + .../inventory_models/aws_sqs/layout.tsx | 146 ++ .../inventory_models/aws_sqs/metrics/index.ts | 38 + 
.../metrics/snapshot/sqs_messages_delayed.ts | 15 + .../metrics/snapshot/sqs_messages_empty.ts | 15 + .../metrics/snapshot/sqs_messages_sent.ts | 15 + .../metrics/snapshot/sqs_messages_visible.ts | 15 + .../metrics/snapshot/sqs_oldest_message.ts | 15 + .../metrics/tsvb/aws_sqs_messages_delayed.ts | 26 + .../metrics/tsvb/aws_sqs_messages_empty.ts | 26 + .../metrics/tsvb/aws_sqs_messages_sent.ts | 26 + .../metrics/tsvb/aws_sqs_messages_visible.ts | 26 + .../metrics/tsvb/aws_sqs_oldest_message.ts | 26 + .../aws_sqs/toolbar_items.tsx | 32 + .../inventory_models/container/index.ts | 37 + .../inventory_models/container/layout.tsx | 230 +++ .../container/metrics/index.ts | 34 + .../container/metrics/snapshot/cpu.ts | 15 + .../container/metrics/snapshot/memory.ts | 9 + .../container/metrics/snapshot/rx.ts | 12 + .../container/metrics/snapshot/tx.ts | 12 + .../metrics/tsvb/container_cpu_kernel.ts | 33 + .../metrics/tsvb/container_cpu_usage.ts | 33 + .../metrics/tsvb/container_disk_io_bytes.ts | 80 + .../metrics/tsvb/container_diskio_ops.ts | 80 + .../metrics/tsvb/container_memory.ts | 33 + .../metrics/tsvb/container_network_traffic.ts | 56 + .../metrics/tsvb/container_overview.ts | 66 + .../container/toolbar_items.tsx | 34 + .../inventory_models/create_tsvb_model.ts | 24 + .../common/inventory_models/host/index.ts | 47 + .../common/inventory_models/host/layout.tsx | 370 ++++ .../inventory_models/host/metrics/index.ts | 56 + .../host/metrics/snapshot/cpu.ts | 39 + .../host/metrics/snapshot/load.ts | 9 + .../host/metrics/snapshot/log_rate.ts | 32 + .../host/metrics/snapshot/memory.ts | 11 + .../host/metrics/snapshot/rx.ts | 12 + .../host/metrics/snapshot/tx.ts | 12 + .../host/metrics/tsvb/host_cpu_usage.ts | 253 +++ .../host/metrics/tsvb/host_docker_info.ts | 55 + .../host/metrics/tsvb/host_docker_overview.ts | 65 + .../metrics/tsvb/host_docker_top_5_by_cpu.ts | 36 + .../tsvb/host_docker_top_5_by_memory.ts | 36 + .../host/metrics/tsvb/host_filesystem.ts | 37 + 
.../host/metrics/tsvb/host_k8s_cpu_cap.ts | 57 + .../host/metrics/tsvb/host_k8s_disk_cap.ts | 44 + .../host/metrics/tsvb/host_k8s_memory_cap.ts | 45 + .../host/metrics/tsvb/host_k8s_overview.ts | 154 ++ .../host/metrics/tsvb/host_k8s_pod_cap.ts | 46 + .../host/metrics/tsvb/host_load.ts | 55 + .../host/metrics/tsvb/host_memory_usage.ts | 77 + .../host/metrics/tsvb/host_network_traffic.ts | 92 + .../host/metrics/tsvb/host_system_overview.ts | 145 ++ .../inventory_models/host/toolbar_items.tsx | 35 + .../infra/common/inventory_models/index.ts | 65 + .../common/inventory_models/intl_strings.ts | 70 + .../infra/common/inventory_models/layouts.ts | 51 + .../infra/common/inventory_models/metrics.ts | 27 + .../common/inventory_models/pod/index.ts | 37 + .../common/inventory_models/pod/layout.tsx | 159 ++ .../inventory_models/pod/metrics/index.ts | 30 + .../pod/metrics/snapshot/cpu.ts | 15 + .../pod/metrics/snapshot/memory.ts | 11 + .../pod/metrics/snapshot/rx.ts | 8 + .../pod/metrics/snapshot/tx.ts | 8 + .../pod/metrics/tsvb/pod_cpu_usage.ts | 33 + .../pod/metrics/tsvb/pod_log_usage.ts | 55 + .../pod/metrics/tsvb/pod_memory_usage.ts | 33 + .../pod/metrics/tsvb/pod_network_traffic.ts | 80 + .../pod/metrics/tsvb/pod_overview.ts | 90 + .../inventory_models/pod/toolbar_items.tsx | 27 + .../compontents/cloud_toolbar_items.tsx | 38 + .../metrics_and_groupby_toolbar_items.tsx | 52 + .../inventory_models/shared/layouts/aws.tsx | 237 +++ .../inventory_models/shared/layouts/nginx.tsx | 103 ++ .../inventory_models/shared/metrics/index.ts | 39 + .../shared/metrics/required_metrics.ts | 23 + .../shared/metrics/snapshot/count.ts | 20 + .../metrics/snapshot/network_traffic.ts | 30 + .../network_traffic_with_interfaces.ts | 42 + .../shared/metrics/snapshot/rate.ts | 30 + .../metrics/tsvb/aws_cpu_utilization.ts | 34 + .../shared/metrics/tsvb/aws_diskio_bytes.ts | 83 + .../shared/metrics/tsvb/aws_diskio_ops.ts | 81 + .../shared/metrics/tsvb/aws_network_bytes.ts | 85 + 
.../metrics/tsvb/aws_network_packets.ts | 51 + .../shared/metrics/tsvb/aws_overview.ts | 64 + .../metrics/tsvb/nginx_active_connections.ts | 33 + .../shared/metrics/tsvb/nginx_hits.ts | 78 + .../shared/metrics/tsvb/nginx_request_rate.ts | 44 + .../tsvb/nginx_requests_per_connection.ts | 48 + .../infra/common/inventory_models/toolbars.ts | 43 + .../infra/common/inventory_models/types.ts | 337 ++++ .../infra/common/log_analysis/index.ts | 8 + .../common/log_analysis/job_parameters.ts | 30 + .../infra/common/log_analysis/log_analysis.ts | 61 + .../plugins/infra/common/log_entry/index.ts | 7 + .../infra/common/log_entry/log_entry.ts | 44 + .../infra/common/log_search_result/index.ts | 12 + .../log_search_result/log_search_result.ts | 30 + .../infra/common/log_search_summary/index.ts | 7 + .../log_search_summary/log_search_summary.ts | 14 + .../infra/common/log_text_scale/index.ts | 7 + .../common/log_text_scale/log_text_scale.ts | 11 + x-pack/plugins/infra/common/runtime_types.ts | 14 + .../common/saved_objects/inventory_view.ts | 87 + .../saved_objects/metrics_explorer_view.ts | 94 + x-pack/plugins/infra/common/time/index.ts | 9 + x-pack/plugins/infra/common/time/time_key.ts | 96 ++ .../plugins/infra/common/time/time_scale.ts | 37 + x-pack/plugins/infra/common/time/time_unit.ts | 41 + x-pack/plugins/infra/common/typed_json.ts | 14 + x-pack/plugins/infra/common/utility_types.ts | 45 + x-pack/plugins/infra/server/features.ts | 65 + x-pack/plugins/infra/server/graphql/index.ts | 19 + .../infra/server/graphql/log_entries/index.ts | 7 + .../server/graphql/log_entries/resolvers.ts | 175 ++ .../server/graphql/log_entries/schema.gql.ts | 136 ++ .../server/graphql/source_status/index.ts | 7 + .../server/graphql/source_status/resolvers.ts | 90 + .../graphql/source_status/schema.gql.ts | 40 + .../infra/server/graphql/sources/index.ts | 8 + .../infra/server/graphql/sources/resolvers.ts | 197 +++ .../server/graphql/sources/schema.gql.ts | 201 +++ 
x-pack/plugins/infra/server/graphql/types.ts | 1513 +++++++++++++++++ x-pack/plugins/infra/server/infra_server.ts | 52 + x-pack/plugins/infra/server/kibana.index.ts | 46 + .../lib/adapters/fields/adapter_types.ts | 23 + .../fields/framework_fields_adapter.ts | 126 ++ .../infra/server/lib/adapters/fields/index.ts | 7 + .../lib/adapters/framework/adapter_types.ts | 172 ++ .../server/lib/adapters/framework/index.ts | 7 + .../framework/kibana_framework_adapter.ts | 259 +++ .../lib/adapters/log_entries/adapter_types.ts | 5 + .../server/lib/adapters/log_entries/index.ts | 5 + .../log_entries/kibana_log_entries_adapter.ts | 390 +++++ .../lib/adapters/metrics/adapter_types.ts | 125 ++ .../server/lib/adapters/metrics/index.ts | 7 + .../metrics/kibana_metrics_adapter.ts | 154 ++ .../adapters/metrics/lib/check_valid_node.ts | 32 + .../server/lib/adapters/metrics/lib/errors.ts | 15 + .../elasticsearch_source_status_adapter.ts | 71 + .../lib/adapters/source_status/index.ts | 7 + .../infra/server/lib/compose/kibana.ts | 56 + x-pack/plugins/infra/server/lib/constants.ts | 7 + .../infra/server/lib/domains/fields_domain.ts | 40 + .../builtin_rules/filebeat_apache2.test.ts | 263 +++ .../builtin_rules/filebeat_apache2.ts | 100 ++ .../builtin_rules/filebeat_auditd.test.ts | 359 ++++ .../builtin_rules/filebeat_auditd.ts | 119 ++ .../builtin_rules/filebeat_haproxy.test.ts | 791 +++++++++ .../builtin_rules/filebeat_haproxy.ts | 329 ++++ .../builtin_rules/filebeat_icinga.test.ts | 147 ++ .../builtin_rules/filebeat_icinga.ts | 86 + .../builtin_rules/filebeat_iis.test.ts | 562 ++++++ .../builtin_rules/filebeat_iis.ts | 142 ++ .../builtin_rules/filebeat_kafka.test.ts | 60 + .../builtin_rules/filebeat_logstash.test.ts | 206 +++ .../builtin_rules/filebeat_logstash.ts | 80 + .../builtin_rules/filebeat_mongodb.test.ts | 52 + .../builtin_rules/filebeat_mongodb.ts | 28 + .../builtin_rules/filebeat_mysql.test.ts | 219 +++ .../builtin_rules/filebeat_mysql.ts | 104 ++ 
.../builtin_rules/filebeat_nginx.test.ts | 264 +++ .../builtin_rules/filebeat_nginx.ts | 106 ++ .../builtin_rules/filebeat_osquery.test.ts | 77 + .../builtin_rules/filebeat_osquery.ts | 34 + .../builtin_rules/filebeat_redis.ts | 30 + .../builtin_rules/filebeat_system.ts | 90 + .../builtin_rules/filebeat_traefik.test.ts | 124 ++ .../builtin_rules/filebeat_traefik.ts | 64 + .../builtin_rules/generic.test.ts | 168 ++ .../builtin_rules/generic.ts | 101 ++ .../builtin_rules/generic_webserver.ts | 116 ++ .../builtin_rules/helpers.ts | 12 + .../log_entries_domain/builtin_rules/index.ts | 63 + ...document_source_to_log_item_fields.test.ts | 70 + ...vert_document_source_to_log_item_fields.ts | 38 + .../lib/domains/log_entries_domain/index.ts | 7 + .../log_entries_domain/log_entries_domain.ts | 436 +++++ .../lib/domains/log_entries_domain/message.ts | 190 +++ .../domains/log_entries_domain/rule_types.ts | 36 + .../server/lib/domains/metrics_domain.ts | 25 + .../plugins/infra/server/lib/infra_types.ts | 49 + .../infra/server/lib/log_analysis/errors.ts | 12 + .../infra/server/lib/log_analysis/index.ts | 8 + .../server/lib/log_analysis/log_analysis.ts | 143 ++ .../server/lib/log_analysis/queries/index.ts | 7 + .../log_analysis/queries/log_entry_rate.ts | 182 ++ .../infra/server/lib/snapshot/constants.ts | 9 + .../create_timerange_with_interval.ts | 59 + .../infra/server/lib/snapshot/index.ts | 7 + .../server/lib/snapshot/query_helpers.ts | 73 + .../lib/snapshot/response_helpers.test.ts | 78 + .../server/lib/snapshot/response_helpers.ts | 173 ++ .../infra/server/lib/snapshot/snapshot.ts | 237 +++ .../infra/server/lib/snapshot/types.ts | 25 + .../plugins/infra/server/lib/source_status.ts | 106 ++ .../infra/server/lib/sources/defaults.ts | 40 + .../infra/server/lib/sources/errors.ts | 12 + .../plugins/infra/server/lib/sources/index.ts | 10 + .../lib/sources/saved_object_mappings.ts | 79 + .../infra/server/lib/sources/sources.test.ts | 153 ++ .../infra/server/lib/sources/sources.ts 
| 247 +++ .../plugins/infra/server/lib/sources/types.ts | 149 ++ .../infra/server/new_platform_index.ts | 15 + .../infra/server/new_platform_plugin.ts | 135 ++ .../server/routes/inventory_metadata/index.ts | 62 + .../lib/get_cloud_metadata.ts | 108 ++ .../infra/server/routes/ip_to_hostname.ts | 65 + .../infra/server/routes/log_analysis/index.ts | 8 + .../routes/log_analysis/results/index.ts | 7 + .../log_analysis/results/log_entry_rate.ts | 83 + .../routes/log_analysis/validation/index.ts | 7 + .../routes/log_analysis/validation/indices.ts | 93 + .../infra/server/routes/log_entries/index.ts | 9 + .../infra/server/routes/log_entries/item.ts | 55 + .../server/routes/log_entries/summary.ts | 66 + .../routes/log_entries/summary_highlights.ts | 70 + .../infra/server/routes/metadata/index.ts | 95 ++ .../metadata/lib/get_cloud_metric_metadata.ts | 62 + .../metadata/lib/get_metric_metadata.ts | 83 + .../routes/metadata/lib/get_node_info.ts | 82 + .../routes/metadata/lib/get_pod_node_name.ts | 47 + .../routes/metadata/lib/has_apm_data.ts | 56 + .../routes/metadata/lib/pick_feature_name.ts | 16 + .../server/routes/metrics_explorer/index.ts | 62 + .../lib/create_metrics_model.ts | 67 + .../metrics_explorer/lib/get_groupings.ts | 120 ++ .../lib/populate_series_with_tsvb_data.ts | 134 ++ .../server/routes/metrics_explorer/types.ts | 36 + .../infra/server/routes/node_details/index.ts | 66 + .../infra/server/routes/snapshot/index.ts | 72 + x-pack/plugins/infra/server/saved_objects.ts | 15 + .../infra/server/usage/usage_collector.ts | 119 ++ x-pack/plugins/infra/server/utils/README.md | 1 + .../server/utils/calculate_metric_interval.ts | 105 ++ .../server/utils/create_afterkey_handler.ts | 21 + .../server/utils/get_all_composite_data.ts | 56 + .../server/utils/get_interval_in_seconds.ts | 31 + .../infra/server/utils/serialized_query.ts | 34 + .../utils/typed_elasticsearch_mappings.ts | 48 + .../infra/server/utils/typed_resolvers.ts | 97 ++ 316 files changed, 23699 insertions(+) 
create mode 100644 x-pack/plugins/infra/common/color_palette.test.ts create mode 100644 x-pack/plugins/infra/common/color_palette.ts create mode 100644 x-pack/plugins/infra/common/ecs_allowed_list.test.ts create mode 100644 x-pack/plugins/infra/common/ecs_allowed_list.ts create mode 100644 x-pack/plugins/infra/common/errors/index.ts create mode 100644 x-pack/plugins/infra/common/errors/metrics.ts create mode 100644 x-pack/plugins/infra/common/graphql/root/index.ts create mode 100644 x-pack/plugins/infra/common/graphql/root/schema.gql.ts create mode 100644 x-pack/plugins/infra/common/graphql/shared/fragments.gql_query.ts create mode 100644 x-pack/plugins/infra/common/graphql/shared/index.ts create mode 100644 x-pack/plugins/infra/common/graphql/shared/schema.gql.ts create mode 100644 x-pack/plugins/infra/common/graphql/types.ts create mode 100644 x-pack/plugins/infra/common/http_api/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/inventory_meta_api.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_analysis/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_analysis/results/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_analysis/results/log_entry_rate.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_analysis/validation/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_analysis/validation/log_entry_rate_indices.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_entries/common.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_entries/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_entries/item.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_entries/summary.ts create mode 100644 x-pack/plugins/infra/common/http_api/log_entries/summary_highlights.ts create mode 100644 x-pack/plugins/infra/common/http_api/metadata_api.ts create mode 100644 x-pack/plugins/infra/common/http_api/metrics_explorer/index.ts create 
mode 100644 x-pack/plugins/infra/common/http_api/node_details_api.ts create mode 100644 x-pack/plugins/infra/common/http_api/shared/errors.ts create mode 100644 x-pack/plugins/infra/common/http_api/shared/index.ts create mode 100644 x-pack/plugins/infra/common/http_api/shared/metric_statistics.ts create mode 100644 x-pack/plugins/infra/common/http_api/shared/time_range.ts create mode 100644 x-pack/plugins/infra/common/http_api/snapshot_api.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/cpu.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_read_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_write_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/rx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/tx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_cpu_utilization.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_diskio_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_network_traffic.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_ec2/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/cpu.ts create mode 100644 
x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_active_transactions.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_connections.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_latency.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_queries_executed.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_active_transactions.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_connections.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_cpu_total.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_latency.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_queries_executed.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_rds/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_bucket_size.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_download_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_number_of_objects.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_total_requests.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_upload_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_bucket_size.ts create mode 100644 
x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_download_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_number_of_objects.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_total_requests.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_upload_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_s3/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_delayed.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_empty.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_sent.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_visible.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_oldest_message.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_delayed.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_empty.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_sent.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_visible.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_oldest_message.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/aws_sqs/toolbar_items.tsx create mode 100644 
x-pack/plugins/infra/common/inventory_models/container/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/cpu.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/memory.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/rx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/tx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_kernel.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_disk_io_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_diskio_ops.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_memory.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_network_traffic.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/container/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/create_tsvb_model.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/cpu.ts create mode 100644 
x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/load.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/log_rate.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/memory.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/rx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/tx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_cpu_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_info.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_cpu.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_memory.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_filesystem.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_cpu_cap.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_disk_cap.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_memory_cap.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_pod_cap.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_load.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_memory_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_network_traffic.ts create mode 100644 
x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_system_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/host/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/intl_strings.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/layouts.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/metrics.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/layout.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/cpu.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/memory.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/rx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/tx.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_cpu_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_log_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_memory_usage.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_network_traffic.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/pod/toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/compontents/cloud_toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/compontents/metrics_and_groupby_toolbar_items.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/layouts/aws.tsx create mode 
100644 x-pack/plugins/infra/common/inventory_models/shared/layouts/nginx.tsx create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/index.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/required_metrics.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/count.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic_with_interfaces.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/rate.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_cpu_utilization.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_ops.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_bytes.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_packets.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_overview.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_active_connections.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_hits.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_request_rate.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_requests_per_connection.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/toolbars.ts create mode 100644 x-pack/plugins/infra/common/inventory_models/types.ts create mode 100644 x-pack/plugins/infra/common/log_analysis/index.ts create mode 100644 
x-pack/plugins/infra/common/log_analysis/job_parameters.ts create mode 100644 x-pack/plugins/infra/common/log_analysis/log_analysis.ts create mode 100644 x-pack/plugins/infra/common/log_entry/index.ts create mode 100644 x-pack/plugins/infra/common/log_entry/log_entry.ts create mode 100644 x-pack/plugins/infra/common/log_search_result/index.ts create mode 100644 x-pack/plugins/infra/common/log_search_result/log_search_result.ts create mode 100644 x-pack/plugins/infra/common/log_search_summary/index.ts create mode 100644 x-pack/plugins/infra/common/log_search_summary/log_search_summary.ts create mode 100644 x-pack/plugins/infra/common/log_text_scale/index.ts create mode 100644 x-pack/plugins/infra/common/log_text_scale/log_text_scale.ts create mode 100644 x-pack/plugins/infra/common/runtime_types.ts create mode 100644 x-pack/plugins/infra/common/saved_objects/inventory_view.ts create mode 100644 x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts create mode 100644 x-pack/plugins/infra/common/time/index.ts create mode 100644 x-pack/plugins/infra/common/time/time_key.ts create mode 100644 x-pack/plugins/infra/common/time/time_scale.ts create mode 100644 x-pack/plugins/infra/common/time/time_unit.ts create mode 100644 x-pack/plugins/infra/common/typed_json.ts create mode 100644 x-pack/plugins/infra/common/utility_types.ts create mode 100644 x-pack/plugins/infra/server/features.ts create mode 100644 x-pack/plugins/infra/server/graphql/index.ts create mode 100644 x-pack/plugins/infra/server/graphql/log_entries/index.ts create mode 100644 x-pack/plugins/infra/server/graphql/log_entries/resolvers.ts create mode 100644 x-pack/plugins/infra/server/graphql/log_entries/schema.gql.ts create mode 100644 x-pack/plugins/infra/server/graphql/source_status/index.ts create mode 100644 x-pack/plugins/infra/server/graphql/source_status/resolvers.ts create mode 100644 x-pack/plugins/infra/server/graphql/source_status/schema.gql.ts create mode 100644 
x-pack/plugins/infra/server/graphql/sources/index.ts create mode 100644 x-pack/plugins/infra/server/graphql/sources/resolvers.ts create mode 100644 x-pack/plugins/infra/server/graphql/sources/schema.gql.ts create mode 100644 x-pack/plugins/infra/server/graphql/types.ts create mode 100644 x-pack/plugins/infra/server/infra_server.ts create mode 100644 x-pack/plugins/infra/server/kibana.index.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/fields/adapter_types.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/fields/index.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/framework/adapter_types.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/framework/index.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/log_entries/index.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/metrics/adapter_types.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/metrics/index.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/metrics/lib/errors.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts create mode 100644 x-pack/plugins/infra/server/lib/adapters/source_status/index.ts create mode 100644 x-pack/plugins/infra/server/lib/compose/kibana.ts create mode 100644 x-pack/plugins/infra/server/lib/constants.ts create mode 100644 
x-pack/plugins/infra/server/lib/domains/fields_domain.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts create 
mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/index.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/log_entries_domain/message.ts create mode 100644 
x-pack/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts create mode 100644 x-pack/plugins/infra/server/lib/domains/metrics_domain.ts create mode 100644 x-pack/plugins/infra/server/lib/infra_types.ts create mode 100644 x-pack/plugins/infra/server/lib/log_analysis/errors.ts create mode 100644 x-pack/plugins/infra/server/lib/log_analysis/index.ts create mode 100644 x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts create mode 100644 x-pack/plugins/infra/server/lib/log_analysis/queries/index.ts create mode 100644 x-pack/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/constants.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/index.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/query_helpers.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/response_helpers.test.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/response_helpers.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/snapshot.ts create mode 100644 x-pack/plugins/infra/server/lib/snapshot/types.ts create mode 100644 x-pack/plugins/infra/server/lib/source_status.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/defaults.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/errors.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/index.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/saved_object_mappings.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/sources.test.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/sources.ts create mode 100644 x-pack/plugins/infra/server/lib/sources/types.ts create mode 100644 x-pack/plugins/infra/server/new_platform_index.ts create mode 100644 x-pack/plugins/infra/server/new_platform_plugin.ts create mode 100644 
x-pack/plugins/infra/server/routes/inventory_metadata/index.ts create mode 100644 x-pack/plugins/infra/server/routes/inventory_metadata/lib/get_cloud_metadata.ts create mode 100644 x-pack/plugins/infra/server/routes/ip_to_hostname.ts create mode 100644 x-pack/plugins/infra/server/routes/log_analysis/index.ts create mode 100644 x-pack/plugins/infra/server/routes/log_analysis/results/index.ts create mode 100644 x-pack/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts create mode 100644 x-pack/plugins/infra/server/routes/log_analysis/validation/index.ts create mode 100644 x-pack/plugins/infra/server/routes/log_analysis/validation/indices.ts create mode 100644 x-pack/plugins/infra/server/routes/log_entries/index.ts create mode 100644 x-pack/plugins/infra/server/routes/log_entries/item.ts create mode 100644 x-pack/plugins/infra/server/routes/log_entries/summary.ts create mode 100644 x-pack/plugins/infra/server/routes/log_entries/summary_highlights.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/index.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/get_node_info.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/get_pod_node_name.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/has_apm_data.ts create mode 100644 x-pack/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts create mode 100644 x-pack/plugins/infra/server/routes/metrics_explorer/index.ts create mode 100644 x-pack/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts create mode 100644 x-pack/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts create mode 100644 x-pack/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts create mode 100644 
x-pack/plugins/infra/server/routes/metrics_explorer/types.ts create mode 100644 x-pack/plugins/infra/server/routes/node_details/index.ts create mode 100644 x-pack/plugins/infra/server/routes/snapshot/index.ts create mode 100644 x-pack/plugins/infra/server/saved_objects.ts create mode 100644 x-pack/plugins/infra/server/usage/usage_collector.ts create mode 100644 x-pack/plugins/infra/server/utils/README.md create mode 100644 x-pack/plugins/infra/server/utils/calculate_metric_interval.ts create mode 100644 x-pack/plugins/infra/server/utils/create_afterkey_handler.ts create mode 100644 x-pack/plugins/infra/server/utils/get_all_composite_data.ts create mode 100644 x-pack/plugins/infra/server/utils/get_interval_in_seconds.ts create mode 100644 x-pack/plugins/infra/server/utils/serialized_query.ts create mode 100644 x-pack/plugins/infra/server/utils/typed_elasticsearch_mappings.ts create mode 100644 x-pack/plugins/infra/server/utils/typed_resolvers.ts diff --git a/x-pack/plugins/infra/common/color_palette.test.ts b/x-pack/plugins/infra/common/color_palette.test.ts new file mode 100644 index 0000000000000..ce0219862480d --- /dev/null +++ b/x-pack/plugins/infra/common/color_palette.test.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { sampleColor, MetricsExplorerColor, colorTransformer } from './color_palette'; +describe('Color Palette', () => { + describe('sampleColor()', () => { + it('should just work', () => { + const usedColors = [MetricsExplorerColor.color0]; + const color = sampleColor(usedColors); + expect(color).toBe(MetricsExplorerColor.color1); + }); + + it('should return color0 when nothing is available', () => { + const usedColors = [ + MetricsExplorerColor.color0, + MetricsExplorerColor.color1, + MetricsExplorerColor.color2, + MetricsExplorerColor.color3, + MetricsExplorerColor.color4, + MetricsExplorerColor.color5, + MetricsExplorerColor.color6, + MetricsExplorerColor.color7, + MetricsExplorerColor.color8, + MetricsExplorerColor.color9, + ]; + const color = sampleColor(usedColors); + expect(color).toBe(MetricsExplorerColor.color0); + }); + }); + describe('colorTransformer()', () => { + it('should just work', () => { + expect(colorTransformer(MetricsExplorerColor.color0)).toBe('#3185FC'); + }); + }); +}); diff --git a/x-pack/plugins/infra/common/color_palette.ts b/x-pack/plugins/infra/common/color_palette.ts new file mode 100644 index 0000000000000..c43c17b9b0ef3 --- /dev/null +++ b/x-pack/plugins/infra/common/color_palette.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { difference, first, values } from 'lodash'; + +export enum MetricsExplorerColor { + color0 = 'color0', + color1 = 'color1', + color2 = 'color2', + color3 = 'color3', + color4 = 'color4', + color5 = 'color5', + color6 = 'color6', + color7 = 'color7', + color8 = 'color8', + color9 = 'color9', +} + +export interface MetricsExplorerPalette { + [MetricsExplorerColor.color0]: string; + [MetricsExplorerColor.color1]: string; + [MetricsExplorerColor.color2]: string; + [MetricsExplorerColor.color3]: string; + [MetricsExplorerColor.color4]: string; + [MetricsExplorerColor.color5]: string; + [MetricsExplorerColor.color6]: string; + [MetricsExplorerColor.color7]: string; + [MetricsExplorerColor.color8]: string; + [MetricsExplorerColor.color9]: string; +} + +export const defaultPalette: MetricsExplorerPalette = { + [MetricsExplorerColor.color0]: '#3185FC', // euiColorVis1 (blue) + [MetricsExplorerColor.color1]: '#DB1374', // euiColorVis2 (red-ish) + [MetricsExplorerColor.color2]: '#00B3A4', // euiColorVis0 (green-ish) + [MetricsExplorerColor.color3]: '#490092', // euiColorVis3 (purple) + [MetricsExplorerColor.color4]: '#FEB6DB', // euiColorVis4 (pink) + [MetricsExplorerColor.color5]: '#E6C220', // euiColorVis5 (yellow) + [MetricsExplorerColor.color6]: '#BFA180', // euiColorVis6 (tan) + [MetricsExplorerColor.color7]: '#F98510', // euiColorVis7 (orange) + [MetricsExplorerColor.color8]: '#461A0A', // euiColorVis8 (brown) + [MetricsExplorerColor.color9]: '#920000', // euiColorVis9 (maroon) +}; + +export const createPaletteTransformer = (palette: MetricsExplorerPalette) => ( + color: MetricsExplorerColor +) => palette[color]; + +export const colorTransformer = createPaletteTransformer(defaultPalette); + +export const sampleColor = (usedColors: MetricsExplorerColor[] = []): MetricsExplorerColor => { + const available = difference(values(MetricsExplorerColor) as MetricsExplorerColor[], usedColors); + return first(available) || MetricsExplorerColor.color0; +}; diff --git 
a/x-pack/plugins/infra/common/ecs_allowed_list.test.ts b/x-pack/plugins/infra/common/ecs_allowed_list.test.ts new file mode 100644 index 0000000000000..66ed681255d34 --- /dev/null +++ b/x-pack/plugins/infra/common/ecs_allowed_list.test.ts @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { + getAllowedListForPrefix, + ECS_ALLOWED_LIST, + K8S_ALLOWED_LIST, + PROMETHEUS_ALLOWED_LIST, + DOCKER_ALLOWED_LIST, +} from './ecs_allowed_list'; +describe('getAllowedListForPrefix()', () => { + test('kubernetes', () => { + expect(getAllowedListForPrefix('kubernetes.pod')).toEqual([ + ...ECS_ALLOWED_LIST, + 'kubernetes.pod', + ...K8S_ALLOWED_LIST, + ]); + }); + test('docker', () => { + expect(getAllowedListForPrefix('docker.container')).toEqual([ + ...ECS_ALLOWED_LIST, + 'docker.container', + ...DOCKER_ALLOWED_LIST, + ]); + }); + test('prometheus', () => { + expect(getAllowedListForPrefix('prometheus.metrics')).toEqual([ + ...ECS_ALLOWED_LIST, + 'prometheus.metrics', + ...PROMETHEUS_ALLOWED_LIST, + ]); + }); + test('anything.else', () => { + expect(getAllowedListForPrefix('anything.else')).toEqual([ + ...ECS_ALLOWED_LIST, + 'anything.else', + ]); + }); +}); diff --git a/x-pack/plugins/infra/common/ecs_allowed_list.ts b/x-pack/plugins/infra/common/ecs_allowed_list.ts new file mode 100644 index 0000000000000..1728cd1fa4b45 --- /dev/null +++ b/x-pack/plugins/infra/common/ecs_allowed_list.ts @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { first } from 'lodash'; + +export const ECS_ALLOWED_LIST = [ + 'host', + 'cloud', + 'event', + 'agent', + 'fields', + 'service', + 'ecs', + 'metricset', + 'tags', + 'message', + 'labels', + '@timestamp', + 'source', + 'container', +]; + +export const K8S_ALLOWED_LIST = [ + 'kubernetes.pod.name', + 'kubernetes.pod.uid', + 'kubernetes.namespace', + 'kubernetes.node.name', + 'kubernetes.labels', + 'kubernetes.annotations', + 'kubernetes.replicaset.name', + 'kubernetes.deployment.name', + 'kubernetes.statefulset.name', + 'kubernetes.container.name', + 'kubernetes.container.image', +]; + +export const PROMETHEUS_ALLOWED_LIST = ['prometheus.labels', 'prometheus.metrics']; + +export const DOCKER_ALLOWED_LIST = [ + 'docker.container.id', + 'docker.container.image', + 'docker.container.name', + 'docker.container.labels', +]; + +export const AWS_S3_ALLOWED_LIST = ['aws.s3']; + +export const getAllowedListForPrefix = (prefix: string) => { + const firstPart = first(prefix.split(/\./)); + const defaultAllowedList = prefix ? [...ECS_ALLOWED_LIST, prefix] : ECS_ALLOWED_LIST; + switch (firstPart) { + case 'docker': + return [...defaultAllowedList, ...DOCKER_ALLOWED_LIST]; + case 'prometheus': + return [...defaultAllowedList, ...PROMETHEUS_ALLOWED_LIST]; + case 'kubernetes': + return [...defaultAllowedList, ...K8S_ALLOWED_LIST]; + case 'aws': + if (prefix === 'aws.s3_daily_storage') { + return [...defaultAllowedList, ...AWS_S3_ALLOWED_LIST]; + } + default: + return defaultAllowedList; + } +}; diff --git a/x-pack/plugins/infra/common/errors/index.ts b/x-pack/plugins/infra/common/errors/index.ts new file mode 100644 index 0000000000000..88b76eb0ef775 --- /dev/null +++ b/x-pack/plugins/infra/common/errors/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './metrics'; diff --git a/x-pack/plugins/infra/common/errors/metrics.ts b/x-pack/plugins/infra/common/errors/metrics.ts new file mode 100644 index 0000000000000..2acf2b741cec9 --- /dev/null +++ b/x-pack/plugins/infra/common/errors/metrics.ts @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export enum InfraMetricsErrorCodes { + // eslint-disable-next-line @typescript-eslint/camelcase + invalid_node = 'METRICS_INVALID_NODE', +} diff --git a/x-pack/plugins/infra/common/graphql/root/index.ts b/x-pack/plugins/infra/common/graphql/root/index.ts new file mode 100644 index 0000000000000..47417b6376307 --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/root/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export { rootSchema } from './schema.gql'; diff --git a/x-pack/plugins/infra/common/graphql/root/schema.gql.ts b/x-pack/plugins/infra/common/graphql/root/schema.gql.ts new file mode 100644 index 0000000000000..1665334827e8e --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/root/schema.gql.ts @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import gql from 'graphql-tag'; + +export const rootSchema = gql` + schema { + query: Query + mutation: Mutation + } + + type Query + + type Mutation +`; diff --git a/x-pack/plugins/infra/common/graphql/shared/fragments.gql_query.ts b/x-pack/plugins/infra/common/graphql/shared/fragments.gql_query.ts new file mode 100644 index 0000000000000..c324813b65efb --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/shared/fragments.gql_query.ts @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import gql from 'graphql-tag'; + +export const sharedFragments = { + InfraTimeKey: gql` + fragment InfraTimeKeyFields on InfraTimeKey { + time + tiebreaker + } + `, + InfraSourceFields: gql` + fragment InfraSourceFields on InfraSource { + id + version + updatedAt + origin + } + `, + InfraLogEntryFields: gql` + fragment InfraLogEntryFields on InfraLogEntry { + gid + key { + time + tiebreaker + } + columns { + ... on InfraLogEntryTimestampColumn { + columnId + timestamp + } + ... on InfraLogEntryMessageColumn { + columnId + message { + ... on InfraLogMessageFieldSegment { + field + value + } + ... on InfraLogMessageConstantSegment { + constant + } + } + } + ... on InfraLogEntryFieldColumn { + columnId + field + value + } + } + } + `, + InfraLogEntryHighlightFields: gql` + fragment InfraLogEntryHighlightFields on InfraLogEntry { + gid + key { + time + tiebreaker + } + columns { + ... on InfraLogEntryMessageColumn { + columnId + message { + ... on InfraLogMessageFieldSegment { + field + highlights + } + } + } + ... 
on InfraLogEntryFieldColumn { + columnId + field + highlights + } + } + } + `, +}; diff --git a/x-pack/plugins/infra/common/graphql/shared/index.ts b/x-pack/plugins/infra/common/graphql/shared/index.ts new file mode 100644 index 0000000000000..56c8675e76caf --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/shared/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export { sharedFragments } from './fragments.gql_query'; +export { sharedSchema } from './schema.gql'; diff --git a/x-pack/plugins/infra/common/graphql/shared/schema.gql.ts b/x-pack/plugins/infra/common/graphql/shared/schema.gql.ts new file mode 100644 index 0000000000000..071313817eff3 --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/shared/schema.gql.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import gql from 'graphql-tag'; + +export const sharedSchema = gql` + "A representation of the log entry's position in the event stream" + type InfraTimeKey { + "The timestamp of the event that the log entry corresponds to" + time: Float! + "The tiebreaker that disambiguates events with the same timestamp" + tiebreaker: Float! + } + + input InfraTimeKeyInput { + time: Float! + tiebreaker: Float! 
+ } + + enum InfraIndexType { + ANY + LOGS + METRICS + } + + enum InfraNodeType { + pod + container + host + awsEC2 + awsS3 + awsRDS + awsSQS + } +`; diff --git a/x-pack/plugins/infra/common/graphql/types.ts b/x-pack/plugins/infra/common/graphql/types.ts new file mode 100644 index 0000000000000..bb089bf8bf8ad --- /dev/null +++ b/x-pack/plugins/infra/common/graphql/types.ts @@ -0,0 +1,1109 @@ +/* tslint:disable */ + +// ==================================================== +// START: Typescript template +// ==================================================== + +// ==================================================== +// Types +// ==================================================== + +export interface Query { + /** Get an infrastructure data source by id.The resolution order for the source configuration attributes is as followswith the first defined value winning:1. The attributes of the saved object with the given 'id'.2. The attributes defined in the static Kibana configuration key'xpack.infra.sources.default'.3. The hard-coded default values.As a consequence, querying a source that doesn't exist doesn't error out,but returns the configured or hardcoded defaults. 
*/ + source: InfraSource; + /** Get a list of all infrastructure data sources */ + allSources: InfraSource[]; +} +/** A source of infrastructure data */ +export interface InfraSource { + /** The id of the source */ + id: string; + /** The version number the source configuration was last persisted with */ + version?: string | null; + /** The timestamp the source configuration was last persisted at */ + updatedAt?: number | null; + /** The origin of the source (one of 'fallback', 'internal', 'stored') */ + origin: string; + /** The raw configuration of the source */ + configuration: InfraSourceConfiguration; + /** The status of the source */ + status: InfraSourceStatus; + /** A consecutive span of log entries surrounding a point in time */ + logEntriesAround: InfraLogEntryInterval; + /** A consecutive span of log entries within an interval */ + logEntriesBetween: InfraLogEntryInterval; + /** Sequences of log entries matching sets of highlighting queries within an interval */ + logEntryHighlights: InfraLogEntryInterval[]; + + /** A snapshot of nodes */ + snapshot?: InfraSnapshotResponse | null; + + metrics: InfraMetricData[]; +} +/** A set of configuration options for an infrastructure data source */ +export interface InfraSourceConfiguration { + /** The name of the data source */ + name: string; + /** A description of the data source */ + description: string; + /** The alias to read metric data from */ + metricAlias: string; + /** The alias to read log data from */ + logAlias: string; + /** The field mapping to use for this source */ + fields: InfraSourceFields; + /** The columns to use for log display */ + logColumns: InfraSourceLogColumn[]; +} +/** A mapping of semantic fields to their document counterparts */ +export interface InfraSourceFields { + /** The field to identify a container by */ + container: string; + /** The fields to identify a host by */ + host: string; + /** The fields to use as the log message */ + message: string[]; + /** The field to identify a 
pod by */ + pod: string; + /** The field to use as a tiebreaker for log events that have identical timestamps */ + tiebreaker: string; + /** The field to use as a timestamp for metrics and logs */ + timestamp: string; +} +/** The built-in timestamp log column */ +export interface InfraSourceTimestampLogColumn { + timestampColumn: InfraSourceTimestampLogColumnAttributes; +} + +export interface InfraSourceTimestampLogColumnAttributes { + /** A unique id for the column */ + id: string; +} +/** The built-in message log column */ +export interface InfraSourceMessageLogColumn { + messageColumn: InfraSourceMessageLogColumnAttributes; +} + +export interface InfraSourceMessageLogColumnAttributes { + /** A unique id for the column */ + id: string; +} +/** A log column containing a field value */ +export interface InfraSourceFieldLogColumn { + fieldColumn: InfraSourceFieldLogColumnAttributes; +} + +export interface InfraSourceFieldLogColumnAttributes { + /** A unique id for the column */ + id: string; + /** The field name this column refers to */ + field: string; +} +/** The status of an infrastructure data source */ +export interface InfraSourceStatus { + /** Whether the configured metric alias exists */ + metricAliasExists: boolean; + /** Whether the configured log alias exists */ + logAliasExists: boolean; + /** Whether the configured alias or wildcard pattern resolve to any metric indices */ + metricIndicesExist: boolean; + /** Whether the configured alias or wildcard pattern resolve to any log indices */ + logIndicesExist: boolean; + /** The list of indices in the metric alias */ + metricIndices: string[]; + /** The list of indices in the log alias */ + logIndices: string[]; + /** The list of fields defined in the index mappings */ + indexFields: InfraIndexField[]; +} +/** A descriptor of a field in an index */ +export interface InfraIndexField { + /** The name of the field */ + name: string; + /** The type of the field's values as recognized by Kibana */ + type: string; 
+ /** Whether the field's values can be efficiently searched for */ + searchable: boolean; + /** Whether the field's values can be aggregated */ + aggregatable: boolean; + /** Whether the field should be displayed based on event.module and a ECS allowed list */ + displayable: boolean; +} +/** A consecutive sequence of log entries */ +export interface InfraLogEntryInterval { + /** The key corresponding to the start of the interval covered by the entries */ + start?: InfraTimeKey | null; + /** The key corresponding to the end of the interval covered by the entries */ + end?: InfraTimeKey | null; + /** Whether there are more log entries available before the start */ + hasMoreBefore: boolean; + /** Whether there are more log entries available after the end */ + hasMoreAfter: boolean; + /** The query the log entries were filtered by */ + filterQuery?: string | null; + /** The query the log entries were highlighted with */ + highlightQuery?: string | null; + /** A list of the log entries */ + entries: InfraLogEntry[]; +} +/** A representation of the log entry's position in the event stream */ +export interface InfraTimeKey { + /** The timestamp of the event that the log entry corresponds to */ + time: number; + /** The tiebreaker that disambiguates events with the same timestamp */ + tiebreaker: number; +} +/** A log entry */ +export interface InfraLogEntry { + /** A unique representation of the log entry's position in the event stream */ + key: InfraTimeKey; + /** The log entry's id */ + gid: string; + /** The source id */ + source: string; + /** The columns used for rendering the log entry */ + columns: InfraLogEntryColumn[]; +} +/** A special built-in column that contains the log entry's timestamp */ +export interface InfraLogEntryTimestampColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** The timestamp */ + timestamp: number; +} +/** A special built-in column that contains the log entry's constructed message */ +export 
interface InfraLogEntryMessageColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** A list of the formatted log entry segments */ + message: InfraLogMessageSegment[]; +} +/** A segment of the log entry message that was derived from a field */ +export interface InfraLogMessageFieldSegment { + /** The field the segment was derived from */ + field: string; + /** The segment's message */ + value: string; + /** A list of highlighted substrings of the value */ + highlights: string[]; +} +/** A segment of the log entry message that was derived from a string literal */ +export interface InfraLogMessageConstantSegment { + /** The segment's message */ + constant: string; +} +/** A column that contains the value of a field of the log entry */ +export interface InfraLogEntryFieldColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** The field name of the column */ + field: string; + /** The value of the field in the log entry */ + value: string; + /** A list of highlighted substrings of the value */ + highlights: string[]; +} + +export interface InfraSnapshotResponse { + /** Nodes of type host, container or pod grouped by 0, 1 or 2 terms */ + nodes: InfraSnapshotNode[]; +} + +export interface InfraSnapshotNode { + path: InfraSnapshotNodePath[]; + + metric: InfraSnapshotNodeMetric; +} + +export interface InfraSnapshotNodePath { + value: string; + + label: string; + + ip?: string | null; +} + +export interface InfraSnapshotNodeMetric { + name: InfraSnapshotMetricType; + + value?: number | null; + + avg?: number | null; + + max?: number | null; +} + +export interface InfraMetricData { + id?: InfraMetric | null; + + series: InfraDataSeries[]; +} + +export interface InfraDataSeries { + id: string; + + label: string; + + data: InfraDataPoint[]; +} + +export interface InfraDataPoint { + timestamp: number; + + value?: number | null; +} + +export interface Mutation { + /** Create a new source of infrastructure 
data */ + createSource: UpdateSourceResult; + /** Modify an existing source */ + updateSource: UpdateSourceResult; + /** Delete a source of infrastructure data */ + deleteSource: DeleteSourceResult; +} +/** The result of a successful source update */ +export interface UpdateSourceResult { + /** The source that was updated */ + source: InfraSource; +} +/** The result of a source deletion operations */ +export interface DeleteSourceResult { + /** The id of the source that was deleted */ + id: string; +} + +// ==================================================== +// InputTypes +// ==================================================== + +export interface InfraTimeKeyInput { + time: number; + + tiebreaker: number; +} +/** A highlighting definition */ +export interface InfraLogEntryHighlightInput { + /** The query to highlight by */ + query: string; + /** The number of highlighted documents to include beyond the beginning of the interval */ + countBefore: number; + /** The number of highlighted documents to include beyond the end of the interval */ + countAfter: number; +} + +export interface InfraTimerangeInput { + /** The interval string to use for last bucket. The format is '{value}{unit}'. For example '5m' would return the metrics for the last 5 minutes of the timespan. 
*/ + interval: string; + /** The end of the timerange */ + to: number; + /** The beginning of the timerange */ + from: number; +} + +export interface InfraSnapshotGroupbyInput { + /** The label to use in the results for the group by for the terms group by */ + label?: string | null; + /** The field to group by from a terms aggregation, this is ignored by the filter type */ + field?: string | null; +} + +export interface InfraSnapshotMetricInput { + /** The type of metric */ + type: InfraSnapshotMetricType; +} + +export interface InfraNodeIdsInput { + nodeId: string; + + cloudId?: string | null; +} +/** The properties to update the source with */ +export interface UpdateSourceInput { + /** The name of the data source */ + name?: string | null; + /** A description of the data source */ + description?: string | null; + /** The alias to read metric data from */ + metricAlias?: string | null; + /** The alias to read log data from */ + logAlias?: string | null; + /** The field mapping to use for this source */ + fields?: UpdateSourceFieldsInput | null; + /** The log columns to display for this source */ + logColumns?: UpdateSourceLogColumnInput[] | null; +} +/** The mapping of semantic fields of the source to be created */ +export interface UpdateSourceFieldsInput { + /** The field to identify a container by */ + container?: string | null; + /** The fields to identify a host by */ + host?: string | null; + /** The field to identify a pod by */ + pod?: string | null; + /** The field to use as a tiebreaker for log events that have identical timestamps */ + tiebreaker?: string | null; + /** The field to use as a timestamp for metrics and logs */ + timestamp?: string | null; +} +/** One of the log column types to display for this source */ +export interface UpdateSourceLogColumnInput { + /** A custom field log column */ + fieldColumn?: UpdateSourceFieldLogColumnInput | null; + /** A built-in message log column */ + messageColumn?: UpdateSourceMessageLogColumnInput | null; + 
/** A built-in timestamp log column */ + timestampColumn?: UpdateSourceTimestampLogColumnInput | null; +} + +export interface UpdateSourceFieldLogColumnInput { + id: string; + + field: string; +} + +export interface UpdateSourceMessageLogColumnInput { + id: string; +} + +export interface UpdateSourceTimestampLogColumnInput { + id: string; +} + +// ==================================================== +// Arguments +// ==================================================== + +export interface SourceQueryArgs { + /** The id of the source */ + id: string; +} +export interface LogEntriesAroundInfraSourceArgs { + /** The sort key that corresponds to the point in time */ + key: InfraTimeKeyInput; + /** The maximum number of preceding to return */ + countBefore?: number | null; + /** The maximum number of following to return */ + countAfter?: number | null; + /** The query to filter the log entries by */ + filterQuery?: string | null; +} +export interface LogEntriesBetweenInfraSourceArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; +} +export interface LogEntryHighlightsInfraSourceArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; + /** The highlighting to apply to the log entries */ + highlights: InfraLogEntryHighlightInput[]; +} +export interface SnapshotInfraSourceArgs { + timerange: InfraTimerangeInput; + + filterQuery?: string | null; +} +export interface MetricsInfraSourceArgs { + nodeIds: InfraNodeIdsInput; + + nodeType: InfraNodeType; + + timerange: InfraTimerangeInput; + + metrics: InfraMetric[]; +} +export 
interface IndexFieldsInfraSourceStatusArgs { + indexType?: InfraIndexType | null; +} +export interface NodesInfraSnapshotResponseArgs { + type: InfraNodeType; + + groupBy: InfraSnapshotGroupbyInput[]; + + metric: InfraSnapshotMetricInput; +} +export interface CreateSourceMutationArgs { + /** The id of the source */ + id: string; + + sourceProperties: UpdateSourceInput; +} +export interface UpdateSourceMutationArgs { + /** The id of the source */ + id: string; + /** The properties to update the source with */ + sourceProperties: UpdateSourceInput; +} +export interface DeleteSourceMutationArgs { + /** The id of the source */ + id: string; +} + +// ==================================================== +// Enums +// ==================================================== + +export enum InfraIndexType { + ANY = 'ANY', + LOGS = 'LOGS', + METRICS = 'METRICS', +} + +export enum InfraNodeType { + pod = 'pod', + container = 'container', + host = 'host', + awsEC2 = 'awsEC2', + awsS3 = 'awsS3', + awsRDS = 'awsRDS', + awsSQS = 'awsSQS', +} + +export enum InfraSnapshotMetricType { + count = 'count', + cpu = 'cpu', + load = 'load', + memory = 'memory', + tx = 'tx', + rx = 'rx', + logRate = 'logRate', + diskIOReadBytes = 'diskIOReadBytes', + diskIOWriteBytes = 'diskIOWriteBytes', + s3TotalRequests = 's3TotalRequests', + s3NumberOfObjects = 's3NumberOfObjects', + s3BucketSize = 's3BucketSize', + s3DownloadBytes = 's3DownloadBytes', + s3UploadBytes = 's3UploadBytes', + rdsConnections = 'rdsConnections', + rdsQueriesExecuted = 'rdsQueriesExecuted', + rdsActiveTransactions = 'rdsActiveTransactions', + rdsLatency = 'rdsLatency', + sqsMessagesVisible = 'sqsMessagesVisible', + sqsMessagesDelayed = 'sqsMessagesDelayed', + sqsMessagesSent = 'sqsMessagesSent', + sqsMessagesEmpty = 'sqsMessagesEmpty', + sqsOldestMessage = 'sqsOldestMessage', +} + +export enum InfraMetric { + hostSystemOverview = 'hostSystemOverview', + hostCpuUsage = 'hostCpuUsage', + hostFilesystem = 'hostFilesystem', + 
hostK8sOverview = 'hostK8sOverview', + hostK8sCpuCap = 'hostK8sCpuCap', + hostK8sDiskCap = 'hostK8sDiskCap', + hostK8sMemoryCap = 'hostK8sMemoryCap', + hostK8sPodCap = 'hostK8sPodCap', + hostLoad = 'hostLoad', + hostMemoryUsage = 'hostMemoryUsage', + hostNetworkTraffic = 'hostNetworkTraffic', + hostDockerOverview = 'hostDockerOverview', + hostDockerInfo = 'hostDockerInfo', + hostDockerTop5ByCpu = 'hostDockerTop5ByCpu', + hostDockerTop5ByMemory = 'hostDockerTop5ByMemory', + podOverview = 'podOverview', + podCpuUsage = 'podCpuUsage', + podMemoryUsage = 'podMemoryUsage', + podLogUsage = 'podLogUsage', + podNetworkTraffic = 'podNetworkTraffic', + containerOverview = 'containerOverview', + containerCpuKernel = 'containerCpuKernel', + containerCpuUsage = 'containerCpuUsage', + containerDiskIOOps = 'containerDiskIOOps', + containerDiskIOBytes = 'containerDiskIOBytes', + containerMemory = 'containerMemory', + containerNetworkTraffic = 'containerNetworkTraffic', + nginxHits = 'nginxHits', + nginxRequestRate = 'nginxRequestRate', + nginxActiveConnections = 'nginxActiveConnections', + nginxRequestsPerConnection = 'nginxRequestsPerConnection', + awsOverview = 'awsOverview', + awsCpuUtilization = 'awsCpuUtilization', + awsNetworkBytes = 'awsNetworkBytes', + awsNetworkPackets = 'awsNetworkPackets', + awsDiskioBytes = 'awsDiskioBytes', + awsDiskioOps = 'awsDiskioOps', + awsEC2CpuUtilization = 'awsEC2CpuUtilization', + awsEC2DiskIOBytes = 'awsEC2DiskIOBytes', + awsEC2NetworkTraffic = 'awsEC2NetworkTraffic', + awsS3TotalRequests = 'awsS3TotalRequests', + awsS3NumberOfObjects = 'awsS3NumberOfObjects', + awsS3BucketSize = 'awsS3BucketSize', + awsS3DownloadBytes = 'awsS3DownloadBytes', + awsS3UploadBytes = 'awsS3UploadBytes', + awsRDSCpuTotal = 'awsRDSCpuTotal', + awsRDSConnections = 'awsRDSConnections', + awsRDSQueriesExecuted = 'awsRDSQueriesExecuted', + awsRDSActiveTransactions = 'awsRDSActiveTransactions', + awsRDSLatency = 'awsRDSLatency', + awsSQSMessagesVisible = 
'awsSQSMessagesVisible', + awsSQSMessagesDelayed = 'awsSQSMessagesDelayed', + awsSQSMessagesSent = 'awsSQSMessagesSent', + awsSQSMessagesEmpty = 'awsSQSMessagesEmpty', + awsSQSOldestMessage = 'awsSQSOldestMessage', + custom = 'custom', +} + +// ==================================================== +// Unions +// ==================================================== + +/** All known log column types */ +export type InfraSourceLogColumn = + | InfraSourceTimestampLogColumn + | InfraSourceMessageLogColumn + | InfraSourceFieldLogColumn; + +/** A column of a log entry */ +export type InfraLogEntryColumn = + | InfraLogEntryTimestampColumn + | InfraLogEntryMessageColumn + | InfraLogEntryFieldColumn; + +/** A segment of the log entry message */ +export type InfraLogMessageSegment = InfraLogMessageFieldSegment | InfraLogMessageConstantSegment; + +// ==================================================== +// END: Typescript template +// ==================================================== + +// ==================================================== +// Documents +// ==================================================== + +export namespace LogEntryHighlightsQuery { + export type Variables = { + sourceId?: string | null; + startKey: InfraTimeKeyInput; + endKey: InfraTimeKeyInput; + filterQuery?: string | null; + highlights: InfraLogEntryHighlightInput[]; + }; + + export type Query = { + __typename?: 'Query'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + id: string; + + logEntryHighlights: LogEntryHighlights[]; + }; + + export type LogEntryHighlights = { + __typename?: 'InfraLogEntryInterval'; + + start?: Start | null; + + end?: End | null; + + entries: Entries[]; + }; + + export type Start = InfraTimeKeyFields.Fragment; + + export type End = InfraTimeKeyFields.Fragment; + + export type Entries = InfraLogEntryHighlightFields.Fragment; +} + +export namespace MetricsQuery { + export type Variables = { + sourceId: string; + timerange: 
InfraTimerangeInput; + metrics: InfraMetric[]; + nodeId: string; + cloudId?: string | null; + nodeType: InfraNodeType; + }; + + export type Query = { + __typename?: 'Query'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + id: string; + + metrics: Metrics[]; + }; + + export type Metrics = { + __typename?: 'InfraMetricData'; + + id?: InfraMetric | null; + + series: Series[]; + }; + + export type Series = { + __typename?: 'InfraDataSeries'; + + id: string; + + label: string; + + data: Data[]; + }; + + export type Data = { + __typename?: 'InfraDataPoint'; + + timestamp: number; + + value?: number | null; + }; +} + +export namespace CreateSourceConfigurationMutation { + export type Variables = { + sourceId: string; + sourceProperties: UpdateSourceInput; + }; + + export type Mutation = { + __typename?: 'Mutation'; + + createSource: CreateSource; + }; + + export type CreateSource = { + __typename?: 'UpdateSourceResult'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + configuration: Configuration; + + status: Status; + } & InfraSourceFields.Fragment; + + export type Configuration = SourceConfigurationFields.Fragment; + + export type Status = SourceStatusFields.Fragment; +} + +export namespace SourceQuery { + export type Variables = { + sourceId?: string | null; + }; + + export type Query = { + __typename?: 'Query'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + configuration: Configuration; + + status: Status; + } & InfraSourceFields.Fragment; + + export type Configuration = SourceConfigurationFields.Fragment; + + export type Status = SourceStatusFields.Fragment; +} + +export namespace UpdateSourceMutation { + export type Variables = { + sourceId?: string | null; + sourceProperties: UpdateSourceInput; + }; + + export type Mutation = { + __typename?: 'Mutation'; + + updateSource: UpdateSource; + }; + + export type UpdateSource = { + __typename?: 
'UpdateSourceResult'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + configuration: Configuration; + + status: Status; + } & InfraSourceFields.Fragment; + + export type Configuration = SourceConfigurationFields.Fragment; + + export type Status = SourceStatusFields.Fragment; +} + +export namespace WaffleNodesQuery { + export type Variables = { + sourceId: string; + timerange: InfraTimerangeInput; + filterQuery?: string | null; + metric: InfraSnapshotMetricInput; + groupBy: InfraSnapshotGroupbyInput[]; + type: InfraNodeType; + }; + + export type Query = { + __typename?: 'Query'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + id: string; + + snapshot?: Snapshot | null; + }; + + export type Snapshot = { + __typename?: 'InfraSnapshotResponse'; + + nodes: Nodes[]; + }; + + export type Nodes = { + __typename?: 'InfraSnapshotNode'; + + path: Path[]; + + metric: Metric; + }; + + export type Path = { + __typename?: 'InfraSnapshotNodePath'; + + value: string; + + label: string; + + ip?: string | null; + }; + + export type Metric = { + __typename?: 'InfraSnapshotNodeMetric'; + + name: InfraSnapshotMetricType; + + value?: number | null; + + avg?: number | null; + + max?: number | null; + }; +} + +export namespace LogEntries { + export type Variables = { + sourceId?: string | null; + timeKey: InfraTimeKeyInput; + countBefore?: number | null; + countAfter?: number | null; + filterQuery?: string | null; + }; + + export type Query = { + __typename?: 'Query'; + + source: Source; + }; + + export type Source = { + __typename?: 'InfraSource'; + + id: string; + + logEntriesAround: LogEntriesAround; + }; + + export type LogEntriesAround = { + __typename?: 'InfraLogEntryInterval'; + + start?: Start | null; + + end?: End | null; + + hasMoreBefore: boolean; + + hasMoreAfter: boolean; + + entries: Entries[]; + }; + + export type Start = InfraTimeKeyFields.Fragment; + + export type End = InfraTimeKeyFields.Fragment; 
+ + export type Entries = InfraLogEntryFields.Fragment; +} + +export namespace SourceConfigurationFields { + export type Fragment = { + __typename?: 'InfraSourceConfiguration'; + + name: string; + + description: string; + + logAlias: string; + + metricAlias: string; + + fields: Fields; + + logColumns: LogColumns[]; + }; + + export type Fields = { + __typename?: 'InfraSourceFields'; + + container: string; + + host: string; + + message: string[]; + + pod: string; + + tiebreaker: string; + + timestamp: string; + }; + + export type LogColumns = + | InfraSourceTimestampLogColumnInlineFragment + | InfraSourceMessageLogColumnInlineFragment + | InfraSourceFieldLogColumnInlineFragment; + + export type InfraSourceTimestampLogColumnInlineFragment = { + __typename?: 'InfraSourceTimestampLogColumn'; + + timestampColumn: TimestampColumn; + }; + + export type TimestampColumn = { + __typename?: 'InfraSourceTimestampLogColumnAttributes'; + + id: string; + }; + + export type InfraSourceMessageLogColumnInlineFragment = { + __typename?: 'InfraSourceMessageLogColumn'; + + messageColumn: MessageColumn; + }; + + export type MessageColumn = { + __typename?: 'InfraSourceMessageLogColumnAttributes'; + + id: string; + }; + + export type InfraSourceFieldLogColumnInlineFragment = { + __typename?: 'InfraSourceFieldLogColumn'; + + fieldColumn: FieldColumn; + }; + + export type FieldColumn = { + __typename?: 'InfraSourceFieldLogColumnAttributes'; + + id: string; + + field: string; + }; +} + +export namespace SourceStatusFields { + export type Fragment = { + __typename?: 'InfraSourceStatus'; + + indexFields: IndexFields[]; + + logIndicesExist: boolean; + + metricIndicesExist: boolean; + }; + + export type IndexFields = { + __typename?: 'InfraIndexField'; + + name: string; + + type: string; + + searchable: boolean; + + aggregatable: boolean; + + displayable: boolean; + }; +} + +export namespace InfraTimeKeyFields { + export type Fragment = { + __typename?: 'InfraTimeKey'; + + time: number; + + 
tiebreaker: number; + }; +} + +export namespace InfraSourceFields { + export type Fragment = { + __typename?: 'InfraSource'; + + id: string; + + version?: string | null; + + updatedAt?: number | null; + + origin: string; + }; +} + +export namespace InfraLogEntryFields { + export type Fragment = { + __typename?: 'InfraLogEntry'; + + gid: string; + + key: Key; + + columns: Columns[]; + }; + + export type Key = { + __typename?: 'InfraTimeKey'; + + time: number; + + tiebreaker: number; + }; + + export type Columns = + | InfraLogEntryTimestampColumnInlineFragment + | InfraLogEntryMessageColumnInlineFragment + | InfraLogEntryFieldColumnInlineFragment; + + export type InfraLogEntryTimestampColumnInlineFragment = { + __typename?: 'InfraLogEntryTimestampColumn'; + + columnId: string; + + timestamp: number; + }; + + export type InfraLogEntryMessageColumnInlineFragment = { + __typename?: 'InfraLogEntryMessageColumn'; + + columnId: string; + + message: Message[]; + }; + + export type Message = + | InfraLogMessageFieldSegmentInlineFragment + | InfraLogMessageConstantSegmentInlineFragment; + + export type InfraLogMessageFieldSegmentInlineFragment = { + __typename?: 'InfraLogMessageFieldSegment'; + + field: string; + + value: string; + }; + + export type InfraLogMessageConstantSegmentInlineFragment = { + __typename?: 'InfraLogMessageConstantSegment'; + + constant: string; + }; + + export type InfraLogEntryFieldColumnInlineFragment = { + __typename?: 'InfraLogEntryFieldColumn'; + + columnId: string; + + field: string; + + value: string; + }; +} + +export namespace InfraLogEntryHighlightFields { + export type Fragment = { + __typename?: 'InfraLogEntry'; + + gid: string; + + key: Key; + + columns: Columns[]; + }; + + export type Key = { + __typename?: 'InfraTimeKey'; + + time: number; + + tiebreaker: number; + }; + + export type Columns = + | InfraLogEntryMessageColumnInlineFragment + | InfraLogEntryFieldColumnInlineFragment; + + export type InfraLogEntryMessageColumnInlineFragment 
= { + __typename?: 'InfraLogEntryMessageColumn'; + + columnId: string; + + message: Message[]; + }; + + export type Message = InfraLogMessageFieldSegmentInlineFragment; + + export type InfraLogMessageFieldSegmentInlineFragment = { + __typename?: 'InfraLogMessageFieldSegment'; + + field: string; + + highlights: string[]; + }; + + export type InfraLogEntryFieldColumnInlineFragment = { + __typename?: 'InfraLogEntryFieldColumn'; + + columnId: string; + + field: string; + + highlights: string[]; + }; +} diff --git a/x-pack/plugins/infra/common/http_api/index.ts b/x-pack/plugins/infra/common/http_api/index.ts new file mode 100644 index 0000000000000..326daa93de33a --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/index.ts @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_analysis'; +export * from './metadata_api'; +export * from './log_entries'; +export * from './metrics_explorer'; diff --git a/x-pack/plugins/infra/common/http_api/inventory_meta_api.ts b/x-pack/plugins/infra/common/http_api/inventory_meta_api.ts new file mode 100644 index 0000000000000..77de515c9cc46 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/inventory_meta_api.ts @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { ItemTypeRT } from '../inventory_models/types'; + +const CloudAccountRT = rt.type({ + value: rt.string, + name: rt.string, +}); + +export const InventoryMetaResponseRT = rt.type({ + accounts: rt.array(CloudAccountRT), + projects: rt.array(rt.string), + regions: rt.array(rt.string), +}); + +export const InventoryMetaRequestRT = rt.type({ + sourceId: rt.string, + nodeType: ItemTypeRT, +}); + +export type InventoryMetaRequest = rt.TypeOf<typeof InventoryMetaRequestRT>; +export type InventoryMetaResponse = rt.TypeOf<typeof InventoryMetaResponseRT>; +export type InventoryCloudAccount = rt.TypeOf<typeof CloudAccountRT>; diff --git a/x-pack/plugins/infra/common/http_api/log_analysis/index.ts b/x-pack/plugins/infra/common/http_api/log_analysis/index.ts new file mode 100644 index 0000000000000..378e32cb3582c --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_analysis/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './results'; +export * from './validation'; diff --git a/x-pack/plugins/infra/common/http_api/log_analysis/results/index.ts b/x-pack/plugins/infra/common/http_api/log_analysis/results/index.ts new file mode 100644 index 0000000000000..1749421277719 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_analysis/results/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './log_entry_rate'; diff --git a/x-pack/plugins/infra/common/http_api/log_analysis/results/log_entry_rate.ts b/x-pack/plugins/infra/common/http_api/log_analysis/results/log_entry_rate.ts new file mode 100644 index 0000000000000..dfc3d2aabd11a --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_analysis/results/log_entry_rate.ts @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +import { badRequestErrorRT, conflictErrorRT, forbiddenErrorRT, timeRangeRT } from '../../shared'; + +export const LOG_ANALYSIS_GET_LOG_ENTRY_RATE_PATH = + '/api/infra/log_analysis/results/log_entry_rate'; + +/** + * request + */ + +export const getLogEntryRateRequestPayloadRT = rt.type({ + data: rt.type({ + bucketDuration: rt.number, + sourceId: rt.string, + timeRange: timeRangeRT, + }), +}); + +export type GetLogEntryRateRequestPayload = rt.TypeOf<typeof getLogEntryRateRequestPayloadRT>; + +/** + * response + */ + +export const logEntryRateAnomalyRT = rt.type({ + actualLogEntryRate: rt.number, + anomalyScore: rt.number, + duration: rt.number, + startTime: rt.number, + typicalLogEntryRate: rt.number, +}); + +export const logEntryRatePartitionRT = rt.type({ + analysisBucketCount: rt.number, + anomalies: rt.array(logEntryRateAnomalyRT), + averageActualLogEntryRate: rt.number, + maximumAnomalyScore: rt.number, + numberOfLogEntries: rt.number, + partitionId: rt.string, +}); + +export type LogEntryRatePartition = rt.TypeOf<typeof logEntryRatePartitionRT>; + +export const logEntryRateHistogramBucketRT = rt.type({ + partitions: rt.array(logEntryRatePartitionRT), + startTime: rt.number, +}); + +export type LogEntryRateHistogramBucket = rt.TypeOf<typeof logEntryRateHistogramBucketRT>; + +export const getLogEntryRateSuccessReponsePayloadRT = rt.type({ + data: rt.type({ + bucketDuration: rt.number, + histogramBuckets: 
rt.array(logEntryRateHistogramBucketRT), + totalNumberOfLogEntries: rt.number, + }), +}); + +export type GetLogEntryRateSuccessResponsePayload = rt.TypeOf< + typeof getLogEntryRateSuccessReponsePayloadRT +>; + +export const getLogEntryRateResponsePayloadRT = rt.union([ + getLogEntryRateSuccessReponsePayloadRT, + badRequestErrorRT, + conflictErrorRT, + forbiddenErrorRT, +]); + +export type GetLogEntryRateReponsePayload = rt.TypeOf<typeof getLogEntryRateResponsePayloadRT>; diff --git a/x-pack/plugins/infra/common/http_api/log_analysis/validation/index.ts b/x-pack/plugins/infra/common/http_api/log_analysis/validation/index.ts new file mode 100644 index 0000000000000..f23ef7ee7c302 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_analysis/validation/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_entry_rate_indices'; diff --git a/x-pack/plugins/infra/common/http_api/log_analysis/validation/log_entry_rate_indices.ts b/x-pack/plugins/infra/common/http_api/log_analysis/validation/log_entry_rate_indices.ts new file mode 100644 index 0000000000000..5b2509074f6ed --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_analysis/validation/log_entry_rate_indices.ts @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +export const LOG_ANALYSIS_VALIDATE_INDICES_PATH = + '/api/infra/log_analysis/validation/log_entry_rate_indices'; + +/** + * Request types + */ +export const validationIndicesFieldSpecificationRT = rt.type({ + name: rt.string, + validTypes: rt.array(rt.string), +}); + +export type ValidationIndicesFieldSpecification = rt.TypeOf< + typeof validationIndicesFieldSpecificationRT +>; + +export const validationIndicesRequestPayloadRT = rt.type({ + data: rt.type({ + fields: rt.array(validationIndicesFieldSpecificationRT), + indices: rt.array(rt.string), + }), +}); + +export type ValidationIndicesRequestPayload = rt.TypeOf<typeof validationIndicesRequestPayloadRT>; + +/** + * Response types + * */ +export const validationIndicesErrorRT = rt.union([ + rt.type({ + error: rt.literal('INDEX_NOT_FOUND'), + index: rt.string, + }), + rt.type({ + error: rt.literal('FIELD_NOT_FOUND'), + index: rt.string, + field: rt.string, + }), + rt.type({ + error: rt.literal('FIELD_NOT_VALID'), + index: rt.string, + field: rt.string, + }), +]); + +export type ValidationIndicesError = rt.TypeOf<typeof validationIndicesErrorRT>; + +export const validationIndicesResponsePayloadRT = rt.type({ + data: rt.type({ + errors: rt.array(validationIndicesErrorRT), + }), +}); + +export type ValidationIndicesResponsePayload = rt.TypeOf<typeof validationIndicesResponsePayloadRT>; diff --git a/x-pack/plugins/infra/common/http_api/log_entries/common.ts b/x-pack/plugins/infra/common/http_api/log_entries/common.ts new file mode 100644 index 0000000000000..3eb7e278bf99c --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_entries/common.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +export const logEntriesCursorRT = rt.type({ + time: rt.number, + tiebreaker: rt.number, +}); diff --git a/x-pack/plugins/infra/common/http_api/log_entries/index.ts b/x-pack/plugins/infra/common/http_api/log_entries/index.ts new file mode 100644 index 0000000000000..8fed914c3dc8c --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_entries/index.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './item'; +export * from './summary'; +export * from './summary_highlights'; diff --git a/x-pack/plugins/infra/common/http_api/log_entries/item.ts b/x-pack/plugins/infra/common/http_api/log_entries/item.ts new file mode 100644 index 0000000000000..02335d68402c0 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_entries/item.ts @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { logEntriesCursorRT } from './common'; + +export const LOG_ENTRIES_ITEM_PATH = '/api/log_entries/item'; + +export const logEntriesItemRequestRT = rt.type({ + sourceId: rt.string, + id: rt.string, +}); + +export type LogEntriesItemRequest = rt.TypeOf<typeof logEntriesItemRequestRT>; + +const logEntriesItemFieldRT = rt.type({ field: rt.string, value: rt.string }); +const logEntriesItemRT = rt.type({ + id: rt.string, + index: rt.string, + fields: rt.array(logEntriesItemFieldRT), + key: logEntriesCursorRT, +}); +export const logEntriesItemResponseRT = rt.type({ + data: logEntriesItemRT, +}); + +export type LogEntriesItemField = rt.TypeOf<typeof logEntriesItemFieldRT>; +export type LogEntriesItem = rt.TypeOf<typeof logEntriesItemRT>; +export type LogEntriesItemResponse = rt.TypeOf<typeof logEntriesItemResponseRT>; diff --git a/x-pack/plugins/infra/common/http_api/log_entries/summary.ts b/x-pack/plugins/infra/common/http_api/log_entries/summary.ts new file mode 100644 index 0000000000000..4a2c0db0e995e --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_entries/summary.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +export const LOG_ENTRIES_SUMMARY_PATH = '/api/log_entries/summary'; + +export const logEntriesSummaryRequestRT = rt.type({ + sourceId: rt.string, + startDate: rt.number, + endDate: rt.number, + bucketSize: rt.number, + query: rt.union([rt.string, rt.undefined, rt.null]), +}); + +export type LogEntriesSummaryRequest = rt.TypeOf; + +export const logEntriesSummaryBucketRT = rt.type({ + start: rt.number, + end: rt.number, + entriesCount: rt.number, +}); + +export type LogEntriesSummaryBucket = rt.TypeOf; + +export const logEntriesSummaryResponseRT = rt.type({ + data: rt.type({ + start: rt.number, + end: rt.number, + buckets: rt.array(logEntriesSummaryBucketRT), + }), +}); + +export type LogEntriesSummaryResponse = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/http_api/log_entries/summary_highlights.ts b/x-pack/plugins/infra/common/http_api/log_entries/summary_highlights.ts new file mode 100644 index 0000000000000..30222cd71bbde --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/log_entries/summary_highlights.ts @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { logEntriesSummaryRequestRT, logEntriesSummaryBucketRT } from './summary'; +import { logEntriesCursorRT } from './common'; + +export const LOG_ENTRIES_SUMMARY_HIGHLIGHTS_PATH = '/api/log_entries/summary_highlights'; + +export const logEntriesSummaryHighlightsRequestRT = rt.intersection([ + logEntriesSummaryRequestRT, + rt.type({ + highlightTerms: rt.array(rt.string), + }), +]); + +export type LogEntriesSummaryHighlightsRequest = rt.TypeOf< + typeof logEntriesSummaryHighlightsRequestRT +>; + +export const logEntriesSummaryHighlightsBucketRT = rt.intersection([ + logEntriesSummaryBucketRT, + rt.type({ + representativeKey: logEntriesCursorRT, + }), +]); + +export type LogEntriesSummaryHighlightsBucket = rt.TypeOf< + typeof logEntriesSummaryHighlightsBucketRT +>; + +export const logEntriesSummaryHighlightsResponseRT = rt.type({ + data: rt.array( + rt.type({ + start: rt.number, + end: rt.number, + buckets: rt.array(logEntriesSummaryHighlightsBucketRT), + }) + ), +}); +export type LogEntriesSummaryHighlightsResponse = rt.TypeOf< + typeof logEntriesSummaryHighlightsResponseRT +>; diff --git a/x-pack/plugins/infra/common/http_api/metadata_api.ts b/x-pack/plugins/infra/common/http_api/metadata_api.ts new file mode 100644 index 0000000000000..7fc3c3e876f08 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/metadata_api.ts @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { ItemTypeRT } from '../../common/inventory_models/types'; + +export const InfraMetadataRequestRT = rt.type({ + nodeId: rt.string, + nodeType: ItemTypeRT, + sourceId: rt.string, +}); + +export const InfraMetadataFeatureRT = rt.type({ + name: rt.string, + source: rt.string, +}); + +export const InfraMetadataOSRT = rt.partial({ + codename: rt.string, + family: rt.string, + kernel: rt.string, + name: rt.string, + platform: rt.string, + version: rt.string, +}); + +export const InfraMetadataHostRT = rt.partial({ + name: rt.string, + os: InfraMetadataOSRT, + architecture: rt.string, + containerized: rt.boolean, +}); + +export const InfraMetadataInstanceRT = rt.partial({ + id: rt.string, + name: rt.string, +}); + +export const InfraMetadataProjectRT = rt.partial({ + id: rt.string, +}); + +export const InfraMetadataMachineRT = rt.partial({ + interface: rt.string, +}); + +export const InfraMetadataCloudRT = rt.partial({ + instance: InfraMetadataInstanceRT, + provider: rt.string, + availability_zone: rt.string, + project: InfraMetadataProjectRT, + machine: InfraMetadataMachineRT, +}); + +export const InfraMetadataInfoRT = rt.partial({ + cloud: InfraMetadataCloudRT, + host: InfraMetadataHostRT, +}); + +const InfraMetadataRequiredRT = rt.type({ + id: rt.string, + name: rt.string, + features: rt.array(InfraMetadataFeatureRT), +}); + +const InfraMetadataOptionalRT = rt.partial({ + info: InfraMetadataInfoRT, +}); + +export const InfraMetadataRT = rt.intersection([InfraMetadataRequiredRT, InfraMetadataOptionalRT]); + +export type InfraMetadata = rt.TypeOf; + +export type InfraMetadataRequest = rt.TypeOf; + +export type InfraMetadataFeature = rt.TypeOf; + +export type InfraMetadataInfo = rt.TypeOf; + +export type InfraMetadataCloud = rt.TypeOf; + +export type InfraMetadataInstance = rt.TypeOf; + +export type InfraMetadataProject = rt.TypeOf; + +export type InfraMetadataMachine = rt.TypeOf; + +export type InfraMetadataHost = rt.TypeOf; + 
+export type InfraMetadataOS = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/http_api/metrics_explorer/index.ts b/x-pack/plugins/infra/common/http_api/metrics_explorer/index.ts new file mode 100644 index 0000000000000..c10f86c40ad46 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/metrics_explorer/index.ts @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +export const METRIC_EXPLORER_AGGREGATIONS = [ + 'avg', + 'max', + 'min', + 'cardinality', + 'rate', + 'count', +] as const; + +type MetricExplorerAggregations = typeof METRIC_EXPLORER_AGGREGATIONS[number]; + +const metricsExplorerAggregationKeys = METRIC_EXPLORER_AGGREGATIONS.reduce< + Record +>((acc, agg) => ({ ...acc, [agg]: null }), {} as Record); + +export const metricsExplorerAggregationRT = rt.keyof(metricsExplorerAggregationKeys); + +export const metricsExplorerMetricRequiredFieldsRT = rt.type({ + aggregation: metricsExplorerAggregationRT, +}); + +export const metricsExplorerMetricOptionalFieldsRT = rt.partial({ + field: rt.union([rt.string, rt.undefined]), +}); + +export const metricsExplorerMetricRT = rt.intersection([ + metricsExplorerMetricRequiredFieldsRT, + metricsExplorerMetricOptionalFieldsRT, +]); + +export const timeRangeRT = rt.type({ + field: rt.string, + from: rt.number, + to: rt.number, + interval: rt.string, +}); + +export const metricsExplorerRequestBodyRequiredFieldsRT = rt.type({ + timerange: timeRangeRT, + indexPattern: rt.string, + metrics: rt.array(metricsExplorerMetricRT), +}); + +export const metricsExplorerRequestBodyOptionalFieldsRT = rt.partial({ + groupBy: rt.union([rt.string, rt.null, rt.undefined]), + afterKey: rt.union([rt.string, rt.null, rt.undefined]), + limit: rt.union([rt.number, rt.null, rt.undefined]), + 
filterQuery: rt.union([rt.string, rt.null, rt.undefined]), +}); + +export const metricsExplorerRequestBodyRT = rt.intersection([ + metricsExplorerRequestBodyRequiredFieldsRT, + metricsExplorerRequestBodyOptionalFieldsRT, +]); + +export const metricsExplorerPageInfoRT = rt.type({ + total: rt.number, + afterKey: rt.union([rt.string, rt.null]), +}); + +export const metricsExplorerColumnTypeRT = rt.keyof({ + date: null, + number: null, + string: null, +}); + +export const metricsExplorerColumnRT = rt.type({ + name: rt.string, + type: metricsExplorerColumnTypeRT, +}); + +export const metricsExplorerRowRT = rt.intersection([ + rt.type({ + timestamp: rt.number, + }), + rt.record(rt.string, rt.union([rt.string, rt.number, rt.null, rt.undefined])), +]); + +export const metricsExplorerSeriesRT = rt.type({ + id: rt.string, + columns: rt.array(metricsExplorerColumnRT), + rows: rt.array(metricsExplorerRowRT), +}); + +export const metricsExplorerResponseRT = rt.type({ + series: rt.array(metricsExplorerSeriesRT), + pageInfo: metricsExplorerPageInfoRT, +}); diff --git a/x-pack/plugins/infra/common/http_api/node_details_api.ts b/x-pack/plugins/infra/common/http_api/node_details_api.ts new file mode 100644 index 0000000000000..46aab881bce4c --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/node_details_api.ts @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { InventoryMetricRT, ItemTypeRT } from '../inventory_models/types'; +import { InfraTimerangeInputRT } from './snapshot_api'; + +const NodeDetailsDataPointRT = rt.intersection([ + rt.type({ + timestamp: rt.number, + }), + rt.partial({ + value: rt.union([rt.number, rt.null]), + }), +]); + +const NodeDetailsDataSeries = rt.type({ + id: rt.string, + label: rt.string, + data: rt.array(NodeDetailsDataPointRT), +}); + +export const NodeDetailsMetricDataRT = rt.intersection([ + rt.partial({ + id: rt.union([InventoryMetricRT, rt.null]), + }), + rt.type({ + series: rt.array(NodeDetailsDataSeries), + }), +]); + +export const NodeDetailsMetricDataResponseRT = rt.type({ + metrics: rt.array(NodeDetailsMetricDataRT), +}); + +export const NodeDetailsRequestRT = rt.intersection([ + rt.type({ + nodeType: ItemTypeRT, + nodeId: rt.string, + metrics: rt.array(InventoryMetricRT), + timerange: InfraTimerangeInputRT, + sourceId: rt.string, + }), + rt.partial({ + cloudId: rt.union([rt.string, rt.null]), + }), +]); + +// export type NodeDetailsRequest = InfraWrappableRequest; + +export type NodeDetailsRequest = rt.TypeOf; +export type NodeDetailsMetricDataResponse = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/http_api/shared/errors.ts b/x-pack/plugins/infra/common/http_api/shared/errors.ts new file mode 100644 index 0000000000000..74608cec4d0dc --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/shared/errors.ts @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +const createErrorRuntimeType = ( + statusCode: number, + errorCode: string, + attributes?: Attributes +) => + rt.type({ + statusCode: rt.literal(statusCode), + error: rt.literal(errorCode), + message: rt.string, + ...(!!attributes ? 
{ attributes } : {}), + }); + +export const badRequestErrorRT = createErrorRuntimeType(400, 'Bad Request'); +export const forbiddenErrorRT = createErrorRuntimeType(403, 'Forbidden'); +export const conflictErrorRT = createErrorRuntimeType(409, 'Conflict'); diff --git a/x-pack/plugins/infra/common/http_api/shared/index.ts b/x-pack/plugins/infra/common/http_api/shared/index.ts new file mode 100644 index 0000000000000..1047ca2f2a01a --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/shared/index.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './errors'; +export * from './metric_statistics'; +export * from './time_range'; diff --git a/x-pack/plugins/infra/common/http_api/shared/metric_statistics.ts b/x-pack/plugins/infra/common/http_api/shared/metric_statistics.ts new file mode 100644 index 0000000000000..70bd85402c433 --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/shared/metric_statistics.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +export const metricStatisticsRT = rt.type({ + avg: rt.union([rt.number, rt.null]), + count: rt.number, + max: rt.union([rt.number, rt.null]), + min: rt.union([rt.number, rt.null]), + sum: rt.number, +}); diff --git a/x-pack/plugins/infra/common/http_api/shared/time_range.ts b/x-pack/plugins/infra/common/http_api/shared/time_range.ts new file mode 100644 index 0000000000000..efda07423748b --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/shared/time_range.ts @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +export const timeRangeRT = rt.type({ + startTime: rt.number, + endTime: rt.number, +}); + +export type TimeRange = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/http_api/snapshot_api.ts b/x-pack/plugins/infra/common/http_api/snapshot_api.ts new file mode 100644 index 0000000000000..4ee0c9e23b68f --- /dev/null +++ b/x-pack/plugins/infra/common/http_api/snapshot_api.ts @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; +import { SnapshotMetricTypeRT, ItemTypeRT } from '../inventory_models/types'; + +export const SnapshotNodePathRT = rt.intersection([ + rt.type({ + value: rt.string, + label: rt.string, + }), + rt.partial({ + ip: rt.union([rt.string, rt.null]), + }), +]); + +const SnapshotNodeMetricOptionalRT = rt.partial({ + value: rt.union([rt.number, rt.null]), + average: rt.union([rt.number, rt.null]), + max: rt.union([rt.number, rt.null]), +}); + +const SnapshotNodeMetricRequiredRT = rt.type({ + name: SnapshotMetricTypeRT, +}); + +export const SnapshotNodeRT = rt.type({ + metric: rt.intersection([SnapshotNodeMetricRequiredRT, SnapshotNodeMetricOptionalRT]), + path: rt.array(SnapshotNodePathRT), +}); + +export const SnapshotNodeResponseRT = rt.type({ + nodes: rt.array(SnapshotNodeRT), + interval: rt.string, +}); + +export const InfraTimerangeInputRT = rt.type({ + interval: rt.string, + to: rt.number, + from: rt.number, +}); + +export const SnapshotRequestRT = rt.intersection([ + rt.type({ + timerange: InfraTimerangeInputRT, + metric: rt.type({ + type: SnapshotMetricTypeRT, + }), 
+ groupBy: rt.array( + rt.partial({ + label: rt.union([rt.string, rt.null]), + field: rt.union([rt.string, rt.null]), + }) + ), + nodeType: ItemTypeRT, + sourceId: rt.string, + }), + rt.partial({ + accountId: rt.string, + region: rt.string, + filterQuery: rt.union([rt.string, rt.null]), + }), +]); + +export type SnapshotRequest = rt.TypeOf; +export type SnapshotNode = rt.TypeOf; +export type SnapshotNodeResponse = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/index.ts new file mode 100644 index 0000000000000..ccfd8cd9851eb --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/index.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; + +export const awsEC2: InventoryModel = { + id: 'awsEC2', + displayName: i18n.translate('xpack.infra.inventoryModels.awsEC2.displayName', { + defaultMessage: 'EC2 Instances', + }), + requiredModule: 'aws', + crosslinkSupport: { + details: true, + logs: true, + apm: true, + uptime: true, + }, + metrics, + fields: { + id: 'cloud.instance.id', + name: 'cloud.instance.name', + ip: 'aws.ec2.instance.public.ip', + }, + requiredMetrics: ['awsEC2CpuUtilization', 'awsEC2NetworkTraffic', 'awsEC2DiskIOBytes'], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/layout.tsx b/x-pack/plugins/infra/common/inventory_models/aws_ec2/layout.tsx new file mode 100644 index 0000000000000..a3074b78f9f3b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/layout.tsx @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import { MetadataDetails } from '../../../public/pages/metrics/components/metadata_details'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + + +
+ + + + + + + + + +
+
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/index.ts new file mode 100644 index 0000000000000..18b7cca2048a5 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/index.ts @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { cpu } from './snapshot/cpu'; +import { rx } from './snapshot/rx'; +import { tx } from './snapshot/tx'; +import { diskIOReadBytes } from './snapshot/disk_io_read_bytes'; +import { diskIOWriteBytes } from './snapshot/disk_io_write_bytes'; + +import { awsEC2CpuUtilization } from './tsvb/aws_ec2_cpu_utilization'; +import { awsEC2NetworkTraffic } from './tsvb/aws_ec2_network_traffic'; +import { awsEC2DiskIOBytes } from './tsvb/aws_ec2_diskio_bytes'; + +import { InventoryMetrics } from '../../types'; + +export const metrics: InventoryMetrics = { + tsvb: { + awsEC2CpuUtilization, + awsEC2NetworkTraffic, + awsEC2DiskIOBytes, + }, + snapshot: { cpu, rx, tx, diskIOReadBytes, diskIOWriteBytes }, + defaultSnapshot: 'cpu', + defaultTimeRangeInSeconds: 14400, // 4 hours +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/cpu.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/cpu.ts new file mode 100644 index 0000000000000..483d9de784919 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/cpu.ts @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const cpu: SnapshotModel = { + cpu_avg: { + avg: { + field: 'aws.ec2.cpu.total.pct', + }, + }, + cpu: { + bucket_script: { + buckets_path: { + cpu: 'cpu_avg', + }, + script: { + source: 'params.cpu / 100', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_read_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_read_bytes.ts new file mode 100644 index 0000000000000..48e4a9eb59fad --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_read_bytes.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const diskIOReadBytes: SnapshotModel = { + diskIOReadBytes: { + avg: { + field: 'aws.ec2.diskio.read.bytes_per_sec', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_write_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_write_bytes.ts new file mode 100644 index 0000000000000..deadaa8c4a776 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/disk_io_write_bytes.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const diskIOWriteBytes: SnapshotModel = { + diskIOWriteBytes: { + avg: { + field: 'aws.ec2.diskio.write.bytes_per_sec', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/rx.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/rx.ts new file mode 100644 index 0000000000000..2b857ce9b338a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/rx.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const rx: SnapshotModel = { + rx: { + avg: { + field: 'aws.ec2.network.in.bytes_per_sec', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/tx.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/tx.ts new file mode 100644 index 0000000000000..63c9da8ea1888 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/snapshot/tx.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const tx: SnapshotModel = { + tx: { + avg: { + field: 'aws.ec2.network.in.bytes_per_sec', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_cpu_utilization.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_cpu_utilization.ts new file mode 100644 index 0000000000000..a7a06ef1cfc1d --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_cpu_utilization.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsEC2CpuUtilization = createTSVBModel( + 'awsEC2CpuUtilization', + ['aws.ec2'], + [ + { + id: 'total', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.cpu.total.pct', + id: 'avg-cpu', + type: 'avg', + }, + { + id: 'convert-to-percent', + script: 'params.avg / 100', + type: 'calculation', + variables: [ + { + field: 'avg-cpu', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_diskio_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_diskio_bytes.ts new file mode 100644 index 0000000000000..35d165936211a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_diskio_bytes.ts @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; +export const awsEC2DiskIOBytes = createTSVBModel( + 'awsEC2DiskIOBytes', + ['aws.ec2'], + [ + { + id: 'write', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.diskio.write.bytes_per_sec', + id: 'avg-write', + type: 'avg', + }, + ], + }, + { + id: 'read', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.diskio.read.bytes_per_sec', + id: 'avg-read', + type: 'avg', + }, + { + id: 'calculation-rate', + type: 'calculation', + variables: [{ id: 'rate-var', name: 'rate', field: 'avg-read' }], + script: 'params.rate * -1', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_network_traffic.ts b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_network_traffic.ts new file mode 100644 index 0000000000000..ea4b41d0bcd68 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/metrics/tsvb/aws_ec2_network_traffic.ts @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; +export const awsEC2NetworkTraffic = createTSVBModel( + 'awsEC2NetworkTraffic', + ['aws.ec2'], + [ + { + id: 'tx', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.network.out.bytes_per_sec', + id: 'avg-tx', + type: 'avg', + }, + ], + }, + { + id: 'rx', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.network.in.bytes_per_sec', + id: 'avg-rx', + type: 'avg', + }, + { + id: 'calculation-rate', + type: 'calculation', + variables: [{ id: 'rate-var', name: 'rate', field: 'avg-rx' }], + script: 'params.rate * -1', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_ec2/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/aws_ec2/toolbar_items.tsx new file mode 100644 index 0000000000000..490b5c552dcc3 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_ec2/toolbar_items.tsx @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { InfraSnapshotMetricType } from '../../graphql/types'; +import { CloudToolbarItems } from '../shared/compontents/cloud_toolbar_items'; + +export const AwsEC2ToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.cpu, + InfraSnapshotMetricType.rx, + InfraSnapshotMetricType.tx, + InfraSnapshotMetricType.diskIOReadBytes, + InfraSnapshotMetricType.diskIOWriteBytes, + ]; + const groupByFields = [ + 'cloud.availability_zone', + 'cloud.machine.type', + 'aws.ec2.instance.image.id', + 'aws.ec2.instance.state.name', + ]; + return ( + <> + + + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/index.ts new file mode 100644 index 0000000000000..f1182a942ff06 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/index.ts @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; + +export const awsRDS: InventoryModel = { + id: 'awsRDS', + displayName: i18n.translate('xpack.infra.inventoryModels.awsRDS.displayName', { + defaultMessage: 'RDS Databases', + }), + requiredModule: 'aws', + crosslinkSupport: { + details: true, + logs: true, + apm: false, + uptime: false, + }, + metrics, + fields: { + id: 'aws.rds.db_instance.arn', + name: 'aws.rds.db_instance.identifier', + }, + requiredMetrics: [ + 'awsRDSCpuTotal', + 'awsRDSConnections', + 'awsRDSQueriesExecuted', + 'awsRDSActiveTransactions', + 'awsRDSLatency', + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/layout.tsx b/x-pack/plugins/infra/common/inventory_models/aws_rds/layout.tsx new file mode 100644 index 0000000000000..debb569fcd5bb --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/layout.tsx @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + +
+ + + + + + + + + + + + + + + +
+
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/index.ts new file mode 100644 index 0000000000000..eaded5d8df223 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/index.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InventoryMetrics } from '../../types'; + +import { cpu } from './snapshot/cpu'; +import { rdsLatency } from './snapshot/rds_latency'; +import { rdsConnections } from './snapshot/rds_connections'; +import { rdsQueriesExecuted } from './snapshot/rds_queries_executed'; +import { rdsActiveTransactions } from './snapshot/rds_active_transactions'; + +import { awsRDSLatency } from './tsvb/aws_rds_latency'; +import { awsRDSConnections } from './tsvb/aws_rds_connections'; +import { awsRDSCpuTotal } from './tsvb/aws_rds_cpu_total'; +import { awsRDSQueriesExecuted } from './tsvb/aws_rds_queries_executed'; +import { awsRDSActiveTransactions } from './tsvb/aws_rds_active_transactions'; + +export const metrics: InventoryMetrics = { + tsvb: { + awsRDSLatency, + awsRDSConnections, + awsRDSCpuTotal, + awsRDSQueriesExecuted, + awsRDSActiveTransactions, + }, + snapshot: { + cpu, + rdsLatency, + rdsConnections, + rdsQueriesExecuted, + rdsActiveTransactions, + }, + defaultSnapshot: 'cpu', + defaultTimeRangeInSeconds: 14400, // 4 hours +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/cpu.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/cpu.ts new file mode 100644 index 0000000000000..e277b3b11958b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/cpu.ts @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const cpu: SnapshotModel = { + cpu_avg: { + avg: { + field: 'aws.rds.cpu.total.pct', + }, + }, + cpu: { + bucket_script: { + buckets_path: { + cpu: 'cpu_avg', + }, + script: { + source: 'params.cpu / 100', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_active_transactions.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_active_transactions.ts new file mode 100644 index 0000000000000..be3dba100ba29 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_active_transactions.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const rdsActiveTransactions: SnapshotModel = { + rdsActiveTransactions: { + avg: { + field: 'aws.rds.transactions.active', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_connections.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_connections.ts new file mode 100644 index 0000000000000..c7855d5548eea --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_connections.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const rdsConnections: SnapshotModel = { + rdsConnections: { + avg: { + field: 'aws.rds.database_connections', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_latency.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_latency.ts new file mode 100644 index 0000000000000..2997b54d2f92e --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_latency.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const rdsLatency: SnapshotModel = { + rdsLatency: { + avg: { + field: 'aws.rds.latency.dml', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_queries_executed.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_queries_executed.ts new file mode 100644 index 0000000000000..18e6538fb1e1e --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/snapshot/rds_queries_executed.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const rdsQueriesExecuted: SnapshotModel = { + rdsQueriesExecuted: { + avg: { + field: 'aws.rds.queries', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_active_transactions.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_active_transactions.ts new file mode 100644 index 0000000000000..026cdeac40c36 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_active_transactions.ts @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsRDSActiveTransactions = createTSVBModel( + 'awsRDSActiveTransactions', + ['aws.rds'], + [ + { + id: 'active', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.transactions.active', + id: 'avg', + type: 'avg', + }, + ], + }, + { + id: 'blocked', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.transactions.blocked', + id: 'avg', + type: 'avg', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_connections.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_connections.ts new file mode 100644 index 0000000000000..145cc758e4a5b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_connections.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsRDSConnections = createTSVBModel( + 'awsRDSConnections', + ['aws.rds'], + [ + { + id: 'connections', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.database_connections', + id: 'avg-conns', + type: 'avg', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_cpu_total.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_cpu_total.ts new file mode 100644 index 0000000000000..9a8eefc859bb0 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_cpu_total.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsRDSCpuTotal = createTSVBModel( + 'awsRDSCpuTotal', + ['aws.rds'], + [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.cpu.total.pct', + id: 'avg-cpu', + type: 'avg', + }, + { + id: 'convert-to-percent', + script: 'params.avg / 100', + type: 'calculation', + variables: [ + { + field: 'avg-cpu', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_latency.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_latency.ts new file mode 100644 index 0000000000000..80dffeeb717c6 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_latency.ts @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsRDSLatency = createTSVBModel( + 'awsRDSLatency', + ['aws.rds'], + [ + { + id: 'read', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.latency.read', + id: 'avg', + type: 'avg', + }, + ], + }, + { + id: 'write', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.latency.write', + id: 'avg', + type: 'avg', + }, + ], + }, + { + id: 'insert', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.latency.insert', + id: 'avg', + type: 'avg', + }, + ], + }, + { + id: 'update', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.latency.update', + id: 'avg', + type: 'avg', + }, + ], + }, + { + id: 'commit', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.latency.commit', + id: 'avg', + type: 'avg', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_queries_executed.ts b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_queries_executed.ts new file mode 100644 index 0000000000000..4dd1a1e89a21a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/metrics/tsvb/aws_rds_queries_executed.ts @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; +export const awsRDSQueriesExecuted = createTSVBModel( + 'awsRDSQueriesExecuted', + ['aws.rds'], + [ + { + id: 'queries', + split_mode: 'everything', + metrics: [ + { + field: 'aws.rds.queries', + id: 'avg-queries', + type: 'avg', + }, + ], + }, + ] +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_rds/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/aws_rds/toolbar_items.tsx new file mode 100644 index 0000000000000..86ed57e8f4c7f --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_rds/toolbar_items.tsx @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { InfraSnapshotMetricType } from '../../../public/graphql/types'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { CloudToolbarItems } from '../shared/compontents/cloud_toolbar_items'; + +export const AwsRDSToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.cpu, + InfraSnapshotMetricType.rdsConnections, + InfraSnapshotMetricType.rdsQueriesExecuted, + InfraSnapshotMetricType.rdsActiveTransactions, + InfraSnapshotMetricType.rdsLatency, + ]; + const groupByFields = [ + 'cloud.availability_zone', + 'aws.rds.db_instance.class', + 'aws.rds.db_instance.status', + ]; + return ( + <> + + + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/index.ts new file mode 100644 index 0000000000000..3bdf319f49c5f --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/index.ts @@ 
-0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; + +export const awsS3: InventoryModel = { + id: 'awsS3', + displayName: i18n.translate('xpack.infra.inventoryModels.awsS3.displayName', { + defaultMessage: 'S3 Buckets', + }), + requiredModule: 'aws', + crosslinkSupport: { + details: true, + logs: true, + apm: false, + uptime: false, + }, + metrics, + fields: { + id: 'aws.s3.bucket.name', + name: 'aws.s3.bucket.name', + }, + requiredMetrics: [ + 'awsS3BucketSize', + 'awsS3NumberOfObjects', + 'awsS3TotalRequests', + 'awsS3DownloadBytes', + 'awsS3UploadBytes', + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/layout.tsx b/x-pack/plugins/infra/common/inventory_models/aws_s3/layout.tsx new file mode 100644 index 0000000000000..955960f5baeda --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/layout.tsx @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + +
+ + + + + + + + + + + + + + + +
+
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/index.ts new file mode 100644 index 0000000000000..5aa974c16feec --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/index.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InventoryMetrics } from '../../types'; + +import { awsS3BucketSize } from './tsvb/aws_s3_bucket_size'; +import { awsS3TotalRequests } from './tsvb/aws_s3_total_requests'; +import { awsS3NumberOfObjects } from './tsvb/aws_s3_number_of_objects'; +import { awsS3DownloadBytes } from './tsvb/aws_s3_download_bytes'; +import { awsS3UploadBytes } from './tsvb/aws_s3_upload_bytes'; + +import { s3BucketSize } from './snapshot/s3_bucket_size'; +import { s3TotalRequests } from './snapshot/s3_total_requests'; +import { s3NumberOfObjects } from './snapshot/s3_number_of_objects'; +import { s3DownloadBytes } from './snapshot/s3_download_bytes'; +import { s3UploadBytes } from './snapshot/s3_upload_bytes'; + +export const metrics: InventoryMetrics = { + tsvb: { + awsS3BucketSize, + awsS3TotalRequests, + awsS3NumberOfObjects, + awsS3DownloadBytes, + awsS3UploadBytes, + }, + snapshot: { + s3BucketSize, + s3NumberOfObjects, + s3TotalRequests, + s3UploadBytes, + s3DownloadBytes, + }, + defaultSnapshot: 's3BucketSize', + defaultTimeRangeInSeconds: 86400 * 7, // 7 days +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_bucket_size.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_bucket_size.ts new file mode 100644 index 0000000000000..a99753a39c97c --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_bucket_size.ts @@ 
-0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const s3BucketSize: SnapshotModel = { + s3BucketSize: { + max: { + field: 'aws.s3_daily_storage.bucket.size.bytes', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_download_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_download_bytes.ts new file mode 100644 index 0000000000000..a0b23dadee37a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_download_bytes.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const s3DownloadBytes: SnapshotModel = { + s3DownloadBytes: { + max: { + field: 'aws.s3_request.downloaded.bytes', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_number_of_objects.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_number_of_objects.ts new file mode 100644 index 0000000000000..29162a59db47a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_number_of_objects.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const s3NumberOfObjects: SnapshotModel = { + s3NumberOfObjects: { + max: { + field: 'aws.s3_daily_storage.number_of_objects', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_total_requests.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_total_requests.ts new file mode 100644 index 0000000000000..bc57c6eb38234 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_total_requests.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const s3TotalRequests: SnapshotModel = { + s3TotalRequests: { + max: { + field: 'aws.s3_request.requests.total', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_upload_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_upload_bytes.ts new file mode 100644 index 0000000000000..977d73254c3cd --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/snapshot/s3_upload_bytes.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const s3UploadBytes: SnapshotModel = { + s3UploadBytes: { + max: { + field: 'aws.s3_request.uploaded.bytes', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_bucket_size.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_bucket_size.ts new file mode 100644 index 0000000000000..216f98b9e16b4 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_bucket_size.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsS3BucketSize = createTSVBModel( + 'awsS3BucketSize', + ['aws.s3_daily_storage'], + [ + { + id: 'bytes', + split_mode: 'everything', + metrics: [ + { + field: 'aws.s3_daily_storage.bucket.size.bytes', + id: 'max-bytes', + type: 'max', + }, + ], + }, + ], + '>=86400s', + false +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_download_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_download_bytes.ts new file mode 100644 index 0000000000000..15eb3130a5e23 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_download_bytes.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsS3DownloadBytes = createTSVBModel( + 'awsS3DownloadBytes', + ['aws.s3_request'], + [ + { + id: 'bytes', + split_mode: 'everything', + metrics: [ + { + field: 'aws.s3_request.downloaded.bytes', + id: 'max-bytes', + type: 'max', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_number_of_objects.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_number_of_objects.ts new file mode 100644 index 0000000000000..c108735bc0efd --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_number_of_objects.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsS3NumberOfObjects = createTSVBModel( + 'awsS3NumberOfObjects', + ['aws.s3_daily_storage'], + [ + { + id: 'objects', + split_mode: 'everything', + metrics: [ + { + field: 'aws.s3_daily_storage.number_of_objects', + id: 'max-size', + type: 'max', + }, + ], + }, + ], + '>=86400s', + false +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_total_requests.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_total_requests.ts new file mode 100644 index 0000000000000..311067fd96b47 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_total_requests.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsS3TotalRequests = createTSVBModel( + 'awsS3TotalRequests', + ['aws.s3_request'], + [ + { + id: 'total', + split_mode: 'everything', + metrics: [ + { + field: 'aws.s3_request.requests.total', + id: 'max-size', + type: 'max', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_upload_bytes.ts b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_upload_bytes.ts new file mode 100644 index 0000000000000..ab66b47cfa781 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/metrics/tsvb/aws_s3_upload_bytes.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsS3UploadBytes = createTSVBModel( + 'awsS3UploadBytes', + ['aws.s3_request'], + [ + { + id: 'bytes', + split_mode: 'everything', + metrics: [ + { + field: 'aws.s3_request.uploaded.bytes', + id: 'max-bytes', + type: 'max', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_s3/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/aws_s3/toolbar_items.tsx new file mode 100644 index 0000000000000..276b6b83eb43d --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_s3/toolbar_items.tsx @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { InfraSnapshotMetricType } from '../../../public/graphql/types'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { CloudToolbarItems } from '../shared/compontents/cloud_toolbar_items'; + +export const AwsS3ToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.s3BucketSize, + InfraSnapshotMetricType.s3NumberOfObjects, + InfraSnapshotMetricType.s3TotalRequests, + InfraSnapshotMetricType.s3DownloadBytes, + InfraSnapshotMetricType.s3UploadBytes, + ]; + const groupByFields = ['cloud.region']; + return ( + <> + + + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/index.ts new file mode 100644 index 0000000000000..1733e995a824f --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/index.ts @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; + +export const awsSQS: InventoryModel = { + id: 'awsSQS', + displayName: i18n.translate('xpack.infra.inventoryModels.awsSQS.displayName', { + defaultMessage: 'SQS Queues', + }), + requiredModule: 'aws', + crosslinkSupport: { + details: true, + logs: true, + apm: false, + uptime: false, + }, + metrics, + fields: { + id: 'aws.sqs.queue.name', + name: 'aws.sqs.queue.name', + }, + requiredMetrics: [ + 'awsSQSMessagesVisible', + 'awsSQSMessagesDelayed', + 'awsSQSMessagesSent', + 'awsSQSMessagesEmpty', + 'awsSQSOldestMessage', + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/layout.tsx b/x-pack/plugins/infra/common/inventory_models/aws_sqs/layout.tsx new file mode 100644 index 0000000000000..5d460c971ec3b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/layout.tsx @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + +
+ + + + + + + + + + + + + + + +
+
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/index.ts new file mode 100644 index 0000000000000..7bc593cc22035 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/index.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InventoryMetrics } from '../../types'; + +import { sqsMessagesVisible } from './snapshot/sqs_messages_visible'; +import { sqsMessagesDelayed } from './snapshot/sqs_messages_delayed'; +import { sqsMessagesEmpty } from './snapshot/sqs_messages_empty'; +import { sqsMessagesSent } from './snapshot/sqs_messages_sent'; +import { sqsOldestMessage } from './snapshot/sqs_oldest_message'; + +import { awsSQSMessagesVisible } from './tsvb/aws_sqs_messages_visible'; +import { awsSQSMessagesDelayed } from './tsvb/aws_sqs_messages_delayed'; +import { awsSQSMessagesSent } from './tsvb/aws_sqs_messages_sent'; +import { awsSQSMessagesEmpty } from './tsvb/aws_sqs_messages_empty'; +import { awsSQSOldestMessage } from './tsvb/aws_sqs_oldest_message'; + +export const metrics: InventoryMetrics = { + tsvb: { + awsSQSMessagesVisible, + awsSQSMessagesDelayed, + awsSQSMessagesSent, + awsSQSMessagesEmpty, + awsSQSOldestMessage, + }, + snapshot: { + sqsMessagesVisible, + sqsMessagesDelayed, + sqsMessagesEmpty, + sqsMessagesSent, + sqsOldestMessage, + }, + defaultSnapshot: 'sqsMessagesVisible', + defaultTimeRangeInSeconds: 14400, // 4 hours +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_delayed.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_delayed.ts new file mode 100644 index 0000000000000..679f86671725e --- /dev/null +++ 
b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_delayed.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const sqsMessagesDelayed: SnapshotModel = { + sqsMessagesDelayed: { + max: { + field: 'aws.sqs.messages.delayed', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_empty.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_empty.ts new file mode 100644 index 0000000000000..d80a3f3451e1d --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_empty.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const sqsMessagesEmpty: SnapshotModel = { + sqsMessagesEmpty: { + max: { + field: 'aws.sqs.messages.not_visible', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_sent.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_sent.ts new file mode 100644 index 0000000000000..3d6934bf3da85 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_sent.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const sqsMessagesSent: SnapshotModel = { + sqsMessagesSent: { + max: { + field: 'aws.sqs.messages.sent', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_visible.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_visible.ts new file mode 100644 index 0000000000000..1a78c50cd7949 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_messages_visible.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const sqsMessagesVisible: SnapshotModel = { + sqsMessagesVisible: { + avg: { + field: 'aws.sqs.messages.visible', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_oldest_message.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_oldest_message.ts new file mode 100644 index 0000000000000..ae780069c8ca1 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/snapshot/sqs_oldest_message.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const sqsOldestMessage: SnapshotModel = { + sqsOldestMessage: { + max: { + field: 'aws.sqs.oldest_message_age.sec', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_delayed.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_delayed.ts new file mode 100644 index 0000000000000..469b9ddd33953 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_delayed.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsSQSMessagesDelayed = createTSVBModel( + 'awsSQSMessagesDelayed', + ['aws.sqs'], + [ + { + id: 'delayed', + split_mode: 'everything', + metrics: [ + { + field: 'aws.sqs.messages.delayed', + id: 'avg-delayed', + type: 'avg', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_empty.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_empty.ts new file mode 100644 index 0000000000000..54c9e503a8c8c --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_empty.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsSQSMessagesEmpty = createTSVBModel( + 'awsSQSMessagesEmpty', + ['aws.sqs'], + [ + { + id: 'empty', + split_mode: 'everything', + metrics: [ + { + field: 'aws.sqs.messages.not_visible', + id: 'avg-empty', + type: 'avg', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_sent.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_sent.ts new file mode 100644 index 0000000000000..98389ef22fbe8 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_sent.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsSQSMessagesSent = createTSVBModel( + 'awsSQSMessagesSent', + ['aws.sqs'], + [ + { + id: 'sent', + split_mode: 'everything', + metrics: [ + { + field: 'aws.sqs.messages.sent', + id: 'avg-sent', + type: 'avg', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_visible.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_visible.ts new file mode 100644 index 0000000000000..c96ab07e4ae75 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_messages_visible.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsSQSMessagesVisible = createTSVBModel( + 'awsSQSMessagesVisible', + ['aws.sqs'], + [ + { + id: 'visible', + split_mode: 'everything', + metrics: [ + { + field: 'aws.sqs.messages.visible', + id: 'avg-visible', + type: 'avg', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_oldest_message.ts b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_oldest_message.ts new file mode 100644 index 0000000000000..812906386fb67 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/metrics/tsvb/aws_sqs_oldest_message.ts @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { createTSVBModel } from '../../../create_tsvb_model'; + +export const awsSQSOldestMessage = createTSVBModel( + 'awsSQSOldestMessage', + ['aws.sqs'], + [ + { + id: 'oldest', + split_mode: 'everything', + metrics: [ + { + field: 'aws.sqs.oldest_message_age.sec', + id: 'max-oldest', + type: 'max', + }, + ], + }, + ], + '>=300s' +); diff --git a/x-pack/plugins/infra/common/inventory_models/aws_sqs/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/aws_sqs/toolbar_items.tsx new file mode 100644 index 0000000000000..67baa22a5e6b0 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/aws_sqs/toolbar_items.tsx @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { InfraSnapshotMetricType } from '../../graphql/types'; +import { CloudToolbarItems } from '../shared/compontents/cloud_toolbar_items'; + +export const AwsSQSToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.sqsMessagesVisible, + InfraSnapshotMetricType.sqsMessagesDelayed, + InfraSnapshotMetricType.sqsMessagesSent, + InfraSnapshotMetricType.sqsMessagesEmpty, + InfraSnapshotMetricType.sqsOldestMessage, + ]; + const groupByFields = ['cloud.region']; + return ( + <> + + + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/container/index.ts b/x-pack/plugins/infra/common/inventory_models/container/index.ts new file mode 100644 index 0000000000000..29b3cfe3af180 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/index.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; + +export const container: InventoryModel = { + id: 'container', + displayName: i18n.translate('xpack.infra.inventoryModel.container.displayName', { + defaultMessage: 'Docker Containers', + }), + requiredModule: 'docker', + crosslinkSupport: { + details: true, + logs: true, + apm: true, + uptime: true, + }, + fields: { + id: 'container.id', + name: 'container.name', + ip: 'continaer.ip_address', + }, + metrics, + requiredMetrics: [ + 'containerOverview', + 'containerCpuUsage', + 'containerMemory', + 'containerNetworkTraffic', + 'containerDiskIOBytes', + 'containerDiskIOOps', + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/container/layout.tsx b/x-pack/plugins/infra/common/inventory_models/container/layout.tsx new file mode 100644 index 0000000000000..e207687cf8643 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/layout.tsx @@ -0,0 +1,230 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { GaugesSectionVis } from '../../../public/pages/metrics/components/gauges_section_vis'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; +import { MetadataDetails } from '../../../public/pages/metrics/components/metadata_details'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/index.ts new file mode 100644 index 0000000000000..73a10cbadb66d --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/index.ts @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InventoryMetrics } from '../../types'; +import { cpu } from './snapshot/cpu'; +import { memory } from './snapshot/memory'; +import { rx } from './snapshot/rx'; +import { tx } from './snapshot/tx'; + +import { containerOverview } from './tsvb/container_overview'; +import { containerCpuUsage } from './tsvb/container_cpu_usage'; +import { containerCpuKernel } from './tsvb/container_cpu_kernel'; +import { containerDiskIOOps } from './tsvb/container_diskio_ops'; +import { containerDiskIOBytes } from './tsvb/container_disk_io_bytes'; +import { containerMemory } from './tsvb/container_memory'; +import { containerNetworkTraffic } from './tsvb/container_network_traffic'; + +export const metrics: InventoryMetrics = { + tsvb: { + containerOverview, + containerCpuUsage, + containerCpuKernel, + containerDiskIOOps, + containerDiskIOBytes, + containerNetworkTraffic, + containerMemory, + }, + snapshot: { cpu, memory, rx, tx }, + defaultSnapshot: 'cpu', + defaultTimeRangeInSeconds: 3600, // 1 hour +}; diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/cpu.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/cpu.ts new file mode 100644 index 0000000000000..a6c25ee260cac --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/cpu.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const cpu: SnapshotModel = { + cpu: { + avg: { + field: 'docker.cpu.total.pct', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/memory.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/memory.ts new file mode 100644 index 0000000000000..30df0ebbaa1d4 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/memory.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const memory: SnapshotModel = { memory: { avg: { field: 'docker.memory.usage.pct' } } }; diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/rx.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/rx.ts new file mode 100644 index 0000000000000..6843f6149c711 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/rx.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { networkTrafficWithInterfaces } from '../../../shared/metrics/snapshot/network_traffic_with_interfaces'; +export const rx = networkTrafficWithInterfaces( + 'rx', + 'docker.network.in.bytes', + 'docker.network.interface' +); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/tx.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/tx.ts new file mode 100644 index 0000000000000..bccb4e60e9d19 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/snapshot/tx.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { networkTrafficWithInterfaces } from '../../../shared/metrics/snapshot/network_traffic_with_interfaces'; +export const tx = networkTrafficWithInterfaces( + 'tx', + 'docker.network.out.bytes', + 'docker.network.interface' +); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_kernel.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_kernel.ts new file mode 100644 index 0000000000000..9e4a6dad429bb --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_kernel.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerCpuKernel: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerCpuKernel', + requires: ['docker.cpu'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'kernel', + split_mode: 'everything', + metrics: [ + { + field: 'docker.cpu.kernel.pct', + id: 'avg-cpu-kernel', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_usage.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_usage.ts new file mode 100644 index 0000000000000..0eb84f4c3aca5 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_cpu_usage.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerCpuUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerCpuUsage', + requires: ['docker.cpu'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'docker.cpu.total.pct', + id: 'avg-cpu-total', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_disk_io_bytes.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_disk_io_bytes.ts new file mode 100644 index 0000000000000..273056f376825 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_disk_io_bytes.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerDiskIOBytes: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerDiskIOBytes', + requires: ['docker.disk'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'read', + split_mode: 'everything', + metrics: [ + { + field: 'docker.diskio.read.bytes', + id: 'max-diskio-read-bytes', + type: 'max', + }, + { + field: 'max-diskio-read-bytes', + id: 'deriv-max-diskio-read-bytes', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-diskio-read-bytes', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-diskio-read-bytes' }], + script: 'params.rate > 0.0 ? 
params.rate : 0.0', + }, + ], + }, + { + id: 'write', + split_mode: 'everything', + metrics: [ + { + field: 'docker.diskio.write.bytes', + id: 'max-diskio-write-bytes', + type: 'max', + }, + { + field: 'max-diskio-write-bytes', + id: 'deriv-max-diskio-write-bytes', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-diskio-write-bytes', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-diskio-write-bytes' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + id: 'calc-invert-rate', + script: 'params.rate * -1', + type: 'calculation', + variables: [ + { + field: 'posonly-deriv-max-diskio-write-bytes', + id: 'var-rate', + name: 'rate', + }, + ], + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_diskio_ops.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_diskio_ops.ts new file mode 100644 index 0000000000000..4067a5cf6c6f4 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_diskio_ops.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerDiskIOOps: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerDiskIOOps', + requires: ['docker.disk'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'read', + split_mode: 'everything', + metrics: [ + { + field: 'docker.diskio.read.ops', + id: 'max-diskio-read-ops', + type: 'max', + }, + { + field: 'max-diskio-read-ops', + id: 'deriv-max-diskio-read-ops', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-diskio-read-ops', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-diskio-read-ops' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + ], + }, + { + id: 'write', + split_mode: 'everything', + metrics: [ + { + field: 'docker.diskio.write.ops', + id: 'max-diskio-write-ops', + type: 'max', + }, + { + field: 'max-diskio-write-ops', + id: 'deriv-max-diskio-write-ops', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-diskio-write-ops', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-diskio-write-ops' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + id: 'calc-invert-rate', + script: 'params.rate * -1', + type: 'calculation', + variables: [ + { + field: 'posonly-deriv-max-diskio-write-ops', + id: 'var-rate', + name: 'rate', + }, + ], + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_memory.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_memory.ts new file mode 100644 index 0000000000000..8af7f057e8bad --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_memory.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerMemory: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerMemory', + requires: ['docker.memory'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'memory', + split_mode: 'everything', + metrics: [ + { + field: 'docker.memory.usage.pct', + id: 'avg-memory', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_network_traffic.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_network_traffic.ts new file mode 100644 index 0000000000000..18daac446bdcd --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_network_traffic.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerNetworkTraffic: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerNetworkTraffic', + requires: ['docker.network'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'tx', + split_mode: 'everything', + metrics: [ + { + field: 'docker.network.out.bytes', + id: 'avg-network-out', + type: 'avg', + }, + ], + }, + { + id: 'rx', + split_mode: 'everything', + metrics: [ + { + field: 'docker.network.in.bytes', + id: 'avg-network-in', + type: 'avg', + }, + { + id: 'invert-posonly-deriv-max-network-in', + script: 'params.rate * -1', + type: 'calculation', + variables: [ + { + field: 'avg-network-in', + id: 'var-rate', + name: 'rate', + }, + ], + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_overview.ts b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_overview.ts new file mode 100644 index 0000000000000..7facd082718b1 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/metrics/tsvb/container_overview.ts @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const containerOverview: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'containerOverview', + requires: ['docker'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'docker.cpu.total.pct', + id: 'avg-cpu-total', + type: 'avg', + }, + ], + }, + { + id: 'memory', + split_mode: 'everything', + metrics: [ + { + field: 'docker.memory.usage.pct', + id: 'avg-memory', + type: 'avg', + }, + ], + }, + { + id: 'tx', + split_mode: 'everything', + metrics: [ + { + field: 'docker.network.out.bytes', + id: 'avg-network-out', + type: 'avg', + }, + ], + }, + { + id: 'rx', + split_mode: 'everything', + metrics: [ + { + field: 'docker.network.in.bytes', + id: 'avg-network-in', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/container/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/container/toolbar_items.tsx new file mode 100644 index 0000000000000..9ed2cbe6dea08 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/container/toolbar_items.tsx @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { InfraSnapshotMetricType } from '../../graphql/types'; + +export const ContainerToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.cpu, + InfraSnapshotMetricType.memory, + InfraSnapshotMetricType.rx, + InfraSnapshotMetricType.tx, + ]; + const groupByFields = [ + 'host.name', + 'cloud.availability_zone', + 'cloud.machine.type', + 'cloud.project.id', + 'cloud.provider', + 'service.type', + ]; + return ( + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/create_tsvb_model.ts b/x-pack/plugins/infra/common/inventory_models/create_tsvb_model.ts new file mode 100644 index 0000000000000..7036b2236881f --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/create_tsvb_model.ts @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel, TSVBSeries, InventoryMetric } from './types'; + +export const createTSVBModel = ( + id: InventoryMetric, + requires: string[], + series: TSVBSeries[], + interval = '>=300s', + dropLastBucket = true +): TSVBMetricModelCreator => (timeField, indexPattern): TSVBMetricModel => ({ + id, + requires, + drop_last_bucket: dropLastBucket, + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series, +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/index.ts b/x-pack/plugins/infra/common/inventory_models/host/index.ts new file mode 100644 index 0000000000000..364ef0b4c2c91 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/index.ts @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; +import { + aws as awsRequiredMetrics, + nginx as nginxRequireMetrics, +} from '../shared/metrics/required_metrics'; + +export const host: InventoryModel = { + id: 'host', + displayName: i18n.translate('xpack.infra.inventoryModel.host.displayName', { + defaultMessage: 'Hosts', + }), + requiredModule: 'system', + crosslinkSupport: { + details: true, + logs: true, + apm: true, + uptime: true, + }, + fields: { + id: 'host.name', + name: 'host.name', + ip: 'host.ip', + }, + metrics, + requiredMetrics: [ + 'hostSystemOverview', + 'hostCpuUsage', + 'hostLoad', + 'hostMemoryUsage', + 'hostNetworkTraffic', + 'hostK8sOverview', + 'hostK8sCpuCap', + 'hostK8sMemoryCap', + 'hostK8sDiskCap', + 'hostK8sPodCap', + ...awsRequiredMetrics, + ...nginxRequireMetrics, + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/host/layout.tsx b/x-pack/plugins/infra/common/inventory_models/host/layout.tsx new file mode 100644 index 0000000000000..ca53193e64ca2 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/layout.tsx @@ -0,0 +1,370 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { withTheme } from '../../../../../common/eui_styled_components/eui_styled_components'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { GaugesSectionVis } from '../../../public/pages/metrics/components/gauges_section_vis'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import * as Aws from '../shared/layouts/aws'; +import * as Ngnix from '../shared/layouts/nginx'; +import { MetadataDetails } from '../../../public/pages/metrics/components/metadata_details'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + + +
+ + + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + + + +
+ + +
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/index.ts new file mode 100644 index 0000000000000..7f77f23e4fb95 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/index.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { cpu } from './snapshot/cpu'; +import { count } from '../../shared/metrics/snapshot/count'; +import { load } from './snapshot/load'; +import { logRate } from './snapshot/log_rate'; +import { memory } from './snapshot/memory'; +import { rx } from './snapshot/rx'; +import { tx } from './snapshot/tx'; + +import { hostSystemOverview } from './tsvb/host_system_overview'; +import { hostCpuUsage } from './tsvb/host_cpu_usage'; +import { hostLoad } from './tsvb/host_load'; +import { hostMemoryUsage } from './tsvb/host_memory_usage'; +import { hostNetworkTraffic } from './tsvb/host_network_traffic'; +import { hostFilesystem } from './tsvb/host_filesystem'; + +import { hostK8sOverview } from './tsvb/host_k8s_overview'; +import { hostK8sCpuCap } from './tsvb/host_k8s_cpu_cap'; +import { hostK8sPodCap } from './tsvb/host_k8s_pod_cap'; +import { hostK8sDiskCap } from './tsvb/host_k8s_disk_cap'; +import { hostK8sMemoryCap } from './tsvb/host_k8s_memory_cap'; + +import { hostDockerTop5ByMemory } from './tsvb/host_docker_top_5_by_memory'; +import { hostDockerTop5ByCpu } from './tsvb/host_docker_top_5_by_cpu'; +import { hostDockerOverview } from './tsvb/host_docker_overview'; +import { hostDockerInfo } from './tsvb/host_docker_info'; + +import { InventoryMetrics } from '../../types'; + +export const metrics: InventoryMetrics = { + tsvb: { + hostSystemOverview, + hostCpuUsage, + hostLoad, + hostMemoryUsage, + 
hostNetworkTraffic, + hostFilesystem, + hostK8sOverview, + hostK8sCpuCap, + hostK8sPodCap, + hostK8sDiskCap, + hostK8sMemoryCap, + hostDockerOverview, + hostDockerInfo, + hostDockerTop5ByMemory, + hostDockerTop5ByCpu, + }, + snapshot: { count, cpu, load, logRate, memory, rx, tx }, + defaultSnapshot: 'cpu', + defaultTimeRangeInSeconds: 3600, // 1 hour +}; diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/cpu.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/cpu.ts new file mode 100644 index 0000000000000..fa43acb8d6108 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/cpu.ts @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const cpu: SnapshotModel = { + cpu_user: { + avg: { + field: 'system.cpu.user.pct', + }, + }, + cpu_system: { + avg: { + field: 'system.cpu.system.pct', + }, + }, + cpu_cores: { + max: { + field: 'system.cpu.cores', + }, + }, + cpu: { + bucket_script: { + buckets_path: { + user: 'cpu_user', + system: 'cpu_system', + cores: 'cpu_cores', + }, + script: { + source: '(params.user + params.system) / params.cores', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/load.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/load.ts new file mode 100644 index 0000000000000..803fb2664ad27 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/load.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const load: SnapshotModel = { load: { avg: { field: 'system.load.5' } } }; diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/log_rate.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/log_rate.ts new file mode 100644 index 0000000000000..658111bd07676 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/log_rate.ts @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const logRate: SnapshotModel = { + count: { + bucket_script: { + buckets_path: { count: '_count' }, + script: { + source: 'count * 1', + lang: 'expression', + }, + gap_policy: 'skip', + }, + }, + cumsum: { + cumulative_sum: { + buckets_path: 'count', + }, + }, + logRate: { + derivative: { + buckets_path: 'cumsum', + gap_policy: 'skip', + unit: '1s', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/memory.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/memory.ts new file mode 100644 index 0000000000000..cb08a9eaebb3b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/memory.ts @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const memory: SnapshotModel = { + memory: { avg: { field: 'system.memory.actual.used.pct' } }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/rx.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/rx.ts new file mode 100644 index 0000000000000..602169339101e --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/rx.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { networkTrafficWithInterfaces } from '../../../shared/metrics/snapshot/network_traffic_with_interfaces'; +export const rx = networkTrafficWithInterfaces( + 'rx', + 'system.network.in.bytes', + 'system.network.name' +); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/tx.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/tx.ts new file mode 100644 index 0000000000000..4042a7ab7b66e --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/snapshot/tx.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { networkTrafficWithInterfaces } from '../../../shared/metrics/snapshot/network_traffic_with_interfaces'; +export const tx = networkTrafficWithInterfaces( + 'tx', + 'system.network.out.bytes', + 'system.network.name' +); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_cpu_usage.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_cpu_usage.ts new file mode 100644 index 0000000000000..c7db1f9e7394c --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_cpu_usage.ts @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostCpuUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostCpuUsage', + requires: ['system.cpu'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'user', + metrics: [ + { + field: 'system.cpu.user.pct', + id: 'avg-cpu-user', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-user', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'system', + metrics: [ + { + field: 'system.cpu.system.pct', + id: 'avg-cpu-system', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 
'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-system', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'steal', + metrics: [ + { + field: 'system.cpu.steal.pct', + id: 'avg-cpu-steal', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'avg-cpu-steal', + id: 'var-avg', + name: 'avg', + }, + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'irq', + metrics: [ + { + field: 'system.cpu.irq.pct', + id: 'avg-cpu-irq', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-irq', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'softirq', + metrics: [ + { + field: 'system.cpu.softirq.pct', + id: 'avg-cpu-softirq', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-softirq', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'iowait', + metrics: [ + { + field: 'system.cpu.iowait.pct', + id: 'avg-cpu-iowait', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-iowait', + id: 
'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'nice', + metrics: [ + { + field: 'system.cpu.nice.pct', + id: 'avg-cpu-nice', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + id: 'calc-avg-cores', + script: 'params.avg / params.cores', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cores', + id: 'var-cores', + name: 'cores', + }, + { + field: 'avg-cpu-nice', + id: 'var-avg', + name: 'avg', + }, + ], + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_info.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_info.ts new file mode 100644 index 0000000000000..9c55eaa932d33 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_info.ts @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostDockerInfo: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostDockerInfo', + requires: ['docker.info'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'running', + metrics: [ + { + field: 'docker.info.containers.running', + id: 'max-running', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'paused', + metrics: [ + { + field: 'docker.info.containers.paused', + id: 'max-paused', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'stopped', + metrics: [ + { + field: 'docker.info.containers.stopped', + id: 'max-stopped', + type: 'max', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_overview.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_overview.ts new file mode 100644 index 0000000000000..d7026d7648d37 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_overview.ts @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostDockerOverview: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostDockerOverview', + requires: ['docker.info'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'gauge', + series: [ + { + id: 'total', + metrics: [ + { + field: 'docker.info.containers.total', + id: 'max-total', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'running', + metrics: [ + { + field: 'docker.info.containers.running', + id: 'max-running', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'paused', + metrics: [ + { + field: 'docker.info.containers.paused', + id: 'max-paused', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'stopped', + metrics: [ + { + field: 'docker.info.containers.stopped', + id: 'max-stopped', + type: 'max', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_cpu.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_cpu.ts new file mode 100644 index 0000000000000..a4f8bcd426edd --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_cpu.ts @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostDockerTop5ByCpu: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostDockerTop5ByCpu', + requires: ['docker.cpu'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'avg-cpu', + metrics: [ + { + field: 'docker.cpu.total.pct', + id: 'avg-cpu-metric', + type: 'avg', + }, + ], + split_mode: 'terms', + terms_field: 'container.name', + terms_order_by: 'avg-cpu', + terms_size: 5, + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_memory.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_memory.ts new file mode 100644 index 0000000000000..4441b1824e2a2 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_docker_top_5_by_memory.ts @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostDockerTop5ByMemory: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostDockerTop5ByMemory', + requires: ['docker.memory'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'avg-memory', + metrics: [ + { + field: 'docker.memory.usage.pct', + id: 'avg-memory-metric', + type: 'avg', + }, + ], + split_mode: 'terms', + terms_field: 'container.name', + terms_order_by: 'avg-memory', + terms_size: 5, + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_filesystem.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_filesystem.ts new file mode 100644 index 0000000000000..850674ce89c75 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_filesystem.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostFilesystem: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostFilesystem', + requires: ['system.filesystem'], + filter: 'system.filesystem.device_name:\\/*', + index_pattern: indexPattern, + time_field: timeField, + interval, + type: 'timeseries', + series: [ + { + id: 'used', + metrics: [ + { + field: 'system.filesystem.used.pct', + id: 'avg-filesystem-used', + type: 'avg', + }, + ], + split_mode: 'terms', + terms_field: 'system.filesystem.device_name', + terms_order_by: 'used', + terms_size: 5, + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_cpu_cap.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_cpu_cap.ts new file mode 100644 index 0000000000000..97e2bc8f09196 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_cpu_cap.ts @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostK8sCpuCap: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostK8sCpuCap', + map_field_to: 'kubernetes.node.name', + requires: ['kubernetes.node'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'capacity', + metrics: [ + { + field: 'kubernetes.node.cpu.allocatable.cores', + id: 'max-cpu-cap', + type: 'max', + }, + { + id: 'calc-nanocores', + type: 'calculation', + variables: [ + { + id: 'var-cores', + field: 'max-cpu-cap', + name: 'cores', + }, + ], + script: 'params.cores * 1000000000', + }, + ], + split_mode: 'everything', + }, + { + id: 'used', + metrics: [ + { + field: 'kubernetes.node.cpu.usage.nanocores', + id: 'avg-cpu-usage', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_disk_cap.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_disk_cap.ts new file mode 100644 index 0000000000000..8ac4c70d06fed --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_disk_cap.ts @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; +export const hostK8sDiskCap: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostK8sDiskCap', + map_field_to: 'kubernetes.node.name', + requires: ['kubernetes.node'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'capacity', + metrics: [ + { + field: 'kubernetes.node.fs.capacity.bytes', + id: 'max-fs-cap', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'used', + metrics: [ + { + field: 'kubernetes.node.fs.used.bytes', + id: 'avg-fs-used', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_memory_cap.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_memory_cap.ts new file mode 100644 index 0000000000000..6bf76b59dd60a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_memory_cap.ts @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostK8sMemoryCap: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostK8sMemoryCap', + map_field_to: 'kubernetes.node.name', + requires: ['kubernetes.node'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'capacity', + metrics: [ + { + field: 'kubernetes.node.memory.allocatable.bytes', + id: 'max-memory-cap', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'used', + metrics: [ + { + field: 'kubernetes.node.memory.usage.bytes', + id: 'avg-memory-usage', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_overview.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_overview.ts new file mode 100644 index 0000000000000..86d615231f070 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_overview.ts @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostK8sOverview: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostK8sOverview', + requires: ['kubernetes'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'gauge', + series: [ + { + id: 'cpucap', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.node.cpu.allocatable.cores', + id: 'max-cpu-cap', + type: 'max', + }, + { + field: 'kubernetes.node.cpu.usage.nanocores', + id: 'avg-cpu-usage', + type: 'avg', + }, + { + id: 'calc-used-cap', + script: 'params.used / (params.cap * 1000000000)', + type: 'calculation', + variables: [ + { + field: 'max-cpu-cap', + id: 'var-cap', + name: 'cap', + }, + { + field: 'avg-cpu-usage', + id: 'var-used', + name: 'used', + }, + ], + }, + ], + }, + { + id: 'diskcap', + metrics: [ + { + field: 'kubernetes.node.fs.capacity.bytes', + id: 'max-fs-cap', + type: 'max', + }, + { + field: 'kubernetes.node.fs.used.bytes', + id: 'avg-fs-used', + type: 'avg', + }, + { + id: 'calc-used-cap', + script: 'params.used / params.cap', + type: 'calculation', + variables: [ + { + field: 'max-fs-cap', + id: 'var-cap', + name: 'cap', + }, + { + field: 'avg-fs-used', + id: 'var-used', + name: 'used', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'memorycap', + metrics: [ + { + field: 'kubernetes.node.memory.allocatable.bytes', + id: 'max-memory-cap', + type: 'max', + }, + { + field: 'kubernetes.node.memory.usage.bytes', + id: 'avg-memory-usage', + type: 'avg', + }, + { + id: 'calc-used-cap', + script: 'params.used / params.cap', + type: 'calculation', + variables: [ + { + field: 'max-memory-cap', + id: 'var-cap', + name: 'cap', + }, + { + field: 'avg-memory-usage', + id: 'var-used', + name: 'used', + }, + ], + }, + ], + split_mode: 'everything', + }, + { + id: 'podcap', + metrics: [ + { + field: 'kubernetes.node.pod.capacity.total', + id: 
'max-pod-cap', + type: 'max', + }, + { + field: 'kubernetes.pod.uid', + id: 'card-pod-name', + type: 'cardinality', + }, + { + id: 'calc-used-cap', + script: 'params.used / params.cap', + type: 'calculation', + variables: [ + { + field: 'max-pod-cap', + id: 'var-cap', + name: 'cap', + }, + { + field: 'card-pod-name', + id: 'var-used', + name: 'used', + }, + ], + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_pod_cap.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_pod_cap.ts new file mode 100644 index 0000000000000..e4c844f954599 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_k8s_pod_cap.ts @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostK8sPodCap: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostK8sPodCap', + requires: ['kubernetes.node'], + map_field_to: 'kubernetes.node.name', + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + + series: [ + { + id: 'capacity', + metrics: [ + { + field: 'kubernetes.node.pod.allocatable.total', + id: 'max-pod-cap', + type: 'max', + }, + ], + split_mode: 'everything', + }, + { + id: 'used', + metrics: [ + { + field: 'kubernetes.pod.uid', + id: 'avg-pod', + type: 'cardinality', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_load.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_load.ts new file mode 100644 index 0000000000000..dcee609e7acd7 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_load.ts @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostLoad: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostLoad', + requires: ['system.cpu'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'load_1m', + metrics: [ + { + field: 'system.load.1', + id: 'avg-load-1m', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + { + id: 'load_5m', + metrics: [ + { + field: 'system.load.5', + id: 'avg-load-5m', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + { + id: 'load_15m', + metrics: [ + { + field: 'system.load.15', + id: 'avg-load-15m', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_memory_usage.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_memory_usage.ts new file mode 100644 index 0000000000000..ef0f7b44912c7 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_memory_usage.ts @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostMemoryUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostMemoryUsage', + requires: ['system.memory'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'free', + metrics: [ + { + field: 'system.memory.free', + id: 'avg-memory-free', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + { + id: 'used', + metrics: [ + { + field: 'system.memory.actual.used.bytes', + id: 'avg-memory-used', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + { + id: 'cache', + metrics: [ + { + field: 'system.memory.actual.used.bytes', + id: 'avg-memory-actual-used', + type: 'avg', + }, + { + field: 'system.memory.used.bytes', + id: 'avg-memory-used', + type: 'avg', + }, + { + id: 'calc-used-actual', + script: 'params.used - params.actual', + type: 'calculation', + variables: [ + { + field: 'avg-memory-actual-used', + id: 'var-actual', + name: 'actual', + }, + { + field: 'avg-memory-used', + id: 'var-used', + name: 'used', + }, + ], + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_network_traffic.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_network_traffic.ts new file mode 100644 index 0000000000000..94eb03111e855 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_network_traffic.ts @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostNetworkTraffic: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostNetworkTraffic', + requires: ['system.network'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'tx', + metrics: [ + { + field: 'system.network.out.bytes', + id: 'max-net-out', + type: 'max', + }, + { + field: 'max-net-out', + id: 'deriv-max-net-out', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-out', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-net-out' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + function: 'sum', + id: 'seriesagg-sum', + type: 'series_agg', + }, + ], + split_mode: 'terms', + terms_field: 'system.network.name', + }, + { + id: 'rx', + metrics: [ + { + field: 'system.network.in.bytes', + id: 'max-net-in', + type: 'max', + }, + { + field: 'max-net-in', + id: 'deriv-max-net-in', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-in', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-net-in' }], + script: 'params.rate > 0.0 ? 
params.rate : 0.0', + }, + { + id: 'calc-invert-rate', + script: 'params.rate * -1', + type: 'calculation', + variables: [ + { + field: 'posonly-deriv-max-net-in', + id: 'var-rate', + name: 'rate', + }, + ], + }, + { + function: 'sum', + id: 'seriesagg-sum', + type: 'series_agg', + }, + ], + split_mode: 'terms', + terms_field: 'system.network.name', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_system_overview.ts b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_system_overview.ts new file mode 100644 index 0000000000000..953c14ab2a9ce --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/metrics/tsvb/host_system_overview.ts @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const hostSystemOverview: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'hostSystemOverview', + requires: ['system.cpu', 'system.memory', 'system.load', 'system.network'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'gauge', + series: [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'system.cpu.user.pct', + id: 'avg-cpu-user', + type: 'avg', + }, + { + field: 'system.cpu.cores', + id: 'max-cpu-cores', + type: 'max', + }, + { + field: 'system.cpu.system.pct', + id: 'avg-cpu-system', + type: 'avg', + }, + { + id: 'calc-user-system-cores', + script: '(params.users + params.system) / params.cores', + type: 'calculation', + variables: [ + { + field: 'avg-cpu-user', + id: 'var-users', + name: 'users', + }, + { + field: 'avg-cpu-system', + id: 'var-system', + name: 'system', + }, + { + field: 'max-cpu-cores', + id: 
'var-cores', + name: 'cores', + }, + ], + }, + ], + }, + { + id: 'load', + split_mode: 'everything', + metrics: [ + { + field: 'system.load.5', + id: 'avg-load-5m', + type: 'avg', + }, + ], + }, + { + id: 'memory', + split_mode: 'everything', + metrics: [ + { + field: 'system.memory.actual.used.pct', + id: 'avg-memory-actual-used', + type: 'avg', + }, + ], + }, + { + id: 'rx', + split_mode: 'terms', + terms_field: 'system.network.name', + metrics: [ + { + field: 'system.network.in.bytes', + id: 'max-net-in', + type: 'max', + }, + { + field: 'max-net-in', + id: 'deriv-max-net-in', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-in', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-net-in' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + function: 'sum', + id: 'seriesagg-sum', + type: 'series_agg', + }, + ], + }, + { + id: 'tx', + split_mode: 'terms', + terms_field: 'system.network.name', + metrics: [ + { + field: 'system.network.out.bytes', + id: 'max-net-out', + type: 'max', + }, + { + field: 'max-net-out', + id: 'deriv-max-net-out', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-out', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-net-out' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + function: 'sum', + id: 'seriesagg-sum', + type: 'series_agg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/host/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/host/toolbar_items.tsx new file mode 100644 index 0000000000000..f8df81a33a8ec --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/host/toolbar_items.tsx @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { InfraSnapshotMetricType } from '../../graphql/types'; + +export const HostToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.cpu, + InfraSnapshotMetricType.memory, + InfraSnapshotMetricType.load, + InfraSnapshotMetricType.rx, + InfraSnapshotMetricType.tx, + InfraSnapshotMetricType.logRate, + ]; + const groupByFields = [ + 'cloud.availability_zone', + 'cloud.machine.type', + 'cloud.project.id', + 'cloud.provider', + 'service.type', + ]; + return ( + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/index.ts b/x-pack/plugins/infra/common/inventory_models/index.ts new file mode 100644 index 0000000000000..d9fd8fa465b7a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/index.ts @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { host } from './host'; +import { pod } from './pod'; +import { awsEC2 } from './aws_ec2'; +import { awsS3 } from './aws_s3'; +import { awsRDS } from './aws_rds'; +import { awsSQS } from './aws_sqs'; +import { container } from './container'; +import { InventoryItemType } from './types'; +export { metrics } from './metrics'; + +export const inventoryModels = [host, pod, container, awsEC2, awsS3, awsRDS, awsSQS]; + +export const findInventoryModel = (type: InventoryItemType) => { + const model = inventoryModels.find(m => m.id === type); + if (!model) { + throw new Error( + i18n.translate('xpack.infra.inventoryModels.findInventoryModel.error', { + defaultMessage: "The inventory model you've attempted to find does not exist", + }) + ); + } + return model; +}; + +interface InventoryFields { + message: string[]; + host: string; + pod: string; + container: string; + timestamp: string; + tiebreaker: string; +} + +const LEGACY_TYPES = ['host', 'pod', 'container']; + +const getFieldByType = (type: InventoryItemType, fields: InventoryFields) => { + switch (type) { + case 'pod': + return fields.pod; + case 'host': + return fields.host; + case 'container': + return fields.container; + } +}; + +export const findInventoryFields = (type: InventoryItemType, fields: InventoryFields) => { + const inventoryModel = findInventoryModel(type); + if (LEGACY_TYPES.includes(type)) { + const id = getFieldByType(type, fields) || inventoryModel.fields.id; + return { + ...inventoryModel.fields, + id, + }; + } else { + return inventoryModel.fields; + } +}; diff --git a/x-pack/plugins/infra/common/inventory_models/intl_strings.ts b/x-pack/plugins/infra/common/inventory_models/intl_strings.ts new file mode 100644 index 0000000000000..08949ed53eb10 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/intl_strings.ts @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { i18n } from '@kbn/i18n'; +export const CPUUsage = i18n.translate('xpack.infra.waffle.metricOptions.cpuUsageText', { + defaultMessage: 'CPU usage', +}); + +export const MemoryUsage = i18n.translate('xpack.infra.waffle.metricOptions.memoryUsageText', { + defaultMessage: 'Memory usage', +}); + +export const InboundTraffic = i18n.translate( + 'xpack.infra.waffle.metricOptions.inboundTrafficText', + { + defaultMessage: 'Inbound traffic', + } +); + +export const OutboundTraffic = i18n.translate( + 'xpack.infra.waffle.metricOptions.outboundTrafficText', + { + defaultMessage: 'Outbound traffic', + } +); + +export const LogRate = i18n.translate('xpack.infra.waffle.metricOptions.hostLogRateText', { + defaultMessage: 'Log rate', +}); + +export const Load = i18n.translate('xpack.infra.waffle.metricOptions.loadText', { + defaultMessage: 'Load', +}); + +interface Lookup { + [id: string]: string; +} + +export const fieldToName = (field: string) => { + const LOOKUP: Lookup = { + 'kubernetes.namespace': i18n.translate('xpack.infra.groupByDisplayNames.kubernetesNamespace', { + defaultMessage: 'Namespace', + }), + 'kubernetes.node.name': i18n.translate('xpack.infra.groupByDisplayNames.kubernetesNodeName', { + defaultMessage: 'Node', + }), + 'host.name': i18n.translate('xpack.infra.groupByDisplayNames.hostName', { + defaultMessage: 'Host', + }), + 'cloud.availability_zone': i18n.translate('xpack.infra.groupByDisplayNames.availabilityZone', { + defaultMessage: 'Availability zone', + }), + 'cloud.machine.type': i18n.translate('xpack.infra.groupByDisplayNames.machineType', { + defaultMessage: 'Machine type', + }), + 'cloud.project.id': i18n.translate('xpack.infra.groupByDisplayNames.projectID', { + defaultMessage: 'Project ID', + }), + 'cloud.provider': 
i18n.translate('xpack.infra.groupByDisplayNames.provider', { + defaultMessage: 'Cloud provider', + }), + 'service.type': i18n.translate('xpack.infra.groupByDisplayNames.serviceType', { + defaultMessage: 'Service type', + }), + }; + return LOOKUP[field] || field; +}; diff --git a/x-pack/plugins/infra/common/inventory_models/layouts.ts b/x-pack/plugins/infra/common/inventory_models/layouts.ts new file mode 100644 index 0000000000000..d9008753adf7b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/layouts.ts @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * WHY ARE THE LAYOUTS A SEPERATE FILE? + * + * Files with React can not be included on the server without + * crashing due to the requirement of the `window` object. + */ + +import { i18n } from '@kbn/i18n'; + +import { ReactNode, FunctionComponent } from 'react'; +import { Layout as HostLayout } from './host/layout'; +import { Layout as PodLayout } from './pod/layout'; +import { Layout as ContainerLayout } from './container/layout'; +import { Layout as AwsEC2Layout } from './aws_ec2/layout'; +import { Layout as AwsS3Layout } from './aws_s3/layout'; +import { Layout as AwsRDSLayout } from './aws_rds/layout'; +import { Layout as AwsSQSLayout } from './aws_sqs/layout'; +import { InventoryItemType } from './types'; +import { LayoutProps } from '../../public/pages/metrics/types'; + +interface Layouts { + [type: string]: ReactNode; +} + +const layouts: Layouts = { + host: HostLayout, + pod: PodLayout, + container: ContainerLayout, + awsEC2: AwsEC2Layout, + awsS3: AwsS3Layout, + awsRDS: AwsRDSLayout, + awsSQS: AwsSQSLayout, +}; + +export const findLayout = (type: InventoryItemType) => { + const Layout = layouts?.[type]; + if (!Layout) { + throw new Error( + 
i18n.translate('xpack.infra.inventoryModels.findLayout.error', { + defaultMessage: "The layout you've attempted to find does not exist", + }) + ); + } + return Layout as FunctionComponent; +}; diff --git a/x-pack/plugins/infra/common/inventory_models/metrics.ts b/x-pack/plugins/infra/common/inventory_models/metrics.ts new file mode 100644 index 0000000000000..cadc059fc5aeb --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/metrics.ts @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { metrics as hostMetrics } from './host/metrics'; +import { metrics as sharedMetrics } from './shared/metrics'; +import { metrics as podMetrics } from './pod/metrics'; +import { metrics as containerMetrics } from './container/metrics'; +import { metrics as awsEC2Metrics } from './aws_ec2/metrics'; +import { metrics as awsS3Metrics } from './aws_s3/metrics'; +import { metrics as awsRDSMetrics } from './aws_rds/metrics'; +import { metrics as awsSQSMetrics } from './aws_sqs/metrics'; + +export const metrics = { + tsvb: { + ...hostMetrics.tsvb, + ...sharedMetrics.tsvb, + ...podMetrics.tsvb, + ...containerMetrics.tsvb, + ...awsEC2Metrics.tsvb, + ...awsS3Metrics.tsvb, + ...awsRDSMetrics.tsvb, + ...awsSQSMetrics.tsvb, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/pod/index.ts b/x-pack/plugins/infra/common/inventory_models/pod/index.ts new file mode 100644 index 0000000000000..f76a0304e26c0 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/index.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { metrics } from './metrics'; +import { InventoryModel } from '../types'; +import { nginx as nginxRequiredMetrics } from '../shared/metrics/required_metrics'; + +export const pod: InventoryModel = { + id: 'pod', + displayName: i18n.translate('xpack.infra.inventoryModel.pod.displayName', { + defaultMessage: 'Kubernetes Pods', + }), + requiredModule: 'kubernetes', + crosslinkSupport: { + details: true, + logs: true, + apm: true, + uptime: true, + }, + fields: { + id: 'kubernetes.pod.uid', + name: 'kubernetes.pod.name', + ip: 'kubernetes.pod.ip', + }, + metrics, + requiredMetrics: [ + 'podOverview', + 'podCpuUsage', + 'podMemoryUsage', + 'podNetworkTraffic', + ...nginxRequiredMetrics, + ], +}; diff --git a/x-pack/plugins/infra/common/inventory_models/pod/layout.tsx b/x-pack/plugins/infra/common/inventory_models/pod/layout.tsx new file mode 100644 index 0000000000000..f0c27ccff13b1 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/layout.tsx @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../public/pages/metrics/types'; +import { Section } from '../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../public/pages/metrics/components/sub_section'; +import { GaugesSectionVis } from '../../../public/pages/metrics/components/gauges_section_vis'; +import { ChartSectionVis } from '../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../common/eui_styled_components'; +import * as Nginx from '../shared/layouts/nginx'; +import { MetadataDetails } from '../../../public/pages/metrics/components/metadata_details'; +import { LayoutContent } from '../../../public/pages/metrics/components/layout_content'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + + + +
+ + + + + + + + + + + + +
+ +
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/index.ts new file mode 100644 index 0000000000000..b4420b5532cc6 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/index.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { cpu } from './snapshot/cpu'; +import { memory } from './snapshot/memory'; +import { rx } from './snapshot/rx'; +import { tx } from './snapshot/tx'; + +import { podOverview } from './tsvb/pod_overview'; +import { podCpuUsage } from './tsvb/pod_cpu_usage'; +import { podLogUsage } from './tsvb/pod_log_usage'; +import { podMemoryUsage } from './tsvb/pod_memory_usage'; +import { podNetworkTraffic } from './tsvb/pod_network_traffic'; +import { InventoryMetrics } from '../../types'; + +export const metrics: InventoryMetrics = { + tsvb: { + podOverview, + podCpuUsage, + podLogUsage, + podNetworkTraffic, + podMemoryUsage, + }, + snapshot: { cpu, memory, rx, tx }, + defaultSnapshot: 'cpu', + defaultTimeRangeInSeconds: 3600, // 1 hour +}; diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/cpu.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/cpu.ts new file mode 100644 index 0000000000000..f25dd8179aa1a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/cpu.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const cpu: SnapshotModel = { + cpu: { + avg: { + field: 'kubernetes.pod.cpu.usage.node.pct', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/memory.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/memory.ts new file mode 100644 index 0000000000000..28a71d9b0275a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/memory.ts @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const memory: SnapshotModel = { + memory: { avg: { field: 'kubernetes.pod.memory.usage.node.pct' } }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/rx.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/rx.ts new file mode 100644 index 0000000000000..e9e1b1ddd9b41 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/rx.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { networkTraffic } from '../../../shared/metrics/snapshot/network_traffic'; +export const rx = networkTraffic('rx', 'kubernetes.pod.network.rx.bytes'); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/tx.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/tx.ts new file mode 100644 index 0000000000000..198355ca29ff6 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/snapshot/tx.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { networkTraffic } from '../../../shared/metrics/snapshot/network_traffic'; +export const tx = networkTraffic('tx', 'kubernetes.pod.network.tx.bytes'); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_cpu_usage.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_cpu_usage.ts new file mode 100644 index 0000000000000..1d778d11e0725 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_cpu_usage.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const podCpuUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'podCpuUsage', + requires: ['kubernetes.pod'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.cpu.usage.node.pct', + id: 'avg-cpu-usage', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_log_usage.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_log_usage.ts new file mode 100644 index 0000000000000..d960322a477aa --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_log_usage.ts @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const podLogUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'podLogUsage', + requires: ['kubernetes.pod'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'logs', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.container.logs.used.bytes', + id: 'avg-log-used', + type: 'avg', + }, + { + field: 'kubernetes.container.logs.capacity.bytes', + id: 'max-log-cap', + type: 'max', + }, + { + id: 'calc-usage-limit', + script: 'params.usage / params.limit', + type: 'calculation', + variables: [ + { + field: 'avg-log-userd', + id: 'var-usage', + name: 'usage', + }, + { + field: 'max-log-cap', + id: 'var-limit', + name: 'limit', + }, + ], + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_memory_usage.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_memory_usage.ts new file mode 100644 index 0000000000000..8dd63787c605a --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_memory_usage.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const podMemoryUsage: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'podMemoryUsage', + requires: ['kubernetes.pod'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'memory', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.memory.usage.node.pct', + id: 'avg-memory-usage', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_network_traffic.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_network_traffic.ts new file mode 100644 index 0000000000000..139e592609ae1 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_network_traffic.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const podNetworkTraffic: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'podNetworkTraffic', + requires: ['kubernetes.pod'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'tx', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.network.tx.bytes', + id: 'max-network-tx', + type: 'max', + }, + { + field: 'max-network-tx', + id: 'deriv-max-network-tx', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-tx', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-network-tx' }], + script: 'params.rate > 0.0 ? 
params.rate : 0.0', + }, + ], + }, + { + id: 'rx', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.network.rx.bytes', + id: 'max-network-rx', + type: 'max', + }, + { + field: 'max-network-rx', + id: 'deriv-max-network-rx', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-net-tx', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-network-tx' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + { + id: 'invert-posonly-deriv-max-network-rx', + script: 'params.rate * -1', + type: 'calculation', + variables: [ + { + field: 'posonly-deriv-max-network-rx', + id: 'var-rate', + name: 'rate', + }, + ], + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_overview.ts b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_overview.ts new file mode 100644 index 0000000000000..129cbd96eb2ce --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/metrics/tsvb/pod_overview.ts @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const podOverview: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'podOverview', + requires: ['kubernetes.pod'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'cpu', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.cpu.usage.node.pct', + id: 'avg-cpu-usage', + type: 'avg', + }, + ], + }, + { + id: 'memory', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.memory.usage.node.pct', + id: 'avg-memory-usage', + type: 'avg', + }, + ], + }, + { + id: 'rx', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.network.rx.bytes', + id: 'max-network-rx', + type: 'max', + }, + { + field: 'max-network-rx', + id: 'deriv-max-network-rx', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-network-rx', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-network-rx' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + ], + }, + { + id: 'tx', + split_mode: 'everything', + metrics: [ + { + field: 'kubernetes.pod.network.tx.bytes', + id: 'max-network-tx', + type: 'max', + }, + { + field: 'max-network-tx', + id: 'deriv-max-network-tx', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-deriv-max-network-tx', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'deriv-max-network-tx' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/pod/toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/pod/toolbar_items.tsx new file mode 100644 index 0000000000000..9ef4a889dc589 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/pod/toolbar_items.tsx @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import React from 'react'; +import { ToolbarProps } from '../../../public/components/inventory/toolbars/toolbar'; +import { MetricsAndGroupByToolbarItems } from '../shared/compontents/metrics_and_groupby_toolbar_items'; +import { InfraSnapshotMetricType } from '../../graphql/types'; + +export const PodToolbarItems = (props: ToolbarProps) => { + const metricTypes = [ + InfraSnapshotMetricType.cpu, + InfraSnapshotMetricType.memory, + InfraSnapshotMetricType.rx, + InfraSnapshotMetricType.tx, + ]; + const groupByFields = ['kubernetes.namespace', 'kubernetes.node.name', 'service.type']; + return ( + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/compontents/cloud_toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/shared/compontents/cloud_toolbar_items.tsx new file mode 100644 index 0000000000000..7150d58be4eb7 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/compontents/cloud_toolbar_items.tsx @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React from 'react'; +import { EuiFlexItem } from '@elastic/eui'; +import { ToolbarProps } from '../../../../public/components/inventory/toolbars/toolbar'; +import { WaffleAccountsControls } from '../../../../public/components/waffle/waffle_accounts_controls'; +import { WaffleRegionControls } from '../../../../public/components/waffle/waffle_region_controls'; + +type Props = ToolbarProps; + +export const CloudToolbarItems = (props: Props) => { + return ( + <> + {props.accounts.length > 0 && ( + + + + )} + {props.regions.length > 0 && ( + + + + )} + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/compontents/metrics_and_groupby_toolbar_items.tsx b/x-pack/plugins/infra/common/inventory_models/shared/compontents/metrics_and_groupby_toolbar_items.tsx new file mode 100644 index 0000000000000..c46ad5c6df952 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/compontents/metrics_and_groupby_toolbar_items.tsx @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import React, { useMemo } from 'react'; +import { EuiFlexItem } from '@elastic/eui'; +import { ToolbarProps } from '../../../../public/components/inventory/toolbars/toolbar'; +import { WaffleMetricControls } from '../../../../public/components/waffle/waffle_metric_controls'; +import { WaffleGroupByControls } from '../../../../public/components/waffle/waffle_group_by_controls'; +import { InfraSnapshotMetricType } from '../../../../public/graphql/types'; +import { + toGroupByOpt, + toMetricOpt, +} from '../../../../public/components/inventory/toolbars/toolbar_wrapper'; + +interface Props extends ToolbarProps { + metricTypes: InfraSnapshotMetricType[]; + groupByFields: string[]; +} + +export const MetricsAndGroupByToolbarItems = (props: Props) => { + const metricOptions = useMemo(() => props.metricTypes.map(toMetricOpt), [props.metricTypes]); + + const groupByOptions = useMemo(() => props.groupByFields.map(toGroupByOpt), [ + props.groupByFields, + ]); + + return ( + <> + + + + + + + + ); +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/layouts/aws.tsx b/x-pack/plugins/infra/common/inventory_models/shared/layouts/aws.tsx new file mode 100644 index 0000000000000..2cabbe4c33ff3 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/layouts/aws.tsx @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../../public/pages/metrics/types'; +import { Section } from '../../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../../public/pages/metrics/components/sub_section'; +import { GaugesSectionVis } from '../../../../public/pages/metrics/components/gauges_section_vis'; +import { ChartSectionVis } from '../../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../../common/eui_styled_components'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + +
+ + + + + + + + + + + + + + + + + + +
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/layouts/nginx.tsx b/x-pack/plugins/infra/common/inventory_models/shared/layouts/nginx.tsx new file mode 100644 index 0000000000000..9d31ffa775d21 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/layouts/nginx.tsx @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import React from 'react'; +import { i18n } from '@kbn/i18n'; +import { LayoutPropsWithTheme } from '../../../../public/pages/metrics/types'; +import { Section } from '../../../../public/pages/metrics/components/section'; +import { SubSection } from '../../../../public/pages/metrics/components/sub_section'; +import { ChartSectionVis } from '../../../../public/pages/metrics/components/chart_section_vis'; +import { withTheme } from '../../../../../../common/eui_styled_components'; + +export const Layout = withTheme(({ metrics, theme }: LayoutPropsWithTheme) => ( + +
+ + + + + + + + + + + + +
+
+)); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/index.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/index.ts new file mode 100644 index 0000000000000..2bab5c5229c5b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/index.ts @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { nginxRequestRate } from './tsvb/nginx_request_rate'; +import { nginxActiveConnections } from './tsvb/nginx_active_connections'; +import { nginxHits } from './tsvb/nginx_hits'; +import { nginxRequestsPerConnection } from './tsvb/nginx_requests_per_connection'; + +import { awsCpuUtilization } from './tsvb/aws_cpu_utilization'; +import { awsDiskioBytes } from './tsvb/aws_diskio_bytes'; +import { awsDiskioOps } from './tsvb/aws_diskio_ops'; +import { awsNetworkBytes } from './tsvb/aws_network_bytes'; +import { awsNetworkPackets } from './tsvb/aws_network_packets'; +import { awsOverview } from './tsvb/aws_overview'; +import { InventoryMetrics } from '../../types'; +import { count } from './snapshot/count'; + +export const metrics: InventoryMetrics = { + tsvb: { + nginxActiveConnections, + nginxHits, + nginxRequestRate, + nginxRequestsPerConnection, + awsCpuUtilization, + awsDiskioBytes, + awsDiskioOps, + awsNetworkBytes, + awsNetworkPackets, + awsOverview, + }, + snapshot: { + count, + }, + defaultSnapshot: 'count', + defaultTimeRangeInSeconds: 3600, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/required_metrics.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/required_metrics.ts new file mode 100644 index 0000000000000..0b2623c448646 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/required_metrics.ts @@ -0,0 +1,23 @@ +/* + 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InventoryMetric } from '../../types'; + +export const nginx: InventoryMetric[] = [ + 'nginxHits', + 'nginxRequestRate', + 'nginxActiveConnections', + 'nginxRequestsPerConnection', +]; + +export const aws: InventoryMetric[] = [ + 'awsOverview', + 'awsCpuUtilization', + 'awsNetworkBytes', + 'awsNetworkPackets', + 'awsDiskioOps', + 'awsDiskioBytes', +]; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/count.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/count.ts new file mode 100644 index 0000000000000..ed8398a5d4a77 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/count.ts @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const count: SnapshotModel = { + count: { + bucket_script: { + buckets_path: { count: '_count' }, + script: { + source: 'count * 1', + lang: 'expression', + }, + gap_policy: 'skip', + }, + }, +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic.ts new file mode 100644 index 0000000000000..37e90a6416ba7 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const networkTraffic = (id: string, field: string): SnapshotModel => { + return { + [`${id}_max`]: { max: { field } }, + [`${id}_deriv`]: { + derivative: { + buckets_path: `${id}_max`, + gap_policy: 'skip', + unit: '1s', + }, + }, + [id]: { + bucket_script: { + buckets_path: { value: `${id}_deriv[normalized_value]` }, + script: { + source: 'params.value > 0.0 ? params.value : 0.0', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, + }; +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic_with_interfaces.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic_with_interfaces.ts new file mode 100644 index 0000000000000..1ba5cf037e708 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/network_traffic_with_interfaces.ts @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { SnapshotModel } from '../../../types'; + +export const networkTrafficWithInterfaces = ( + id: string, + metricField: string, + interfaceField: string +): SnapshotModel => ({ + [`${id}_interfaces`]: { + terms: { field: interfaceField }, + aggregations: { + [`${id}_interface_max`]: { max: { field: metricField } }, + }, + }, + [`${id}_sum_of_interfaces`]: { + sum_bucket: { + buckets_path: `${id}_interfaces>${id}_interface_max`, + }, + }, + [`${id}_deriv`]: { + derivative: { + buckets_path: `${id}_sum_of_interfaces`, + gap_policy: 'skip', + unit: '1s', + }, + }, + [id]: { + bucket_script: { + buckets_path: { value: `${id}_deriv[normalized_value]` }, + script: { + source: 'params.value > 0.0 ? params.value : 0.0', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/rate.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/rate.ts new file mode 100644 index 0000000000000..e1c7c7df52628 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/snapshot/rate.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SnapshotModel } from '../../../types'; + +export const rate = (id: string, field: string): SnapshotModel => { + return { + [`${id}_max`]: { max: { field } }, + [`${id}_deriv`]: { + derivative: { + buckets_path: `${id}_max`, + gap_policy: 'skip', + unit: '1s', + }, + }, + [id]: { + bucket_script: { + buckets_path: { value: `${id}_deriv[normalized_value]` }, + script: { + source: 'params.value > 0.0 ? 
params.value : 0.0', + lang: 'painless', + }, + gap_policy: 'skip', + }, + }, + }; +}; diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_cpu_utilization.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_cpu_utilization.ts new file mode 100644 index 0000000000000..9092d8f3f5d54 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_cpu_utilization.ts @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const awsCpuUtilization: TSVBMetricModelCreator = ( + timeField, + indexPattern +): TSVBMetricModel => ({ + id: 'awsCpuUtilization', + requires: ['aws.ec2'], + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + index_pattern: indexPattern, + interval: '>=5m', + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'cpu-util', + metrics: [ + { + field: 'aws.ec2.cpu.total.pct', + id: 'avg-cpu-util', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_bytes.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_bytes.ts new file mode 100644 index 0000000000000..8abb6f5d5ddff --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_bytes.ts @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const awsDiskioBytes: TSVBMetricModelCreator = ( + timeField, + indexPattern +): TSVBMetricModel => ({ + id: 'awsDiskioBytes', + requires: ['aws.ec2'], + index_pattern: indexPattern, + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + interval: '>=5m', + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'writes', + metrics: [ + { + field: 'aws.ec2.diskio.write.bytes', + id: 'sum-diskio-out', + type: 'sum', + }, + { + id: 'csum-sum-diskio-out', + field: 'sum-diskio-out', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-diskio-out', + unit: '1s', + type: 'derivative', + field: 'csum-sum-diskio-out', + }, + { + id: 'posonly-deriv-csum-sum-diskio-out', + field: 'deriv-csum-sum-diskio-out', + type: 'positive_only', + }, + ], + split_mode: 'everything', + }, + { + id: 'reads', + metrics: [ + { + field: 'aws.ec2.diskio.read.bytes', + id: 'sum-diskio-in', + type: 'sum', + }, + { + id: 'csum-sum-diskio-in', + field: 'sum-diskio-in', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-diskio-in', + unit: '1s', + type: 'derivative', + field: 'csum-sum-diskio-in', + }, + { + id: 'posonly-deriv-csum-sum-diskio-in', + field: 'deriv-csum-sum-diskio-in', + type: 'positive_only', + }, + { + id: 'inverted-posonly-deriv-csum-sum-diskio-in', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'posonly-deriv-csum-sum-diskio-in' }], + script: 'params.rate * -1', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_ops.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_ops.ts new file mode 100644 index 0000000000000..d81438e3b5a61 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_diskio_ops.ts @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const awsDiskioOps: TSVBMetricModelCreator = (timeField, indexPattern): TSVBMetricModel => ({ + id: 'awsDiskioOps', + requires: ['aws.ec2'], + index_pattern: indexPattern, + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + interval: '>=5m', + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'writes', + metrics: [ + { + field: 'aws.ec2.diskio.write.count', + id: 'sum-diskio-writes', + type: 'sum', + }, + { + id: 'csum-sum-diskio-writes', + field: 'sum-diskio-writes', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-diskio-writes', + unit: '1s', + type: 'derivative', + field: 'csum-sum-diskio-writes', + }, + { + id: 'posonly-deriv-csum-sum-diskio-writes', + field: 'deriv-csum-sum-diskio-writes', + type: 'positive_only', + }, + ], + split_mode: 'everything', + }, + { + id: 'reads', + metrics: [ + { + field: 'aws.ec2.diskio.read.count', + id: 'sum-diskio-reads', + type: 'sum', + }, + { + id: 'csum-sum-diskio-reads', + field: 'sum-diskio-reads', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-diskio-reads', + unit: '1s', + type: 'derivative', + field: 'csum-sum-diskio-reads', + }, + { + id: 'posonly-deriv-csum-sum-diskio-reads', + field: 'deriv-csum-sum-diskio-reads', + type: 'positive_only', + }, + { + id: 'inverted-posonly-deriv-csum-sum-diskio-reads', + type: 'calculation', + variables: [ + { id: 'var-rate', name: 'rate', field: 'posonly-deriv-csum-sum-diskio-reads' }, + ], + script: 'params.rate * -1', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_bytes.ts 
b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_bytes.ts new file mode 100644 index 0000000000000..bbae45a5b4b8e --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_bytes.ts @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +// see discussion in: https://github.com/elastic/kibana/issues/42687 + +export const awsNetworkBytes: TSVBMetricModelCreator = ( + timeField, + indexPattern +): TSVBMetricModel => ({ + id: 'awsNetworkBytes', + requires: ['aws.ec2'], + index_pattern: indexPattern, + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + interval: '>=5m', + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'tx', + metrics: [ + { + field: 'aws.ec2.network.out.bytes', + id: 'sum-net-out', + type: 'sum', + }, + { + id: 'csum-sum-net-out', + field: 'sum-net-out', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-net-out', + unit: '1s', + type: 'derivative', + field: 'csum-sum-net-out', + }, + { + id: 'posonly-deriv-csum-sum-net-out', + field: 'deriv-csum-sum-net-out', + type: 'positive_only', + }, + ], + split_mode: 'everything', + }, + { + id: 'rx', + metrics: [ + { + field: 'aws.ec2.network.in.bytes', + id: 'sum-net-in', + type: 'sum', + }, + { + id: 'csum-sum-net-in', + field: 'sum-net-in', + type: 'cumulative_sum', + }, + { + id: 'deriv-csum-sum-net-in', + unit: '1s', + type: 'derivative', + field: 'csum-sum-net-in', + }, + { + id: 'posonly-deriv-csum-sum-net-in', + field: 'deriv-csum-sum-net-in', + type: 'positive_only', + }, + { + id: 'inverted-posonly-deriv-csum-sum-net-in', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 
'posonly-deriv-csum-sum-net-in' }], + script: 'params.rate * -1', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_packets.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_packets.ts new file mode 100644 index 0000000000000..e2e6c4846a412 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_network_packets.ts @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const awsNetworkPackets: TSVBMetricModelCreator = ( + timeField, + indexPattern +): TSVBMetricModel => ({ + id: 'awsNetworkPackets', + requires: ['aws.ec2'], + index_pattern: indexPattern, + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + interval: '>=5m', + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'packets-out', + metrics: [ + { + field: 'aws.ec2.network.out.packets', + id: 'avg-net-out', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + { + id: 'packets-in', + metrics: [ + { + field: 'aws.ec2.network.in.packets', + id: 'avg-net-in', + type: 'avg', + }, + { + id: 'inverted-avg-net-in', + type: 'calculation', + variables: [{ id: 'var-avg', name: 'avg', field: 'avg-net-in' }], + script: 'params.avg * -1', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_overview.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_overview.ts new file mode 100644 index 0000000000000..5ba61d1f92517 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/aws_overview.ts @@ -0,0 +1,64 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const awsOverview: TSVBMetricModelCreator = (timeField, indexPattern): TSVBMetricModel => ({ + id: 'awsOverview', + requires: ['aws.ec2'], + index_pattern: indexPattern, + map_field_to: 'cloud.instance.id', + id_type: 'cloud', + interval: '>=5m', + time_field: timeField, + type: 'gauge', + series: [ + { + id: 'cpu-util', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.cpu.total.pct', + id: 'cpu-total-pct', + type: 'max', + }, + ], + }, + { + id: 'status-check-failed', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.status.check_failed', + id: 'status-check-failed', + type: 'max', + }, + ], + }, + { + id: 'packets-out', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.network.out.packets', + id: 'network-out-packets', + type: 'avg', + }, + ], + }, + { + id: 'packets-in', + split_mode: 'everything', + metrics: [ + { + field: 'aws.ec2.network.in.packets', + id: 'network-in-packets', + type: 'avg', + }, + ], + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_active_connections.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_active_connections.ts new file mode 100644 index 0000000000000..61e7fc88496a3 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_active_connections.ts @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const nginxActiveConnections: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'nginxActiveConnections', + requires: ['nginx.stubstatus'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'connections', + metrics: [ + { + field: 'nginx.stubstatus.active', + id: 'avg-active', + type: 'avg', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_hits.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_hits.ts new file mode 100644 index 0000000000000..546102ec0ad8b --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_hits.ts @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const nginxHits: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'nginxHits', + requires: ['nginx.access'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: '200s', + metrics: [ + { + id: 'count-200', + type: 'count', + }, + ], + split_mode: 'filter', + filter: { + query: 'http.response.status_code:[200 TO 299]', + language: 'lucene', + }, + }, + { + id: '300s', + metrics: [ + { + id: 'count-300', + type: 'count', + }, + ], + split_mode: 'filter', + filter: { + query: 'http.response.status_code:[300 TO 399]', + language: 'lucene', + }, + }, + { + id: '400s', + metrics: [ + { + id: 'count-400', + type: 'count', + }, + ], + split_mode: 'filter', + filter: { + query: 'http.response.status_code:[400 TO 499]', + language: 'lucene', + }, + }, + { + id: '500s', + metrics: [ + { + id: 'count-500', + type: 'count', + }, + ], + split_mode: 'filter', + filter: { + query: 'http.response.status_code:[500 TO 599]', + language: 'lucene', + }, + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_request_rate.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_request_rate.ts new file mode 100644 index 0000000000000..5904ba62cd309 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_request_rate.ts @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const nginxRequestRate: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'nginxRequestRate', + requires: ['nginx.stubstatus'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'rate', + metrics: [ + { + field: 'nginx.stubstatus.requests', + id: 'max-requests', + type: 'max', + }, + { + field: 'max-requests', + id: 'derv-max-requests', + type: 'derivative', + unit: '1s', + }, + { + id: 'posonly-derv-max-requests', + type: 'calculation', + variables: [{ id: 'var-rate', name: 'rate', field: 'derv-max-requests' }], + script: 'params.rate > 0.0 ? params.rate : 0.0', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_requests_per_connection.ts b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_requests_per_connection.ts new file mode 100644 index 0000000000000..c76d5a9edf8fc --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/shared/metrics/tsvb/nginx_requests_per_connection.ts @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TSVBMetricModelCreator, TSVBMetricModel } from '../../../types'; + +export const nginxRequestsPerConnection: TSVBMetricModelCreator = ( + timeField, + indexPattern, + interval +): TSVBMetricModel => ({ + id: 'nginxRequestsPerConnection', + requires: ['nginx.stubstatus'], + index_pattern: indexPattern, + interval, + time_field: timeField, + type: 'timeseries', + series: [ + { + id: 'reqPerConns', + metrics: [ + { + field: 'nginx.stubstatus.handled', + id: 'max-handled', + type: 'max', + }, + { + field: 'nginx.stubstatus.requests', + id: 'max-requests', + type: 'max', + }, + { + id: 'reqs-per-connection', + type: 'calculation', + variables: [ + { id: 'var-handled', name: 'handled', field: 'max-handled' }, + { id: 'var-requests', name: 'requests', field: 'max-requests' }, + ], + script: + 'params.handled > 0.0 && params.requests > 0.0 ? params.handled / params.requests : 0.0', + }, + ], + split_mode: 'everything', + }, + ], +}); diff --git a/x-pack/plugins/infra/common/inventory_models/toolbars.ts b/x-pack/plugins/infra/common/inventory_models/toolbars.ts new file mode 100644 index 0000000000000..05def078c7f2d --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/toolbars.ts @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { ReactNode, FunctionComponent } from 'react'; +import { i18n } from '@kbn/i18n'; +import { InventoryItemType } from './types'; +import { HostToolbarItems } from './host/toolbar_items'; +import { ContainerToolbarItems } from './container/toolbar_items'; +import { PodToolbarItems } from './pod/toolbar_items'; +import { ToolbarProps } from '../../public/components/inventory/toolbars/toolbar'; +import { AwsEC2ToolbarItems } from './aws_ec2/toolbar_items'; +import { AwsS3ToolbarItems } from './aws_s3/toolbar_items'; +import { AwsRDSToolbarItems } from './aws_rds/toolbar_items'; +import { AwsSQSToolbarItems } from './aws_sqs/toolbar_items'; + +interface Toolbars { + [type: string]: ReactNode; +} + +const toolbars: Toolbars = { + host: HostToolbarItems, + container: ContainerToolbarItems, + pod: PodToolbarItems, + awsEC2: AwsEC2ToolbarItems, + awsS3: AwsS3ToolbarItems, + awsRDS: AwsRDSToolbarItems, + awsSQS: AwsSQSToolbarItems, +}; + +export const findToolbar = (type: InventoryItemType) => { + const Toolbar = toolbars?.[type]; + if (!Toolbar) { + throw new Error( + i18n.translate('xpack.infra.inventoryModels.findToolbar.error', { + defaultMessage: "The toolbar you've attempted to find does not exist.", + }) + ); + } + return Toolbar as FunctionComponent; +}; diff --git a/x-pack/plugins/infra/common/inventory_models/types.ts b/x-pack/plugins/infra/common/inventory_models/types.ts new file mode 100644 index 0000000000000..cc2396547edc4 --- /dev/null +++ b/x-pack/plugins/infra/common/inventory_models/types.ts @@ -0,0 +1,337 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +export const ItemTypeRT = rt.keyof({ + host: null, + pod: null, + container: null, + awsEC2: null, + awsS3: null, + awsSQS: null, + awsRDS: null, +}); + +export const InventoryVisTypeRT = rt.keyof({ + line: null, + area: null, + bar: null, +}); + +export type InventoryVisType = rt.TypeOf; + +export const InventoryFormatterTypeRT = rt.keyof({ + abbreviatedNumber: null, + bits: null, + bytes: null, + number: null, + percent: null, + highPercision: null, +}); +export type InventoryFormatterType = rt.TypeOf; +export type InventoryItemType = rt.TypeOf; + +export const InventoryMetricRT = rt.keyof({ + hostSystemOverview: null, + hostCpuUsage: null, + hostFilesystem: null, + hostK8sOverview: null, + hostK8sCpuCap: null, + hostK8sDiskCap: null, + hostK8sMemoryCap: null, + hostK8sPodCap: null, + hostLoad: null, + hostMemoryUsage: null, + hostNetworkTraffic: null, + hostDockerOverview: null, + hostDockerInfo: null, + hostDockerTop5ByCpu: null, + hostDockerTop5ByMemory: null, + podOverview: null, + podCpuUsage: null, + podMemoryUsage: null, + podLogUsage: null, + podNetworkTraffic: null, + containerOverview: null, + containerCpuKernel: null, + containerCpuUsage: null, + containerDiskIOOps: null, + containerDiskIOBytes: null, + containerMemory: null, + containerNetworkTraffic: null, + nginxHits: null, + nginxRequestRate: null, + nginxActiveConnections: null, + nginxRequestsPerConnection: null, + awsOverview: null, + awsCpuUtilization: null, + awsNetworkBytes: null, + awsNetworkPackets: null, + awsDiskioBytes: null, + awsDiskioOps: null, + awsEC2CpuUtilization: null, + awsEC2NetworkTraffic: null, + awsEC2DiskIOBytes: null, + awsS3TotalRequests: null, + awsS3NumberOfObjects: null, + awsS3BucketSize: null, + awsS3DownloadBytes: null, + awsS3UploadBytes: null, + awsRDSCpuTotal: null, + awsRDSConnections: null, + awsRDSQueriesExecuted: null, + awsRDSActiveTransactions: null, + awsRDSLatency: null, + awsSQSMessagesVisible: null, + 
awsSQSMessagesDelayed: null, + awsSQSMessagesSent: null, + awsSQSMessagesEmpty: null, + awsSQSOldestMessage: null, + custom: null, +}); +export type InventoryMetric = rt.TypeOf; + +export const TSVBMetricTypeRT = rt.keyof({ + avg: null, + max: null, + min: null, + calculation: null, + cardinality: null, + series_agg: null, + positive_only: null, + derivative: null, + count: null, + sum: null, + cumulative_sum: null, +}); + +export type TSVBMetricType = rt.TypeOf; + +export const TSVBMetricModelCountRT = rt.type({ + id: rt.string, + type: rt.literal('count'), +}); + +export const TSVBMetricModelBasicMetricRT = rt.intersection([ + rt.type({ + id: rt.string, + type: TSVBMetricTypeRT, + }), + rt.partial({ + field: rt.string, + }), +]); + +export const TSVBMetricModelVariableRT = rt.type({ + field: rt.string, + id: rt.string, + name: rt.string, +}); + +export const TSVBMetricModelBucketScriptRT = rt.type({ + id: rt.string, + script: rt.string, + type: rt.literal('calculation'), + variables: rt.array(TSVBMetricModelVariableRT), +}); + +export const TSVBMetricModelDerivativeRT = rt.type({ + id: rt.string, + field: rt.string, + unit: rt.string, + type: rt.literal('derivative'), +}); + +export const TSVBMetricModelSeriesAggRT = rt.type({ + id: rt.string, + function: rt.string, + type: rt.literal('series_agg'), +}); + +export const TSVBMetricRT = rt.union([ + TSVBMetricModelCountRT, + TSVBMetricModelBasicMetricRT, + TSVBMetricModelBucketScriptRT, + TSVBMetricModelDerivativeRT, + TSVBMetricModelSeriesAggRT, +]); +export type TSVBMetric = rt.TypeOf; + +export const TSVBSeriesRT = rt.intersection([ + rt.type({ + id: rt.string, + metrics: rt.array(TSVBMetricRT), + split_mode: rt.string, + }), + rt.partial({ + terms_field: rt.string, + terms_size: rt.number, + terms_order_by: rt.string, + filter: rt.type({ + query: rt.string, + language: rt.keyof({ + lucene: null, + kuery: null, + }), + }), + }), +]); + +export type TSVBSeries = rt.TypeOf; + +export const TSVBMetricModelRT = 
rt.intersection([ + rt.type({ + id: InventoryMetricRT, + requires: rt.array(rt.string), + index_pattern: rt.union([rt.string, rt.array(rt.string)]), + interval: rt.string, + time_field: rt.string, + type: rt.string, + series: rt.array(TSVBSeriesRT), + }), + rt.partial({ + filter: rt.string, + map_field_to: rt.string, + id_type: rt.keyof({ cloud: null, node: null }), + drop_last_bucket: rt.boolean, + }), +]); + +export type TSVBMetricModel = rt.TypeOf; + +export type TSVBMetricModelCreator = ( + timeField: string, + indexPattern: string | string[], + interval: string +) => TSVBMetricModel; + +export const SnapshotModelMetricAggRT = rt.record( + rt.string, + rt.union([ + rt.undefined, + rt.type({ + field: rt.string, + }), + ]) +); + +export const SnapshotModelBucketScriptRT = rt.type({ + bucket_script: rt.intersection([ + rt.type({ + buckets_path: rt.record(rt.string, rt.union([rt.undefined, rt.string])), + script: rt.type({ + source: rt.string, + lang: rt.keyof({ painless: null, expression: null }), + }), + }), + rt.partial({ gap_policy: rt.keyof({ skip: null, insert_zeros: null }) }), + ]), +}); + +export const SnapshotModelCumulativeSumRT = rt.type({ + cumulative_sum: rt.type({ + buckets_path: rt.string, + }), +}); + +export const SnapshotModelDerivativeRT = rt.type({ + derivative: rt.type({ + buckets_path: rt.string, + gap_policy: rt.keyof({ skip: null, insert_zeros: null }), + unit: rt.string, + }), +}); + +export const SnapshotModelSumBucketRT = rt.type({ + sum_bucket: rt.type({ + buckets_path: rt.string, + }), +}); + +interface SnapshotTermsWithAggregation { + terms: { field: string }; + aggregations: SnapshotModel; +} + +export const SnapshotTermsWithAggregationRT: rt.Type = rt.recursion( + 'SnapshotModelRT', + () => + rt.type({ + terms: rt.type({ field: rt.string }), + aggregations: SnapshotModelRT, + }) +); + +export const SnapshotModelAggregationRT = rt.union([ + SnapshotModelMetricAggRT, + SnapshotModelBucketScriptRT, + SnapshotModelCumulativeSumRT, + 
SnapshotModelDerivativeRT, + SnapshotModelSumBucketRT, + SnapshotTermsWithAggregationRT, +]); + +export const SnapshotModelRT = rt.record( + rt.string, + rt.union([rt.undefined, SnapshotModelAggregationRT]) +); +export type SnapshotModel = rt.TypeOf; + +export const SnapshotMetricTypeRT = rt.keyof({ + count: null, + cpu: null, + load: null, + memory: null, + tx: null, + rx: null, + logRate: null, + diskIOReadBytes: null, + diskIOWriteBytes: null, + s3TotalRequests: null, + s3NumberOfObjects: null, + s3BucketSize: null, + s3DownloadBytes: null, + s3UploadBytes: null, + rdsConnections: null, + rdsQueriesExecuted: null, + rdsActiveTransactions: null, + rdsLatency: null, + sqsMessagesVisible: null, + sqsMessagesDelayed: null, + sqsMessagesSent: null, + sqsMessagesEmpty: null, + sqsOldestMessage: null, +}); + +export type SnapshotMetricType = rt.TypeOf; + +export interface InventoryMetrics { + tsvb: { [name: string]: TSVBMetricModelCreator }; + snapshot: { [name: string]: SnapshotModel }; + defaultSnapshot: SnapshotMetricType; + /** This is used by the inventory view to calculate the appropriate amount of time for the metrics detail page. Some metris like awsS3 require multiple days where others like host only need an hour.*/ + defaultTimeRangeInSeconds: number; +} + +export interface InventoryModel { + id: string; + displayName: string; + requiredModule: string; + fields: { + id: string; + name: string; + ip?: string; + }; + crosslinkSupport: { + details: boolean; + logs: boolean; + apm: boolean; + uptime: boolean; + }; + metrics: InventoryMetrics; + requiredMetrics: InventoryMetric[]; +} diff --git a/x-pack/plugins/infra/common/log_analysis/index.ts b/x-pack/plugins/infra/common/log_analysis/index.ts new file mode 100644 index 0000000000000..79913f829191d --- /dev/null +++ b/x-pack/plugins/infra/common/log_analysis/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_analysis'; +export * from './job_parameters'; diff --git a/x-pack/plugins/infra/common/log_analysis/job_parameters.ts b/x-pack/plugins/infra/common/log_analysis/job_parameters.ts new file mode 100644 index 0000000000000..8c08e24d8665d --- /dev/null +++ b/x-pack/plugins/infra/common/log_analysis/job_parameters.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as rt from 'io-ts'; + +export const bucketSpan = 900000; + +export const categoriesMessageField = 'message'; + +export const partitionField = 'event.dataset'; + +export const getJobIdPrefix = (spaceId: string, sourceId: string) => + `kibana-logs-ui-${spaceId}-${sourceId}-`; + +export const getJobId = (spaceId: string, sourceId: string, jobType: string) => + `${getJobIdPrefix(spaceId, sourceId)}${jobType}`; + +export const getDatafeedId = (spaceId: string, sourceId: string, jobType: string) => + `datafeed-${getJobId(spaceId, sourceId, jobType)}`; + +export const jobSourceConfigurationRT = rt.type({ + indexPattern: rt.string, + timestampField: rt.string, + bucketSpan: rt.number, +}); + +export type JobSourceConfiguration = rt.TypeOf; diff --git a/x-pack/plugins/infra/common/log_analysis/log_analysis.ts b/x-pack/plugins/infra/common/log_analysis/log_analysis.ts new file mode 100644 index 0000000000000..4a6f20d549799 --- /dev/null +++ b/x-pack/plugins/infra/common/log_analysis/log_analysis.ts @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +export const jobTypeRT = rt.keyof({ + 'log-entry-rate': null, +}); + +export type JobType = rt.TypeOf; + +// combines and abstracts job and datafeed status +export type JobStatus = + | 'unknown' + | 'missing' + | 'initializing' + | 'stopped' + | 'started' + | 'finished' + | 'failed'; + +export type SetupStatus = + | 'initializing' // acquiring job statuses to determine setup status + | 'unknown' // job status could not be acquired (failed request etc) + | 'required' // jobs are missing + | 'requiredForReconfiguration' // the configurations don't match the source configurations + | 'requiredForUpdate' // the definitions don't match the module definitions + | 'pending' // In the process of setting up the module for the first time or retrying, waiting for response + | 'succeeded' // setup succeeded, notifying user + | 'failed' // setup failed, notifying user + | 'hiddenAfterSuccess' // hide the setup screen and we show the results for the first time + | 'skipped' // setup hidden because the module is in a correct state already + | 'skippedButReconfigurable' // setup hidden even though the job configurations are outdated + | 'skippedButUpdatable'; // setup hidden even though the job definitions are outdated + +/** + * Maps a job status to the possibility that results have already been produced + * before this state was reached. + */ +export const isJobStatusWithResults = (jobStatus: JobStatus) => + ['started', 'finished', 'stopped', 'failed'].includes(jobStatus); + +export const isHealthyJobStatus = (jobStatus: JobStatus) => + ['started', 'finished'].includes(jobStatus); + +/** + * Maps a setup status to the possibility that results have already been + * produced before this state was reached. 
+ */ +export const isSetupStatusWithResults = (setupStatus: SetupStatus) => + ['skipped', 'hiddenAfterSuccess', 'skippedButReconfigurable', 'skippedButUpdatable'].includes( + setupStatus + ); + +const KIBANA_SAMPLE_DATA_INDICES = ['kibana_sample_data_logs*']; + +export const isExampleDataIndex = (indexName: string) => + KIBANA_SAMPLE_DATA_INDICES.includes(indexName); diff --git a/x-pack/plugins/infra/common/log_entry/index.ts b/x-pack/plugins/infra/common/log_entry/index.ts new file mode 100644 index 0000000000000..66cc5108b6692 --- /dev/null +++ b/x-pack/plugins/infra/common/log_entry/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_entry'; diff --git a/x-pack/plugins/infra/common/log_entry/log_entry.ts b/x-pack/plugins/infra/common/log_entry/log_entry.ts new file mode 100644 index 0000000000000..e02acebe27711 --- /dev/null +++ b/x-pack/plugins/infra/common/log_entry/log_entry.ts @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TimeKey } from '../time'; +import { InfraLogEntry } from '../graphql/types'; + +export type LogEntry = InfraLogEntry; + +export interface LogEntryOrigin { + id: string; + index: string; + type: string; +} + +export type LogEntryTime = TimeKey; + +export interface LogEntryFieldsMapping { + message: string; + tiebreaker: string; + time: string; +} + +export function isEqual(time1: LogEntryTime, time2: LogEntryTime) { + return time1.time === time2.time && time1.tiebreaker === time2.tiebreaker; +} + +export function isLess(time1: LogEntryTime, time2: LogEntryTime) { + return ( + time1.time < time2.time || (time1.time === time2.time && time1.tiebreaker < time2.tiebreaker) + ); +} + +export function isLessOrEqual(time1: LogEntryTime, time2: LogEntryTime) { + return ( + time1.time < time2.time || (time1.time === time2.time && time1.tiebreaker <= time2.tiebreaker) + ); +} + +export function isBetween(min: LogEntryTime, max: LogEntryTime, operand: LogEntryTime) { + return isLessOrEqual(min, operand) && isLessOrEqual(operand, max); +} diff --git a/x-pack/plugins/infra/common/log_search_result/index.ts b/x-pack/plugins/infra/common/log_search_result/index.ts new file mode 100644 index 0000000000000..6795cc1543798 --- /dev/null +++ b/x-pack/plugins/infra/common/log_search_result/index.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export { + getSearchResultIndexBeforeTime, + getSearchResultIndexAfterTime, + getSearchResultKey, + SearchResult, +} from './log_search_result'; diff --git a/x-pack/plugins/infra/common/log_search_result/log_search_result.ts b/x-pack/plugins/infra/common/log_search_result/log_search_result.ts new file mode 100644 index 0000000000000..a56a9d8e3531c --- /dev/null +++ b/x-pack/plugins/infra/common/log_search_result/log_search_result.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { bisector } from 'd3-array'; + +import { compareToTimeKey, TimeKey } from '../time'; + +export interface SearchResult { + gid: string; + fields: TimeKey; + matches: SearchResultFieldMatches; +} + +export interface SearchResultFieldMatches { + [field: string]: string[]; +} + +export const getSearchResultKey = (result: SearchResult) => + ({ + gid: result.gid, + tiebreaker: result.fields.tiebreaker, + time: result.fields.time, + } as TimeKey); + +const searchResultTimeBisector = bisector(compareToTimeKey(getSearchResultKey)); +export const getSearchResultIndexBeforeTime = searchResultTimeBisector.left; +export const getSearchResultIndexAfterTime = searchResultTimeBisector.right; diff --git a/x-pack/plugins/infra/common/log_search_summary/index.ts b/x-pack/plugins/infra/common/log_search_summary/index.ts new file mode 100644 index 0000000000000..4ba04cb3ea6a4 --- /dev/null +++ b/x-pack/plugins/infra/common/log_search_summary/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export { SearchSummaryBucket } from './log_search_summary'; diff --git a/x-pack/plugins/infra/common/log_search_summary/log_search_summary.ts b/x-pack/plugins/infra/common/log_search_summary/log_search_summary.ts new file mode 100644 index 0000000000000..72cf643311798 --- /dev/null +++ b/x-pack/plugins/infra/common/log_search_summary/log_search_summary.ts @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SearchResult } from '../log_search_result'; + +export interface SearchSummaryBucket { + start: number; + end: number; + count: number; + representative: SearchResult; +} diff --git a/x-pack/plugins/infra/common/log_text_scale/index.ts b/x-pack/plugins/infra/common/log_text_scale/index.ts new file mode 100644 index 0000000000000..7fee2bbd398bd --- /dev/null +++ b/x-pack/plugins/infra/common/log_text_scale/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_text_scale'; diff --git a/x-pack/plugins/infra/common/log_text_scale/log_text_scale.ts b/x-pack/plugins/infra/common/log_text_scale/log_text_scale.ts new file mode 100644 index 0000000000000..6b8b26498ea45 --- /dev/null +++ b/x-pack/plugins/infra/common/log_text_scale/log_text_scale.ts @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export type TextScale = 'small' | 'medium' | 'large'; + +export function isTextScale(maybeTextScale: string): maybeTextScale is TextScale { + return ['small', 'medium', 'large'].includes(maybeTextScale); +} diff --git a/x-pack/plugins/infra/common/runtime_types.ts b/x-pack/plugins/infra/common/runtime_types.ts new file mode 100644 index 0000000000000..297743f9b3456 --- /dev/null +++ b/x-pack/plugins/infra/common/runtime_types.ts @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { Errors } from 'io-ts'; +import { failure } from 'io-ts/lib/PathReporter'; + +export const createPlainError = (message: string) => new Error(message); + +export const throwErrors = (createError: (message: string) => Error) => (errors: Errors) => { + throw createError(failure(errors).join('\n')); +}; diff --git a/x-pack/plugins/infra/common/saved_objects/inventory_view.ts b/x-pack/plugins/infra/common/saved_objects/inventory_view.ts new file mode 100644 index 0000000000000..c86be102f85a8 --- /dev/null +++ b/x-pack/plugins/infra/common/saved_objects/inventory_view.ts @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { ElasticsearchMappingOf } from '../../server/utils/typed_elasticsearch_mappings'; +import { WaffleViewState } from '../../public/containers/waffle/with_waffle_view_state'; + +export const inventoryViewSavedObjectType = 'inventory-view'; +import { SavedViewSavedObject } from '../../public/hooks/use_saved_view'; + +export const inventoryViewSavedObjectMappings: { + [inventoryViewSavedObjectType]: ElasticsearchMappingOf>; +} = { + [inventoryViewSavedObjectType]: { + properties: { + name: { + type: 'keyword', + }, + metric: { + properties: { + type: { + type: 'keyword', + }, + }, + }, + groupBy: { + type: 'nested', + properties: { + label: { + type: 'keyword', + }, + field: { + type: 'keyword', + }, + }, + }, + nodeType: { + type: 'keyword', + }, + view: { + type: 'keyword', + }, + customOptions: { + type: 'nested', + properties: { + text: { + type: 'keyword', + }, + field: { + type: 'keyword', + }, + }, + }, + boundsOverride: { + properties: { + max: { + type: 'integer', + }, + min: { + type: 'integer', + }, + }, + }, + autoBounds: { + type: 'boolean', + }, + time: { + type: 'integer', + }, + autoReload: { + type: 'boolean', + }, + filterQuery: { + properties: { + kind: { + type: 'keyword', + }, + expression: { + type: 'keyword', + }, + }, + }, + }, + }, +}; diff --git a/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts b/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts new file mode 100644 index 0000000000000..e4ec71907eaa8 --- /dev/null +++ b/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { ElasticsearchMappingOf } from '../../server/utils/typed_elasticsearch_mappings'; +import { + MetricsExplorerOptions, + MetricsExplorerChartOptions, + MetricsExplorerTimeOptions, +} from '../../public/containers/metrics_explorer/use_metrics_explorer_options'; +import { SavedViewSavedObject } from '../../public/hooks/use_saved_view'; + +interface MetricsExplorerSavedView { + options: MetricsExplorerOptions; + chartOptions: MetricsExplorerChartOptions; + currentTimerange: MetricsExplorerTimeOptions; +} + +export const metricsExplorerViewSavedObjectType = 'metrics-explorer-view'; + +export const metricsExplorerViewSavedObjectMappings: { + [metricsExplorerViewSavedObjectType]: ElasticsearchMappingOf< + SavedViewSavedObject + >; +} = { + [metricsExplorerViewSavedObjectType]: { + properties: { + name: { + type: 'keyword', + }, + options: { + properties: { + metrics: { + type: 'nested', + properties: { + aggregation: { + type: 'keyword', + }, + field: { + type: 'keyword', + }, + color: { + type: 'keyword', + }, + label: { + type: 'keyword', + }, + }, + }, + limit: { + type: 'integer', + }, + groupBy: { + type: 'keyword', + }, + filterQuery: { + type: 'keyword', + }, + aggregation: { + type: 'keyword', + }, + }, + }, + chartOptions: { + properties: { + type: { + type: 'keyword', + }, + yAxisMode: { + type: 'keyword', + }, + stack: { + type: 'boolean', + }, + }, + }, + currentTimerange: { + properties: { + from: { + type: 'keyword', + }, + to: { + type: 'keyword', + }, + interval: { + type: 'keyword', + }, + }, + }, + }, + }, +}; diff --git a/x-pack/plugins/infra/common/time/index.ts b/x-pack/plugins/infra/common/time/index.ts new file mode 100644 index 0000000000000..f49d46fa4920f --- /dev/null +++ b/x-pack/plugins/infra/common/time/index.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './time_unit'; +export * from './time_scale'; +export * from './time_key'; diff --git a/x-pack/plugins/infra/common/time/time_key.ts b/x-pack/plugins/infra/common/time/time_key.ts new file mode 100644 index 0000000000000..117cd38314de0 --- /dev/null +++ b/x-pack/plugins/infra/common/time/time_key.ts @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { ascending, bisector } from 'd3-array'; +import pick from 'lodash/fp/pick'; + +export interface TimeKey { + time: number; + tiebreaker: number; + gid?: string; + fromAutoReload?: boolean; +} + +export interface UniqueTimeKey extends TimeKey { + gid: string; +} + +export type Comparator = (firstValue: any, secondValue: any) => number; + +export const isTimeKey = (value: any): value is TimeKey => + value && + typeof value === 'object' && + typeof value.time === 'number' && + typeof value.tiebreaker === 'number'; + +export const pickTimeKey = (value: T): TimeKey => + pick(['time', 'tiebreaker'], value); + +export function compareTimeKeys( + firstKey: TimeKey, + secondKey: TimeKey, + compareValues: Comparator = ascending +): number { + const timeComparison = compareValues(firstKey.time, secondKey.time); + + if (timeComparison === 0) { + const tiebreakerComparison = compareValues(firstKey.tiebreaker, secondKey.tiebreaker); + + if ( + tiebreakerComparison === 0 && + typeof firstKey.gid !== 'undefined' && + typeof secondKey.gid !== 'undefined' + ) { + return compareValues(firstKey.gid, secondKey.gid); + } + + return tiebreakerComparison; + } + + return timeComparison; +} + +export const compareToTimeKey = ( + keyAccessor: (value: Value) => TimeKey, + compareValues?: 
Comparator +) => (value: Value, key: TimeKey) => compareTimeKeys(keyAccessor(value), key, compareValues); + +export const getIndexAtTimeKey = ( + keyAccessor: (value: Value) => TimeKey, + compareValues?: Comparator +) => { + const comparator = compareToTimeKey(keyAccessor, compareValues); + const collectionBisector = bisector(comparator); + + return (collection: Value[], key: TimeKey): number | null => { + const index = collectionBisector.left(collection, key); + + if (index >= collection.length) { + return null; + } + + if (comparator(collection[index], key) !== 0) { + return null; + } + + return index; + }; +}; + +export const timeKeyIsBetween = (min: TimeKey, max: TimeKey, operand: TimeKey) => + compareTimeKeys(min, operand) <= 0 && compareTimeKeys(max, operand) >= 0; + +export const getPreviousTimeKey = (timeKey: TimeKey) => ({ + ...timeKey, + time: timeKey.time, + tiebreaker: timeKey.tiebreaker - 1, +}); + +export const getNextTimeKey = (timeKey: TimeKey) => ({ + ...timeKey, + time: timeKey.time, + tiebreaker: timeKey.tiebreaker + 1, +}); diff --git a/x-pack/plugins/infra/common/time/time_scale.ts b/x-pack/plugins/infra/common/time/time_scale.ts new file mode 100644 index 0000000000000..0381f294f81cb --- /dev/null +++ b/x-pack/plugins/infra/common/time/time_scale.ts @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { TimeUnit, timeUnitLabels } from './time_unit'; + +export interface TimeScale { + unit: TimeUnit; + value: number; +} + +export const getMillisOfScale = (scale: TimeScale) => scale.unit * scale.value; + +export const getLabelOfScale = (scale: TimeScale) => `${scale.value}${timeUnitLabels[scale.unit]}`; + +export const decomposeIntoUnits = (time: number, units: TimeUnit[]) => + units.reduce((result, unitMillis) => { + const offset = result.reduce( + (accumulatedOffset, timeScale) => accumulatedOffset + getMillisOfScale(timeScale), + 0 + ); + const value = Math.floor((time - offset) / unitMillis); + + if (value > 0) { + return [ + ...result, + { + unit: unitMillis, + value, + }, + ]; + } else { + return result; + } + }, []); diff --git a/x-pack/plugins/infra/common/time/time_unit.ts b/x-pack/plugins/infra/common/time/time_unit.ts new file mode 100644 index 0000000000000..4273a9fcf2ef3 --- /dev/null +++ b/x-pack/plugins/infra/common/time/time_unit.ts @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export enum TimeUnit { + Millisecond = 1, + Second = Millisecond * 1000, + Minute = Second * 60, + Hour = Minute * 60, + Day = Hour * 24, + Month = Day * 30, + Year = Month * 12, +} + +export type ElasticsearchTimeUnit = 's' | 'm' | 'h' | 'd' | 'M' | 'y'; + +export const timeUnitLabels = { + [TimeUnit.Millisecond]: 'ms', + [TimeUnit.Second]: 's', + [TimeUnit.Minute]: 'm', + [TimeUnit.Hour]: 'h', + [TimeUnit.Day]: 'd', + [TimeUnit.Month]: 'M', + [TimeUnit.Year]: 'y', +}; + +export const elasticSearchTimeUnits: { + [key: string]: ElasticsearchTimeUnit; +} = { + [TimeUnit.Second]: 's', + [TimeUnit.Minute]: 'm', + [TimeUnit.Hour]: 'h', + [TimeUnit.Day]: 'd', + [TimeUnit.Month]: 'M', + [TimeUnit.Year]: 'y', +}; + +export const getElasticSearchTimeUnit = (scale: TimeUnit): ElasticsearchTimeUnit => + elasticSearchTimeUnits[scale]; diff --git a/x-pack/plugins/infra/common/typed_json.ts b/x-pack/plugins/infra/common/typed_json.ts new file mode 100644 index 0000000000000..98b5456fe44b8 --- /dev/null +++ b/x-pack/plugins/infra/common/typed_json.ts @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export type JsonValue = null | boolean | number | string | JsonObject | JsonArray; + +// eslint-disable-next-line @typescript-eslint/no-empty-interface +export interface JsonArray extends Array {} + +export interface JsonObject { + [key: string]: JsonValue; +} diff --git a/x-pack/plugins/infra/common/utility_types.ts b/x-pack/plugins/infra/common/utility_types.ts new file mode 100644 index 0000000000000..93fc9b729ca74 --- /dev/null +++ b/x-pack/plugins/infra/common/utility_types.ts @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export type Pick2 = { + [P1 in K1]: { [P2 in K2]: T[K1][P2] }; +}; +export type Pick3 = { + [P1 in K1]: { [P2 in K2]: { [P3 in K3]: T[K1][K2][P3] } }; +}; + +export type MandatoryProperty = T & + { + [prop in Prop]-?: NonNullable; + }; + +/** + * Portions of below code are derived from https://github.com/tycho01/typical + * under the MIT License + * + * Copyright (c) 2017 Thomas Crockett + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + */ + +export type DeepPartial = T extends any[] + ? DeepPartialArray + : T extends object + ? DeepPartialObject + : T; + +// eslint-disable-next-line @typescript-eslint/no-empty-interface +interface DeepPartialArray extends Array> {} + +type DeepPartialObject = { [P in keyof T]+?: DeepPartial }; diff --git a/x-pack/plugins/infra/server/features.ts b/x-pack/plugins/infra/server/features.ts new file mode 100644 index 0000000000000..fc20813c777b6 --- /dev/null +++ b/x-pack/plugins/infra/server/features.ts @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
import { i18n } from '@kbn/i18n';

// Feature descriptor for the Metrics (Infrastructure) app. Registered with
// the x-pack feature/privilege registry; the `all` and `read` privilege
// levels differ in saved-object write access and available UI capabilities.
export const METRICS_FEATURE = {
  id: 'infrastructure',
  name: i18n.translate('xpack.infra.featureRegistry.linkInfrastructureTitle', {
    defaultMessage: 'Infrastructure',
  }),
  icon: 'infraApp',
  navLinkId: 'infra:home',
  app: ['infra', 'kibana'],
  catalogue: ['infraops'],
  privileges: {
    all: {
      api: ['infra'],
      savedObject: {
        // Full access to the source configuration; index patterns stay read-only.
        all: ['infrastructure-ui-source'],
        read: ['index-pattern'],
      },
      ui: ['show', 'configureSource', 'save'],
    },
    read: {
      api: ['infra'],
      savedObject: {
        all: [],
        read: ['infrastructure-ui-source', 'index-pattern'],
      },
      ui: ['show'],
    },
  },
};

// Feature descriptor for the Logs app; mirrors METRICS_FEATURE's structure
// but does not grant index-pattern read access in the `all` privilege.
export const LOGS_FEATURE = {
  id: 'logs',
  name: i18n.translate('xpack.infra.featureRegistry.linkLogsTitle', {
    defaultMessage: 'Logs',
  }),
  icon: 'loggingApp',
  navLinkId: 'infra:logs',
  app: ['infra', 'kibana'],
  catalogue: ['infralogging'],
  privileges: {
    all: {
      api: ['infra'],
      savedObject: {
        all: ['infrastructure-ui-source'],
        read: [],
      },
      ui: ['show', 'configureSource', 'save'],
    },
    read: {
      api: ['infra'],
      savedObject: {
        all: [],
        read: ['infrastructure-ui-source'],
      },
      ui: ['show'],
    },
  },
};
import { rootSchema } from '../../common/graphql/root/schema.gql';
import { sharedSchema } from '../../common/graphql/shared/schema.gql';
import { logEntriesSchema } from './log_entries/schema.gql';
import { sourceStatusSchema } from './source_status/schema.gql';
import { sourcesSchema } from './sources/schema.gql';

// All GraphQL schema fragments served by the infra plugin, combined into the
// executable schema by the server's GraphQL setup.
export const schemas = [
  rootSchema,
  sharedSchema,
  logEntriesSchema,
  sourcesSchema,
  sourceStatusSchema,
];
+ */ + +import { + InfraLogEntryColumn, + InfraLogEntryFieldColumn, + InfraLogEntryMessageColumn, + InfraLogEntryTimestampColumn, + InfraLogMessageConstantSegment, + InfraLogMessageFieldSegment, + InfraLogMessageSegment, + InfraSourceResolvers, +} from '../../graphql/types'; +import { InfraLogEntriesDomain } from '../../lib/domains/log_entries_domain'; +import { parseFilterQuery } from '../../utils/serialized_query'; +import { ChildResolverOf, InfraResolverOf } from '../../utils/typed_resolvers'; +import { QuerySourceResolver } from '../sources/resolvers'; + +export type InfraSourceLogEntriesAroundResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceLogEntriesBetweenResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceLogEntryHighlightsResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export const createLogEntriesResolvers = (libs: { + logEntries: InfraLogEntriesDomain; +}): { + InfraSource: { + logEntriesAround: InfraSourceLogEntriesAroundResolver; + logEntriesBetween: InfraSourceLogEntriesBetweenResolver; + logEntryHighlights: InfraSourceLogEntryHighlightsResolver; + }; + InfraLogEntryColumn: { + __resolveType( + logEntryColumn: InfraLogEntryColumn + ): + | 'InfraLogEntryTimestampColumn' + | 'InfraLogEntryMessageColumn' + | 'InfraLogEntryFieldColumn' + | null; + }; + InfraLogMessageSegment: { + __resolveType( + messageSegment: InfraLogMessageSegment + ): 'InfraLogMessageFieldSegment' | 'InfraLogMessageConstantSegment' | null; + }; +} => ({ + InfraSource: { + async logEntriesAround(source, args, { req }) { + const countBefore = args.countBefore || 0; + const countAfter = args.countAfter || 0; + + const { entriesBefore, entriesAfter } = await libs.logEntries.getLogEntriesAround( + req, + source.id, + args.key, + countBefore + 1, + countAfter + 1, + parseFilterQuery(args.filterQuery) + ); + + const hasMoreBefore = entriesBefore.length > countBefore; 
+ const hasMoreAfter = entriesAfter.length > countAfter; + + const entries = [ + ...(hasMoreBefore ? entriesBefore.slice(1) : entriesBefore), + ...(hasMoreAfter ? entriesAfter.slice(0, -1) : entriesAfter), + ]; + + return { + start: entries.length > 0 ? entries[0].key : null, + end: entries.length > 0 ? entries[entries.length - 1].key : null, + hasMoreBefore, + hasMoreAfter, + filterQuery: args.filterQuery, + entries, + }; + }, + async logEntriesBetween(source, args, { req }) { + const entries = await libs.logEntries.getLogEntriesBetween( + req, + source.id, + args.startKey, + args.endKey, + parseFilterQuery(args.filterQuery) + ); + + return { + start: entries.length > 0 ? entries[0].key : null, + end: entries.length > 0 ? entries[entries.length - 1].key : null, + hasMoreBefore: true, + hasMoreAfter: true, + filterQuery: args.filterQuery, + entries, + }; + }, + async logEntryHighlights(source, args, { req }) { + const highlightedLogEntrySets = await libs.logEntries.getLogEntryHighlights( + req, + source.id, + args.startKey, + args.endKey, + args.highlights.filter(highlightInput => !!highlightInput.query), + parseFilterQuery(args.filterQuery) + ); + + return highlightedLogEntrySets.map(entries => ({ + start: entries.length > 0 ? entries[0].key : null, + end: entries.length > 0 ? 
entries[entries.length - 1].key : null, + hasMoreBefore: true, + hasMoreAfter: true, + filterQuery: args.filterQuery, + entries, + })); + }, + }, + InfraLogEntryColumn: { + __resolveType(logEntryColumn) { + if (isTimestampColumn(logEntryColumn)) { + return 'InfraLogEntryTimestampColumn'; + } + + if (isMessageColumn(logEntryColumn)) { + return 'InfraLogEntryMessageColumn'; + } + + if (isFieldColumn(logEntryColumn)) { + return 'InfraLogEntryFieldColumn'; + } + + return null; + }, + }, + InfraLogMessageSegment: { + __resolveType(messageSegment) { + if (isConstantSegment(messageSegment)) { + return 'InfraLogMessageConstantSegment'; + } + + if (isFieldSegment(messageSegment)) { + return 'InfraLogMessageFieldSegment'; + } + + return null; + }, + }, +}); + +const isTimestampColumn = (column: InfraLogEntryColumn): column is InfraLogEntryTimestampColumn => + 'timestamp' in column; + +const isMessageColumn = (column: InfraLogEntryColumn): column is InfraLogEntryMessageColumn => + 'message' in column; + +const isFieldColumn = (column: InfraLogEntryColumn): column is InfraLogEntryFieldColumn => + 'field' in column && 'value' in column; + +const isConstantSegment = ( + segment: InfraLogMessageSegment +): segment is InfraLogMessageConstantSegment => 'constant' in segment; + +const isFieldSegment = (segment: InfraLogMessageSegment): segment is InfraLogMessageFieldSegment => + 'field' in segment && 'value' in segment && 'highlights' in segment; diff --git a/x-pack/plugins/infra/server/graphql/log_entries/schema.gql.ts b/x-pack/plugins/infra/server/graphql/log_entries/schema.gql.ts new file mode 100644 index 0000000000000..945f2f85435e5 --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/log_entries/schema.gql.ts @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import gql from 'graphql-tag'; + +export const logEntriesSchema = gql` + "A segment of the log entry message that was derived from a field" + type InfraLogMessageFieldSegment { + "The field the segment was derived from" + field: String! + "The segment's message" + value: String! + "A list of highlighted substrings of the value" + highlights: [String!]! + } + + "A segment of the log entry message that was derived from a string literal" + type InfraLogMessageConstantSegment { + "The segment's message" + constant: String! + } + + "A segment of the log entry message" + union InfraLogMessageSegment = InfraLogMessageFieldSegment | InfraLogMessageConstantSegment + + "A special built-in column that contains the log entry's timestamp" + type InfraLogEntryTimestampColumn { + "The id of the corresponding column configuration" + columnId: ID! + "The timestamp" + timestamp: Float! + } + + "A special built-in column that contains the log entry's constructed message" + type InfraLogEntryMessageColumn { + "The id of the corresponding column configuration" + columnId: ID! + "A list of the formatted log entry segments" + message: [InfraLogMessageSegment!]! + } + + "A column that contains the value of a field of the log entry" + type InfraLogEntryFieldColumn { + "The id of the corresponding column configuration" + columnId: ID! + "The field name of the column" + field: String! + "The value of the field in the log entry" + value: String! + "A list of highlighted substrings of the value" + highlights: [String!]! + } + + "A column of a log entry" + union InfraLogEntryColumn = + InfraLogEntryTimestampColumn + | InfraLogEntryMessageColumn + | InfraLogEntryFieldColumn + + "A log entry" + type InfraLogEntry { + "A unique representation of the log entry's position in the event stream" + key: InfraTimeKey! + "The log entry's id" + gid: String! + "The source id" + source: String! + "The columns used for rendering the log entry" + columns: [InfraLogEntryColumn!]! 
+ } + + "A highlighting definition" + input InfraLogEntryHighlightInput { + "The query to highlight by" + query: String! + "The number of highlighted documents to include beyond the beginning of the interval" + countBefore: Int! + "The number of highlighted documents to include beyond the end of the interval" + countAfter: Int! + } + + "A consecutive sequence of log entries" + type InfraLogEntryInterval { + "The key corresponding to the start of the interval covered by the entries" + start: InfraTimeKey + "The key corresponding to the end of the interval covered by the entries" + end: InfraTimeKey + "Whether there are more log entries available before the start" + hasMoreBefore: Boolean! + "Whether there are more log entries available after the end" + hasMoreAfter: Boolean! + "The query the log entries were filtered by" + filterQuery: String + "The query the log entries were highlighted with" + highlightQuery: String + "A list of the log entries" + entries: [InfraLogEntry!]! + } + + extend type InfraSource { + "A consecutive span of log entries surrounding a point in time" + logEntriesAround( + "The sort key that corresponds to the point in time" + key: InfraTimeKeyInput! + "The maximum number of preceding to return" + countBefore: Int = 0 + "The maximum number of following to return" + countAfter: Int = 0 + "The query to filter the log entries by" + filterQuery: String + ): InfraLogEntryInterval! + "A consecutive span of log entries within an interval" + logEntriesBetween( + "The sort key that corresponds to the start of the interval" + startKey: InfraTimeKeyInput! + "The sort key that corresponds to the end of the interval" + endKey: InfraTimeKeyInput! + "The query to filter the log entries by" + filterQuery: String + ): InfraLogEntryInterval! + "Sequences of log entries matching sets of highlighting queries within an interval" + logEntryHighlights( + "The sort key that corresponds to the start of the interval" + startKey: InfraTimeKeyInput! 
+ "The sort key that corresponds to the end of the interval" + endKey: InfraTimeKeyInput! + "The query to filter the log entries by" + filterQuery: String + "The highlighting to apply to the log entries" + highlights: [InfraLogEntryHighlightInput!]! + ): [InfraLogEntryInterval!]! + } +`; diff --git a/x-pack/plugins/infra/server/graphql/source_status/index.ts b/x-pack/plugins/infra/server/graphql/source_status/index.ts new file mode 100644 index 0000000000000..abc91fa3815c8 --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/source_status/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export { createSourceStatusResolvers } from './resolvers'; diff --git a/x-pack/plugins/infra/server/graphql/source_status/resolvers.ts b/x-pack/plugins/infra/server/graphql/source_status/resolvers.ts new file mode 100644 index 0000000000000..848d66058e64c --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/source_status/resolvers.ts @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { InfraIndexType, InfraSourceStatusResolvers } from '../../graphql/types'; +import { InfraFieldsDomain } from '../../lib/domains/fields_domain'; +import { InfraSourceStatus } from '../../lib/source_status'; +import { ChildResolverOf, InfraResolverOf } from '../../utils/typed_resolvers'; +import { QuerySourceResolver } from '../sources/resolvers'; + +export type InfraSourceStatusMetricAliasExistsResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusMetricIndicesExistResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusMetricIndicesResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusLogAliasExistsResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusLogIndicesExistResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusLogIndicesResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export type InfraSourceStatusIndexFieldsResolver = ChildResolverOf< + InfraResolverOf, + QuerySourceResolver +>; + +export const createSourceStatusResolvers = (libs: { + sourceStatus: InfraSourceStatus; + fields: InfraFieldsDomain; +}): { + InfraSourceStatus: { + metricAliasExists: InfraSourceStatusMetricAliasExistsResolver; + metricIndicesExist: InfraSourceStatusMetricIndicesExistResolver; + metricIndices: InfraSourceStatusMetricIndicesResolver; + logAliasExists: InfraSourceStatusLogAliasExistsResolver; + logIndicesExist: InfraSourceStatusLogIndicesExistResolver; + logIndices: InfraSourceStatusLogIndicesResolver; + indexFields: InfraSourceStatusIndexFieldsResolver; + }; +} => ({ + InfraSourceStatus: { + async metricAliasExists(source, args, { req }) { + return await libs.sourceStatus.hasMetricAlias(req, source.id); + }, + async metricIndicesExist(source, args, { req }) { + return await 
libs.sourceStatus.hasMetricIndices(req, source.id); + }, + async metricIndices(source, args, { req }) { + return await libs.sourceStatus.getMetricIndexNames(req, source.id); + }, + async logAliasExists(source, args, { req }) { + return await libs.sourceStatus.hasLogAlias(req, source.id); + }, + async logIndicesExist(source, args, { req }) { + return await libs.sourceStatus.hasLogIndices(req, source.id); + }, + async logIndices(source, args, { req }) { + return await libs.sourceStatus.getLogIndexNames(req, source.id); + }, + async indexFields(source, args, { req }) { + const fields = await libs.fields.getFields( + req, + source.id, + args.indexType || InfraIndexType.ANY + ); + return fields; + }, + }, +}); diff --git a/x-pack/plugins/infra/server/graphql/source_status/schema.gql.ts b/x-pack/plugins/infra/server/graphql/source_status/schema.gql.ts new file mode 100644 index 0000000000000..e0482382c6d6a --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/source_status/schema.gql.ts @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import gql from 'graphql-tag'; + +export const sourceStatusSchema = gql` + "A descriptor of a field in an index" + type InfraIndexField { + "The name of the field" + name: String! + "The type of the field's values as recognized by Kibana" + type: String! + "Whether the field's values can be efficiently searched for" + searchable: Boolean! + "Whether the field's values can be aggregated" + aggregatable: Boolean! + "Whether the field should be displayed based on event.module and a ECS allowed list" + displayable: Boolean! + } + + extend type InfraSourceStatus { + "Whether the configured metric alias exists" + metricAliasExists: Boolean! + "Whether the configured log alias exists" + logAliasExists: Boolean! 
+ "Whether the configured alias or wildcard pattern resolve to any metric indices" + metricIndicesExist: Boolean! + "Whether the configured alias or wildcard pattern resolve to any log indices" + logIndicesExist: Boolean! + "The list of indices in the metric alias" + metricIndices: [String!]! + "The list of indices in the log alias" + logIndices: [String!]! + "The list of fields defined in the index mappings" + indexFields(indexType: InfraIndexType = ANY): [InfraIndexField!]! + } +`; diff --git a/x-pack/plugins/infra/server/graphql/sources/index.ts b/x-pack/plugins/infra/server/graphql/sources/index.ts new file mode 100644 index 0000000000000..ee187d8c31bec --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/sources/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export { createSourcesResolvers } from './resolvers'; +export { sourcesSchema } from './schema.gql'; diff --git a/x-pack/plugins/infra/server/graphql/sources/resolvers.ts b/x-pack/plugins/infra/server/graphql/sources/resolvers.ts new file mode 100644 index 0000000000000..1fe1431392a38 --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/sources/resolvers.ts @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { UserInputError } from 'apollo-server-errors'; +import { failure } from 'io-ts/lib/PathReporter'; + +import { identity } from 'fp-ts/lib/function'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { + InfraSourceLogColumn, + InfraSourceResolvers, + MutationResolvers, + QueryResolvers, + UpdateSourceLogColumnInput, +} from '../../graphql/types'; +import { InfraSourceStatus } from '../../lib/source_status'; +import { + InfraSources, + SavedSourceConfigurationFieldColumnRuntimeType, + SavedSourceConfigurationMessageColumnRuntimeType, + SavedSourceConfigurationTimestampColumnRuntimeType, + SavedSourceConfigurationColumnRuntimeType, +} from '../../lib/sources'; +import { + ChildResolverOf, + InfraResolverOf, + InfraResolverWithFields, + ResultOf, +} from '../../utils/typed_resolvers'; + +export type QuerySourceResolver = InfraResolverWithFields< + QueryResolvers.SourceResolver, + 'id' | 'version' | 'updatedAt' | 'configuration' +>; + +export type QueryAllSourcesResolver = InfraResolverWithFields< + QueryResolvers.AllSourcesResolver, + 'id' | 'version' | 'updatedAt' | 'configuration' +>; + +export type InfraSourceStatusResolver = ChildResolverOf< + InfraResolverOf>>, + QuerySourceResolver +>; + +export type MutationCreateSourceResolver = InfraResolverOf< + MutationResolvers.CreateSourceResolver<{ + source: ResultOf; + }> +>; + +export type MutationDeleteSourceResolver = InfraResolverOf; + +export type MutationUpdateSourceResolver = InfraResolverOf< + MutationResolvers.UpdateSourceResolver<{ + source: ResultOf; + }> +>; + +interface SourcesResolversDeps { + sources: InfraSources; + sourceStatus: InfraSourceStatus; +} + +export const createSourcesResolvers = ( + libs: SourcesResolversDeps +): { + Query: { + source: QuerySourceResolver; + allSources: QueryAllSourcesResolver; + }; + InfraSource: { + status: InfraSourceStatusResolver; + }; + InfraSourceLogColumn: { + __resolveType( + logColumn: InfraSourceLogColumn 
+ ): + | 'InfraSourceTimestampLogColumn' + | 'InfraSourceMessageLogColumn' + | 'InfraSourceFieldLogColumn' + | null; + }; + Mutation: { + createSource: MutationCreateSourceResolver; + deleteSource: MutationDeleteSourceResolver; + updateSource: MutationUpdateSourceResolver; + }; +} => ({ + Query: { + async source(root, args, { req }) { + const requestedSourceConfiguration = await libs.sources.getSourceConfiguration(req, args.id); + + return requestedSourceConfiguration; + }, + async allSources(root, args, { req }) { + const sourceConfigurations = await libs.sources.getAllSourceConfigurations(req); + + return sourceConfigurations; + }, + }, + InfraSource: { + async status(source) { + return source; + }, + }, + InfraSourceLogColumn: { + __resolveType(logColumn) { + if (SavedSourceConfigurationTimestampColumnRuntimeType.is(logColumn)) { + return 'InfraSourceTimestampLogColumn'; + } + + if (SavedSourceConfigurationMessageColumnRuntimeType.is(logColumn)) { + return 'InfraSourceMessageLogColumn'; + } + + if (SavedSourceConfigurationFieldColumnRuntimeType.is(logColumn)) { + return 'InfraSourceFieldLogColumn'; + } + + return null; + }, + }, + Mutation: { + async createSource(root, args, { req }) { + const sourceConfiguration = await libs.sources.createSourceConfiguration( + req, + args.id, + compactObject({ + ...args.sourceProperties, + fields: args.sourceProperties.fields + ? compactObject(args.sourceProperties.fields) + : undefined, + logColumns: decodeLogColumns(args.sourceProperties.logColumns), + }) + ); + + return { + source: sourceConfiguration, + }; + }, + async deleteSource(root, args, { req }) { + await libs.sources.deleteSourceConfiguration(req, args.id); + + return { + id: args.id, + }; + }, + async updateSource(root, args, { req }) { + const updatedSourceConfiguration = await libs.sources.updateSourceConfiguration( + req, + args.id, + compactObject({ + ...args.sourceProperties, + fields: args.sourceProperties.fields + ? 
compactObject(args.sourceProperties.fields) + : undefined, + logColumns: decodeLogColumns(args.sourceProperties.logColumns), + }) + ); + + return { + source: updatedSourceConfiguration, + }; + }, + }, +}); + +type CompactObject = { [K in keyof T]: NonNullable }; + +const compactObject = (obj: T): CompactObject => + Object.entries(obj).reduce>( + (accumulatedObj, [key, value]) => + typeof value === 'undefined' || value === null + ? accumulatedObj + : { + ...(accumulatedObj as any), + [key]: value, + }, + {} as CompactObject + ); + +const decodeLogColumns = (logColumns?: UpdateSourceLogColumnInput[] | null) => + logColumns + ? logColumns.map(logColumn => + pipe( + SavedSourceConfigurationColumnRuntimeType.decode(logColumn), + fold(errors => { + throw new UserInputError(failure(errors).join('\n')); + }, identity) + ) + ) + : undefined; diff --git a/x-pack/plugins/infra/server/graphql/sources/schema.gql.ts b/x-pack/plugins/infra/server/graphql/sources/schema.gql.ts new file mode 100644 index 0000000000000..a39399cec7c32 --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/sources/schema.gql.ts @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import gql from 'graphql-tag'; + +export const sourcesSchema = gql` + "A source of infrastructure data" + type InfraSource { + "The id of the source" + id: ID! + "The version number the source configuration was last persisted with" + version: String + "The timestamp the source configuration was last persisted at" + updatedAt: Float + "The origin of the source (one of 'fallback', 'internal', 'stored')" + origin: String! + "The raw configuration of the source" + configuration: InfraSourceConfiguration! + "The status of the source" + status: InfraSourceStatus! 
+ } + + "The status of an infrastructure data source" + type InfraSourceStatus + + "A set of configuration options for an infrastructure data source" + type InfraSourceConfiguration { + "The name of the data source" + name: String! + "A description of the data source" + description: String! + "The alias to read metric data from" + metricAlias: String! + "The alias to read log data from" + logAlias: String! + "The field mapping to use for this source" + fields: InfraSourceFields! + "The columns to use for log display" + logColumns: [InfraSourceLogColumn!]! + } + + "A mapping of semantic fields to their document counterparts" + type InfraSourceFields { + "The field to identify a container by" + container: String! + "The fields to identify a host by" + host: String! + "The fields to use as the log message" + message: [String!]! + "The field to identify a pod by" + pod: String! + "The field to use as a tiebreaker for log events that have identical timestamps" + tiebreaker: String! + "The field to use as a timestamp for metrics and logs" + timestamp: String! + } + + "The built-in timestamp log column" + type InfraSourceTimestampLogColumn { + timestampColumn: InfraSourceTimestampLogColumnAttributes! + } + + type InfraSourceTimestampLogColumnAttributes { + "A unique id for the column" + id: ID! + } + + "The built-in message log column" + type InfraSourceMessageLogColumn { + messageColumn: InfraSourceMessageLogColumnAttributes! + } + + type InfraSourceMessageLogColumnAttributes { + "A unique id for the column" + id: ID! + } + + "A log column containing a field value" + type InfraSourceFieldLogColumn { + fieldColumn: InfraSourceFieldLogColumnAttributes! + } + + type InfraSourceFieldLogColumnAttributes { + "A unique id for the column" + id: ID! + "The field name this column refers to" + field: String! 
+ } + + "All known log column types" + union InfraSourceLogColumn = + InfraSourceTimestampLogColumn + | InfraSourceMessageLogColumn + | InfraSourceFieldLogColumn + + extend type Query { + """ + Get an infrastructure data source by id. + + The resolution order for the source configuration attributes is as follows + with the first defined value winning: + + 1. The attributes of the saved object with the given 'id'. + 2. The attributes defined in the static Kibana configuration key + 'xpack.infra.sources.default'. + 3. The hard-coded default values. + + As a consequence, querying a source that doesn't exist doesn't error out, + but returns the configured or hardcoded defaults. + """ + source("The id of the source" id: ID!): InfraSource! + "Get a list of all infrastructure data sources" + allSources: [InfraSource!]! + } + + "The properties to update the source with" + input UpdateSourceInput { + "The name of the data source" + name: String + "A description of the data source" + description: String + "The alias to read metric data from" + metricAlias: String + "The alias to read log data from" + logAlias: String + "The field mapping to use for this source" + fields: UpdateSourceFieldsInput + "The log columns to display for this source" + logColumns: [UpdateSourceLogColumnInput!] 
+ } + + "The mapping of semantic fields of the source to be created" + input UpdateSourceFieldsInput { + "The field to identify a container by" + container: String + "The fields to identify a host by" + host: String + "The field to identify a pod by" + pod: String + "The field to use as a tiebreaker for log events that have identical timestamps" + tiebreaker: String + "The field to use as a timestamp for metrics and logs" + timestamp: String + } + + "One of the log column types to display for this source" + input UpdateSourceLogColumnInput { + "A custom field log column" + fieldColumn: UpdateSourceFieldLogColumnInput + "A built-in message log column" + messageColumn: UpdateSourceMessageLogColumnInput + "A built-in timestamp log column" + timestampColumn: UpdateSourceTimestampLogColumnInput + } + + input UpdateSourceFieldLogColumnInput { + id: ID! + field: String! + } + + input UpdateSourceMessageLogColumnInput { + id: ID! + } + + input UpdateSourceTimestampLogColumnInput { + id: ID! + } + + "The result of a successful source update" + type UpdateSourceResult { + "The source that was updated" + source: InfraSource! + } + + "The result of a source deletion operations" + type DeleteSourceResult { + "The id of the source that was deleted" + id: ID! + } + + extend type Mutation { + "Create a new source of infrastructure data" + createSource( + "The id of the source" + id: ID! + sourceProperties: UpdateSourceInput! + ): UpdateSourceResult! + "Modify an existing source" + updateSource( + "The id of the source" + id: ID! + "The properties to update the source with" + sourceProperties: UpdateSourceInput! + ): UpdateSourceResult! + "Delete a source of infrastructure data" + deleteSource("The id of the source" id: ID!): DeleteSourceResult! 
+ } +`; diff --git a/x-pack/plugins/infra/server/graphql/types.ts b/x-pack/plugins/infra/server/graphql/types.ts new file mode 100644 index 0000000000000..1d6b03ac7bffb --- /dev/null +++ b/x-pack/plugins/infra/server/graphql/types.ts @@ -0,0 +1,1513 @@ +/* tslint:disable */ +import { InfraContext } from '../lib/infra_types'; +import { GraphQLResolveInfo } from 'graphql'; + +export type Resolver = ( + parent: Parent, + args: Args, + context: Context, + info: GraphQLResolveInfo +) => Promise | Result; + +export interface ISubscriptionResolverObject { + subscribe( + parent: P, + args: Args, + context: Context, + info: GraphQLResolveInfo + ): AsyncIterator; + resolve?( + parent: P, + args: Args, + context: Context, + info: GraphQLResolveInfo + ): R | Result | Promise; +} + +export type SubscriptionResolver = + | ((...args: any[]) => ISubscriptionResolverObject) + | ISubscriptionResolverObject; + +// ==================================================== +// START: Typescript template +// ==================================================== + +// ==================================================== +// Types +// ==================================================== + +export interface Query { + /** Get an infrastructure data source by id.The resolution order for the source configuration attributes is as followswith the first defined value winning:1. The attributes of the saved object with the given 'id'.2. The attributes defined in the static Kibana configuration key'xpack.infra.sources.default'.3. The hard-coded default values.As a consequence, querying a source that doesn't exist doesn't error out,but returns the configured or hardcoded defaults. 
*/ + source: InfraSource; + /** Get a list of all infrastructure data sources */ + allSources: InfraSource[]; +} +/** A source of infrastructure data */ +export interface InfraSource { + /** The id of the source */ + id: string; + /** The version number the source configuration was last persisted with */ + version?: string | null; + /** The timestamp the source configuration was last persisted at */ + updatedAt?: number | null; + /** The origin of the source (one of 'fallback', 'internal', 'stored') */ + origin: string; + /** The raw configuration of the source */ + configuration: InfraSourceConfiguration; + /** The status of the source */ + status: InfraSourceStatus; + /** A consecutive span of log entries surrounding a point in time */ + logEntriesAround: InfraLogEntryInterval; + /** A consecutive span of log entries within an interval */ + logEntriesBetween: InfraLogEntryInterval; + /** Sequences of log entries matching sets of highlighting queries within an interval */ + logEntryHighlights: InfraLogEntryInterval[]; + + /** A snapshot of nodes */ + snapshot?: InfraSnapshotResponse | null; + + metrics: InfraMetricData[]; +} +/** A set of configuration options for an infrastructure data source */ +export interface InfraSourceConfiguration { + /** The name of the data source */ + name: string; + /** A description of the data source */ + description: string; + /** The alias to read metric data from */ + metricAlias: string; + /** The alias to read log data from */ + logAlias: string; + /** The field mapping to use for this source */ + fields: InfraSourceFields; + /** The columns to use for log display */ + logColumns: InfraSourceLogColumn[]; +} +/** A mapping of semantic fields to their document counterparts */ +export interface InfraSourceFields { + /** The field to identify a container by */ + container: string; + /** The fields to identify a host by */ + host: string; + /** The fields to use as the log message */ + message: string[]; + /** The field to identify a 
pod by */ + pod: string; + /** The field to use as a tiebreaker for log events that have identical timestamps */ + tiebreaker: string; + /** The field to use as a timestamp for metrics and logs */ + timestamp: string; +} +/** The built-in timestamp log column */ +export interface InfraSourceTimestampLogColumn { + timestampColumn: InfraSourceTimestampLogColumnAttributes; +} + +export interface InfraSourceTimestampLogColumnAttributes { + /** A unique id for the column */ + id: string; +} +/** The built-in message log column */ +export interface InfraSourceMessageLogColumn { + messageColumn: InfraSourceMessageLogColumnAttributes; +} + +export interface InfraSourceMessageLogColumnAttributes { + /** A unique id for the column */ + id: string; +} +/** A log column containing a field value */ +export interface InfraSourceFieldLogColumn { + fieldColumn: InfraSourceFieldLogColumnAttributes; +} + +export interface InfraSourceFieldLogColumnAttributes { + /** A unique id for the column */ + id: string; + /** The field name this column refers to */ + field: string; +} +/** The status of an infrastructure data source */ +export interface InfraSourceStatus { + /** Whether the configured metric alias exists */ + metricAliasExists: boolean; + /** Whether the configured log alias exists */ + logAliasExists: boolean; + /** Whether the configured alias or wildcard pattern resolve to any metric indices */ + metricIndicesExist: boolean; + /** Whether the configured alias or wildcard pattern resolve to any log indices */ + logIndicesExist: boolean; + /** The list of indices in the metric alias */ + metricIndices: string[]; + /** The list of indices in the log alias */ + logIndices: string[]; + /** The list of fields defined in the index mappings */ + indexFields: InfraIndexField[]; +} +/** A descriptor of a field in an index */ +export interface InfraIndexField { + /** The name of the field */ + name: string; + /** The type of the field's values as recognized by Kibana */ + type: string; 
+ /** Whether the field's values can be efficiently searched for */ + searchable: boolean; + /** Whether the field's values can be aggregated */ + aggregatable: boolean; + /** Whether the field should be displayed based on event.module and a ECS allowed list */ + displayable: boolean; +} +/** A consecutive sequence of log entries */ +export interface InfraLogEntryInterval { + /** The key corresponding to the start of the interval covered by the entries */ + start?: InfraTimeKey | null; + /** The key corresponding to the end of the interval covered by the entries */ + end?: InfraTimeKey | null; + /** Whether there are more log entries available before the start */ + hasMoreBefore: boolean; + /** Whether there are more log entries available after the end */ + hasMoreAfter: boolean; + /** The query the log entries were filtered by */ + filterQuery?: string | null; + /** The query the log entries were highlighted with */ + highlightQuery?: string | null; + /** A list of the log entries */ + entries: InfraLogEntry[]; +} +/** A representation of the log entry's position in the event stream */ +export interface InfraTimeKey { + /** The timestamp of the event that the log entry corresponds to */ + time: number; + /** The tiebreaker that disambiguates events with the same timestamp */ + tiebreaker: number; +} +/** A log entry */ +export interface InfraLogEntry { + /** A unique representation of the log entry's position in the event stream */ + key: InfraTimeKey; + /** The log entry's id */ + gid: string; + /** The source id */ + source: string; + /** The columns used for rendering the log entry */ + columns: InfraLogEntryColumn[]; +} +/** A special built-in column that contains the log entry's timestamp */ +export interface InfraLogEntryTimestampColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** The timestamp */ + timestamp: number; +} +/** A special built-in column that contains the log entry's constructed message */ +export 
interface InfraLogEntryMessageColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** A list of the formatted log entry segments */ + message: InfraLogMessageSegment[]; +} +/** A segment of the log entry message that was derived from a field */ +export interface InfraLogMessageFieldSegment { + /** The field the segment was derived from */ + field: string; + /** The segment's message */ + value: string; + /** A list of highlighted substrings of the value */ + highlights: string[]; +} +/** A segment of the log entry message that was derived from a string literal */ +export interface InfraLogMessageConstantSegment { + /** The segment's message */ + constant: string; +} +/** A column that contains the value of a field of the log entry */ +export interface InfraLogEntryFieldColumn { + /** The id of the corresponding column configuration */ + columnId: string; + /** The field name of the column */ + field: string; + /** The value of the field in the log entry */ + value: string; + /** A list of highlighted substrings of the value */ + highlights: string[]; +} + +export interface InfraSnapshotResponse { + /** Nodes of type host, container or pod grouped by 0, 1 or 2 terms */ + nodes: InfraSnapshotNode[]; +} + +export interface InfraSnapshotNode { + path: InfraSnapshotNodePath[]; + + metric: InfraSnapshotNodeMetric; +} + +export interface InfraSnapshotNodePath { + value: string; + + label: string; + + ip?: string | null; +} + +export interface InfraSnapshotNodeMetric { + name: InfraSnapshotMetricType; + + value?: number | null; + + avg?: number | null; + + max?: number | null; +} + +export interface InfraMetricData { + id?: InfraMetric | null; + + series: InfraDataSeries[]; +} + +export interface InfraDataSeries { + id: string; + + label: string; + + data: InfraDataPoint[]; +} + +export interface InfraDataPoint { + timestamp: number; + + value?: number | null; +} + +export interface Mutation { + /** Create a new source of infrastructure 
data */ + createSource: UpdateSourceResult; + /** Modify an existing source */ + updateSource: UpdateSourceResult; + /** Delete a source of infrastructure data */ + deleteSource: DeleteSourceResult; +} +/** The result of a successful source update */ +export interface UpdateSourceResult { + /** The source that was updated */ + source: InfraSource; +} +/** The result of a source deletion operations */ +export interface DeleteSourceResult { + /** The id of the source that was deleted */ + id: string; +} + +// ==================================================== +// InputTypes +// ==================================================== + +export interface InfraTimeKeyInput { + time: number; + + tiebreaker: number; +} +/** A highlighting definition */ +export interface InfraLogEntryHighlightInput { + /** The query to highlight by */ + query: string; + /** The number of highlighted documents to include beyond the beginning of the interval */ + countBefore: number; + /** The number of highlighted documents to include beyond the end of the interval */ + countAfter: number; +} + +export interface InfraTimerangeInput { + /** The interval string to use for last bucket. The format is '{value}{unit}'. For example '5m' would return the metrics for the last 5 minutes of the timespan. 
*/ + interval: string; + /** The end of the timerange */ + to: number; + /** The beginning of the timerange */ + from: number; +} + +export interface InfraSnapshotGroupbyInput { + /** The label to use in the results for the group by for the terms group by */ + label?: string | null; + /** The field to group by from a terms aggregation, this is ignored by the filter type */ + field?: string | null; +} + +export interface InfraSnapshotMetricInput { + /** The type of metric */ + type: InfraSnapshotMetricType; +} + +export interface InfraNodeIdsInput { + nodeId: string; + + cloudId?: string | null; +} +/** The properties to update the source with */ +export interface UpdateSourceInput { + /** The name of the data source */ + name?: string | null; + /** A description of the data source */ + description?: string | null; + /** The alias to read metric data from */ + metricAlias?: string | null; + /** The alias to read log data from */ + logAlias?: string | null; + /** The field mapping to use for this source */ + fields?: UpdateSourceFieldsInput | null; + /** The log columns to display for this source */ + logColumns?: UpdateSourceLogColumnInput[] | null; +} +/** The mapping of semantic fields of the source to be created */ +export interface UpdateSourceFieldsInput { + /** The field to identify a container by */ + container?: string | null; + /** The fields to identify a host by */ + host?: string | null; + /** The field to identify a pod by */ + pod?: string | null; + /** The field to use as a tiebreaker for log events that have identical timestamps */ + tiebreaker?: string | null; + /** The field to use as a timestamp for metrics and logs */ + timestamp?: string | null; +} +/** One of the log column types to display for this source */ +export interface UpdateSourceLogColumnInput { + /** A custom field log column */ + fieldColumn?: UpdateSourceFieldLogColumnInput | null; + /** A built-in message log column */ + messageColumn?: UpdateSourceMessageLogColumnInput | null; + 
/** A built-in timestamp log column */ + timestampColumn?: UpdateSourceTimestampLogColumnInput | null; +} + +export interface UpdateSourceFieldLogColumnInput { + id: string; + + field: string; +} + +export interface UpdateSourceMessageLogColumnInput { + id: string; +} + +export interface UpdateSourceTimestampLogColumnInput { + id: string; +} + +// ==================================================== +// Arguments +// ==================================================== + +export interface SourceQueryArgs { + /** The id of the source */ + id: string; +} +export interface LogEntriesAroundInfraSourceArgs { + /** The sort key that corresponds to the point in time */ + key: InfraTimeKeyInput; + /** The maximum number of preceding to return */ + countBefore?: number | null; + /** The maximum number of following to return */ + countAfter?: number | null; + /** The query to filter the log entries by */ + filterQuery?: string | null; +} +export interface LogEntriesBetweenInfraSourceArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; +} +export interface LogEntryHighlightsInfraSourceArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; + /** The highlighting to apply to the log entries */ + highlights: InfraLogEntryHighlightInput[]; +} +export interface SnapshotInfraSourceArgs { + timerange: InfraTimerangeInput; + + filterQuery?: string | null; +} +export interface MetricsInfraSourceArgs { + nodeIds: InfraNodeIdsInput; + + nodeType: InfraNodeType; + + timerange: InfraTimerangeInput; + + metrics: InfraMetric[]; +} +export 
interface IndexFieldsInfraSourceStatusArgs { + indexType?: InfraIndexType | null; +} +export interface NodesInfraSnapshotResponseArgs { + type: InfraNodeType; + + groupBy: InfraSnapshotGroupbyInput[]; + + metric: InfraSnapshotMetricInput; +} +export interface CreateSourceMutationArgs { + /** The id of the source */ + id: string; + + sourceProperties: UpdateSourceInput; +} +export interface UpdateSourceMutationArgs { + /** The id of the source */ + id: string; + /** The properties to update the source with */ + sourceProperties: UpdateSourceInput; +} +export interface DeleteSourceMutationArgs { + /** The id of the source */ + id: string; +} + +// ==================================================== +// Enums +// ==================================================== + +export enum InfraIndexType { + ANY = 'ANY', + LOGS = 'LOGS', + METRICS = 'METRICS', +} + +export enum InfraNodeType { + pod = 'pod', + container = 'container', + host = 'host', + awsEC2 = 'awsEC2', + awsS3 = 'awsS3', + awsRDS = 'awsRDS', + awsSQS = 'awsSQS', +} + +export enum InfraSnapshotMetricType { + count = 'count', + cpu = 'cpu', + load = 'load', + memory = 'memory', + tx = 'tx', + rx = 'rx', + logRate = 'logRate', + diskIOReadBytes = 'diskIOReadBytes', + diskIOWriteBytes = 'diskIOWriteBytes', + s3TotalRequests = 's3TotalRequests', + s3NumberOfObjects = 's3NumberOfObjects', + s3BucketSize = 's3BucketSize', + s3DownloadBytes = 's3DownloadBytes', + s3UploadBytes = 's3UploadBytes', + rdsConnections = 'rdsConnections', + rdsQueriesExecuted = 'rdsQueriesExecuted', + rdsActiveTransactions = 'rdsActiveTransactions', + rdsLatency = 'rdsLatency', + sqsMessagesVisible = 'sqsMessagesVisible', + sqsMessagesDelayed = 'sqsMessagesDelayed', + sqsMessagesSent = 'sqsMessagesSent', + sqsMessagesEmpty = 'sqsMessagesEmpty', + sqsOldestMessage = 'sqsOldestMessage', +} + +export enum InfraMetric { + hostSystemOverview = 'hostSystemOverview', + hostCpuUsage = 'hostCpuUsage', + hostFilesystem = 'hostFilesystem', + 
hostK8sOverview = 'hostK8sOverview', + hostK8sCpuCap = 'hostK8sCpuCap', + hostK8sDiskCap = 'hostK8sDiskCap', + hostK8sMemoryCap = 'hostK8sMemoryCap', + hostK8sPodCap = 'hostK8sPodCap', + hostLoad = 'hostLoad', + hostMemoryUsage = 'hostMemoryUsage', + hostNetworkTraffic = 'hostNetworkTraffic', + hostDockerOverview = 'hostDockerOverview', + hostDockerInfo = 'hostDockerInfo', + hostDockerTop5ByCpu = 'hostDockerTop5ByCpu', + hostDockerTop5ByMemory = 'hostDockerTop5ByMemory', + podOverview = 'podOverview', + podCpuUsage = 'podCpuUsage', + podMemoryUsage = 'podMemoryUsage', + podLogUsage = 'podLogUsage', + podNetworkTraffic = 'podNetworkTraffic', + containerOverview = 'containerOverview', + containerCpuKernel = 'containerCpuKernel', + containerCpuUsage = 'containerCpuUsage', + containerDiskIOOps = 'containerDiskIOOps', + containerDiskIOBytes = 'containerDiskIOBytes', + containerMemory = 'containerMemory', + containerNetworkTraffic = 'containerNetworkTraffic', + nginxHits = 'nginxHits', + nginxRequestRate = 'nginxRequestRate', + nginxActiveConnections = 'nginxActiveConnections', + nginxRequestsPerConnection = 'nginxRequestsPerConnection', + awsOverview = 'awsOverview', + awsCpuUtilization = 'awsCpuUtilization', + awsNetworkBytes = 'awsNetworkBytes', + awsNetworkPackets = 'awsNetworkPackets', + awsDiskioBytes = 'awsDiskioBytes', + awsDiskioOps = 'awsDiskioOps', + awsEC2CpuUtilization = 'awsEC2CpuUtilization', + awsEC2DiskIOBytes = 'awsEC2DiskIOBytes', + awsEC2NetworkTraffic = 'awsEC2NetworkTraffic', + awsS3TotalRequests = 'awsS3TotalRequests', + awsS3NumberOfObjects = 'awsS3NumberOfObjects', + awsS3BucketSize = 'awsS3BucketSize', + awsS3DownloadBytes = 'awsS3DownloadBytes', + awsS3UploadBytes = 'awsS3UploadBytes', + awsRDSCpuTotal = 'awsRDSCpuTotal', + awsRDSConnections = 'awsRDSConnections', + awsRDSQueriesExecuted = 'awsRDSQueriesExecuted', + awsRDSActiveTransactions = 'awsRDSActiveTransactions', + awsRDSLatency = 'awsRDSLatency', + awsSQSMessagesVisible = 
'awsSQSMessagesVisible', + awsSQSMessagesDelayed = 'awsSQSMessagesDelayed', + awsSQSMessagesSent = 'awsSQSMessagesSent', + awsSQSMessagesEmpty = 'awsSQSMessagesEmpty', + awsSQSOldestMessage = 'awsSQSOldestMessage', + custom = 'custom', +} + +// ==================================================== +// Unions +// ==================================================== + +/** All known log column types */ +export type InfraSourceLogColumn = + | InfraSourceTimestampLogColumn + | InfraSourceMessageLogColumn + | InfraSourceFieldLogColumn; + +/** A column of a log entry */ +export type InfraLogEntryColumn = + | InfraLogEntryTimestampColumn + | InfraLogEntryMessageColumn + | InfraLogEntryFieldColumn; + +/** A segment of the log entry message */ +export type InfraLogMessageSegment = InfraLogMessageFieldSegment | InfraLogMessageConstantSegment; + +// ==================================================== +// END: Typescript template +// ==================================================== + +// ==================================================== +// Resolvers +// ==================================================== + +export namespace QueryResolvers { + export interface Resolvers { + /** Get an infrastructure data source by id.The resolution order for the source configuration attributes is as followswith the first defined value winning:1. The attributes of the saved object with the given 'id'.2. The attributes defined in the static Kibana configuration key'xpack.infra.sources.default'.3. The hard-coded default values.As a consequence, querying a source that doesn't exist doesn't error out,but returns the configured or hardcoded defaults. 
*/ + source?: SourceResolver; + /** Get a list of all infrastructure data sources */ + allSources?: AllSourcesResolver; + } + + export type SourceResolver = Resolver< + R, + Parent, + Context, + SourceArgs + >; + export interface SourceArgs { + /** The id of the source */ + id: string; + } + + export type AllSourcesResolver< + R = InfraSource[], + Parent = never, + Context = InfraContext + > = Resolver; +} +/** A source of infrastructure data */ +export namespace InfraSourceResolvers { + export interface Resolvers { + /** The id of the source */ + id?: IdResolver; + /** The version number the source configuration was last persisted with */ + version?: VersionResolver; + /** The timestamp the source configuration was last persisted at */ + updatedAt?: UpdatedAtResolver; + /** The origin of the source (one of 'fallback', 'internal', 'stored') */ + origin?: OriginResolver; + /** The raw configuration of the source */ + configuration?: ConfigurationResolver; + /** The status of the source */ + status?: StatusResolver; + /** A consecutive span of log entries surrounding a point in time */ + logEntriesAround?: LogEntriesAroundResolver; + /** A consecutive span of log entries within an interval */ + logEntriesBetween?: LogEntriesBetweenResolver; + /** Sequences of log entries matching sets of highlighting queries within an interval */ + logEntryHighlights?: LogEntryHighlightsResolver; + + /** A snapshot of nodes */ + snapshot?: SnapshotResolver; + + metrics?: MetricsResolver; + } + + export type IdResolver = Resolver< + R, + Parent, + Context + >; + export type VersionResolver< + R = string | null, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export type UpdatedAtResolver< + R = number | null, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export type OriginResolver = Resolver< + R, + Parent, + Context + >; + export type ConfigurationResolver< + R = InfraSourceConfiguration, + Parent = InfraSource, + Context = InfraContext + > = 
Resolver; + export type StatusResolver< + R = InfraSourceStatus, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export type LogEntriesAroundResolver< + R = InfraLogEntryInterval, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export interface LogEntriesAroundArgs { + /** The sort key that corresponds to the point in time */ + key: InfraTimeKeyInput; + /** The maximum number of preceding to return */ + countBefore?: number | null; + /** The maximum number of following to return */ + countAfter?: number | null; + /** The query to filter the log entries by */ + filterQuery?: string | null; + } + + export type LogEntriesBetweenResolver< + R = InfraLogEntryInterval, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export interface LogEntriesBetweenArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; + } + + export type LogEntryHighlightsResolver< + R = InfraLogEntryInterval[], + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export interface LogEntryHighlightsArgs { + /** The sort key that corresponds to the start of the interval */ + startKey: InfraTimeKeyInput; + /** The sort key that corresponds to the end of the interval */ + endKey: InfraTimeKeyInput; + /** The query to filter the log entries by */ + filterQuery?: string | null; + /** The highlighting to apply to the log entries */ + highlights: InfraLogEntryHighlightInput[]; + } + + export type SnapshotResolver< + R = InfraSnapshotResponse | null, + Parent = InfraSource, + Context = InfraContext + > = Resolver; + export interface SnapshotArgs { + timerange: InfraTimerangeInput; + + filterQuery?: string | null; + } + + export type MetricsResolver< + R = InfraMetricData[], + Parent = InfraSource, + Context = 
InfraContext + > = Resolver; + export interface MetricsArgs { + nodeIds: InfraNodeIdsInput; + + nodeType: InfraNodeType; + + timerange: InfraTimerangeInput; + + metrics: InfraMetric[]; + } +} +/** A set of configuration options for an infrastructure data source */ +export namespace InfraSourceConfigurationResolvers { + export interface Resolvers { + /** The name of the data source */ + name?: NameResolver; + /** A description of the data source */ + description?: DescriptionResolver; + /** The alias to read metric data from */ + metricAlias?: MetricAliasResolver; + /** The alias to read log data from */ + logAlias?: LogAliasResolver; + /** The field mapping to use for this source */ + fields?: FieldsResolver; + /** The columns to use for log display */ + logColumns?: LogColumnsResolver; + } + + export type NameResolver< + R = string, + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; + export type DescriptionResolver< + R = string, + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; + export type MetricAliasResolver< + R = string, + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; + export type LogAliasResolver< + R = string, + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; + export type FieldsResolver< + R = InfraSourceFields, + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; + export type LogColumnsResolver< + R = InfraSourceLogColumn[], + Parent = InfraSourceConfiguration, + Context = InfraContext + > = Resolver; +} +/** A mapping of semantic fields to their document counterparts */ +export namespace InfraSourceFieldsResolvers { + export interface Resolvers { + /** The field to identify a container by */ + container?: ContainerResolver; + /** The fields to identify a host by */ + host?: HostResolver; + /** The fields to use as the log message */ + message?: MessageResolver; + /** The field to identify a pod by */ + pod?: 
PodResolver; + /** The field to use as a tiebreaker for log events that have identical timestamps */ + tiebreaker?: TiebreakerResolver; + /** The field to use as a timestamp for metrics and logs */ + timestamp?: TimestampResolver; + } + + export type ContainerResolver< + R = string, + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; + export type HostResolver< + R = string, + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; + export type MessageResolver< + R = string[], + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; + export type PodResolver< + R = string, + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; + export type TiebreakerResolver< + R = string, + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; + export type TimestampResolver< + R = string, + Parent = InfraSourceFields, + Context = InfraContext + > = Resolver; +} +/** The built-in timestamp log column */ +export namespace InfraSourceTimestampLogColumnResolvers { + export interface Resolvers { + timestampColumn?: TimestampColumnResolver< + InfraSourceTimestampLogColumnAttributes, + TypeParent, + Context + >; + } + + export type TimestampColumnResolver< + R = InfraSourceTimestampLogColumnAttributes, + Parent = InfraSourceTimestampLogColumn, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSourceTimestampLogColumnAttributesResolvers { + export interface Resolvers< + Context = InfraContext, + TypeParent = InfraSourceTimestampLogColumnAttributes + > { + /** A unique id for the column */ + id?: IdResolver; + } + + export type IdResolver< + R = string, + Parent = InfraSourceTimestampLogColumnAttributes, + Context = InfraContext + > = Resolver; +} +/** The built-in message log column */ +export namespace InfraSourceMessageLogColumnResolvers { + export interface Resolvers { + messageColumn?: MessageColumnResolver< + InfraSourceMessageLogColumnAttributes, + TypeParent, + Context + >; + } + + 
export type MessageColumnResolver< + R = InfraSourceMessageLogColumnAttributes, + Parent = InfraSourceMessageLogColumn, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSourceMessageLogColumnAttributesResolvers { + export interface Resolvers< + Context = InfraContext, + TypeParent = InfraSourceMessageLogColumnAttributes + > { + /** A unique id for the column */ + id?: IdResolver; + } + + export type IdResolver< + R = string, + Parent = InfraSourceMessageLogColumnAttributes, + Context = InfraContext + > = Resolver; +} +/** A log column containing a field value */ +export namespace InfraSourceFieldLogColumnResolvers { + export interface Resolvers { + fieldColumn?: FieldColumnResolver; + } + + export type FieldColumnResolver< + R = InfraSourceFieldLogColumnAttributes, + Parent = InfraSourceFieldLogColumn, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSourceFieldLogColumnAttributesResolvers { + export interface Resolvers< + Context = InfraContext, + TypeParent = InfraSourceFieldLogColumnAttributes + > { + /** A unique id for the column */ + id?: IdResolver; + /** The field name this column refers to */ + field?: FieldResolver; + } + + export type IdResolver< + R = string, + Parent = InfraSourceFieldLogColumnAttributes, + Context = InfraContext + > = Resolver; + export type FieldResolver< + R = string, + Parent = InfraSourceFieldLogColumnAttributes, + Context = InfraContext + > = Resolver; +} +/** The status of an infrastructure data source */ +export namespace InfraSourceStatusResolvers { + export interface Resolvers { + /** Whether the configured metric alias exists */ + metricAliasExists?: MetricAliasExistsResolver; + /** Whether the configured log alias exists */ + logAliasExists?: LogAliasExistsResolver; + /** Whether the configured alias or wildcard pattern resolve to any metric indices */ + metricIndicesExist?: MetricIndicesExistResolver; + /** Whether the configured alias or wildcard pattern resolve to any log indices 
*/ + logIndicesExist?: LogIndicesExistResolver; + /** The list of indices in the metric alias */ + metricIndices?: MetricIndicesResolver; + /** The list of indices in the log alias */ + logIndices?: LogIndicesResolver; + /** The list of fields defined in the index mappings */ + indexFields?: IndexFieldsResolver; + } + + export type MetricAliasExistsResolver< + R = boolean, + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type LogAliasExistsResolver< + R = boolean, + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type MetricIndicesExistResolver< + R = boolean, + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type LogIndicesExistResolver< + R = boolean, + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type MetricIndicesResolver< + R = string[], + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type LogIndicesResolver< + R = string[], + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export type IndexFieldsResolver< + R = InfraIndexField[], + Parent = InfraSourceStatus, + Context = InfraContext + > = Resolver; + export interface IndexFieldsArgs { + indexType?: InfraIndexType | null; + } +} +/** A descriptor of a field in an index */ +export namespace InfraIndexFieldResolvers { + export interface Resolvers { + /** The name of the field */ + name?: NameResolver; + /** The type of the field's values as recognized by Kibana */ + type?: TypeResolver; + /** Whether the field's values can be efficiently searched for */ + searchable?: SearchableResolver; + /** Whether the field's values can be aggregated */ + aggregatable?: AggregatableResolver; + /** Whether the field should be displayed based on event.module and a ECS allowed list */ + displayable?: DisplayableResolver; + } + + export type NameResolver = Resolver< + R, + Parent, + Context + >; + export type TypeResolver = Resolver< + R, + Parent, + 
Context + >; + export type SearchableResolver< + R = boolean, + Parent = InfraIndexField, + Context = InfraContext + > = Resolver; + export type AggregatableResolver< + R = boolean, + Parent = InfraIndexField, + Context = InfraContext + > = Resolver; + export type DisplayableResolver< + R = boolean, + Parent = InfraIndexField, + Context = InfraContext + > = Resolver; +} +/** A consecutive sequence of log entries */ +export namespace InfraLogEntryIntervalResolvers { + export interface Resolvers { + /** The key corresponding to the start of the interval covered by the entries */ + start?: StartResolver; + /** The key corresponding to the end of the interval covered by the entries */ + end?: EndResolver; + /** Whether there are more log entries available before the start */ + hasMoreBefore?: HasMoreBeforeResolver; + /** Whether there are more log entries available after the end */ + hasMoreAfter?: HasMoreAfterResolver; + /** The query the log entries were filtered by */ + filterQuery?: FilterQueryResolver; + /** The query the log entries were highlighted with */ + highlightQuery?: HighlightQueryResolver; + /** A list of the log entries */ + entries?: EntriesResolver; + } + + export type StartResolver< + R = InfraTimeKey | null, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type EndResolver< + R = InfraTimeKey | null, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type HasMoreBeforeResolver< + R = boolean, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type HasMoreAfterResolver< + R = boolean, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type FilterQueryResolver< + R = string | null, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type HighlightQueryResolver< + R = string | null, + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; + export type EntriesResolver< + 
R = InfraLogEntry[], + Parent = InfraLogEntryInterval, + Context = InfraContext + > = Resolver; +} +/** A representation of the log entry's position in the event stream */ +export namespace InfraTimeKeyResolvers { + export interface Resolvers { + /** The timestamp of the event that the log entry corresponds to */ + time?: TimeResolver; + /** The tiebreaker that disambiguates events with the same timestamp */ + tiebreaker?: TiebreakerResolver; + } + + export type TimeResolver = Resolver< + R, + Parent, + Context + >; + export type TiebreakerResolver< + R = number, + Parent = InfraTimeKey, + Context = InfraContext + > = Resolver; +} +/** A log entry */ +export namespace InfraLogEntryResolvers { + export interface Resolvers { + /** A unique representation of the log entry's position in the event stream */ + key?: KeyResolver; + /** The log entry's id */ + gid?: GidResolver; + /** The source id */ + source?: SourceResolver; + /** The columns used for rendering the log entry */ + columns?: ColumnsResolver; + } + + export type KeyResolver< + R = InfraTimeKey, + Parent = InfraLogEntry, + Context = InfraContext + > = Resolver; + export type GidResolver = Resolver< + R, + Parent, + Context + >; + export type SourceResolver = Resolver< + R, + Parent, + Context + >; + export type ColumnsResolver< + R = InfraLogEntryColumn[], + Parent = InfraLogEntry, + Context = InfraContext + > = Resolver; +} +/** A special built-in column that contains the log entry's timestamp */ +export namespace InfraLogEntryTimestampColumnResolvers { + export interface Resolvers { + /** The id of the corresponding column configuration */ + columnId?: ColumnIdResolver; + /** The timestamp */ + timestamp?: TimestampResolver; + } + + export type ColumnIdResolver< + R = string, + Parent = InfraLogEntryTimestampColumn, + Context = InfraContext + > = Resolver; + export type TimestampResolver< + R = number, + Parent = InfraLogEntryTimestampColumn, + Context = InfraContext + > = Resolver; +} +/** A special 
built-in column that contains the log entry's constructed message */ +export namespace InfraLogEntryMessageColumnResolvers { + export interface Resolvers { + /** The id of the corresponding column configuration */ + columnId?: ColumnIdResolver; + /** A list of the formatted log entry segments */ + message?: MessageResolver; + } + + export type ColumnIdResolver< + R = string, + Parent = InfraLogEntryMessageColumn, + Context = InfraContext + > = Resolver; + export type MessageResolver< + R = InfraLogMessageSegment[], + Parent = InfraLogEntryMessageColumn, + Context = InfraContext + > = Resolver; +} +/** A segment of the log entry message that was derived from a field */ +export namespace InfraLogMessageFieldSegmentResolvers { + export interface Resolvers { + /** The field the segment was derived from */ + field?: FieldResolver; + /** The segment's message */ + value?: ValueResolver; + /** A list of highlighted substrings of the value */ + highlights?: HighlightsResolver; + } + + export type FieldResolver< + R = string, + Parent = InfraLogMessageFieldSegment, + Context = InfraContext + > = Resolver; + export type ValueResolver< + R = string, + Parent = InfraLogMessageFieldSegment, + Context = InfraContext + > = Resolver; + export type HighlightsResolver< + R = string[], + Parent = InfraLogMessageFieldSegment, + Context = InfraContext + > = Resolver; +} +/** A segment of the log entry message that was derived from a string literal */ +export namespace InfraLogMessageConstantSegmentResolvers { + export interface Resolvers { + /** The segment's message */ + constant?: ConstantResolver; + } + + export type ConstantResolver< + R = string, + Parent = InfraLogMessageConstantSegment, + Context = InfraContext + > = Resolver; +} +/** A column that contains the value of a field of the log entry */ +export namespace InfraLogEntryFieldColumnResolvers { + export interface Resolvers { + /** The id of the corresponding column configuration */ + columnId?: ColumnIdResolver; + /** The 
field name of the column */ + field?: FieldResolver; + /** The value of the field in the log entry */ + value?: ValueResolver; + /** A list of highlighted substrings of the value */ + highlights?: HighlightsResolver; + } + + export type ColumnIdResolver< + R = string, + Parent = InfraLogEntryFieldColumn, + Context = InfraContext + > = Resolver; + export type FieldResolver< + R = string, + Parent = InfraLogEntryFieldColumn, + Context = InfraContext + > = Resolver; + export type ValueResolver< + R = string, + Parent = InfraLogEntryFieldColumn, + Context = InfraContext + > = Resolver; + export type HighlightsResolver< + R = string[], + Parent = InfraLogEntryFieldColumn, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSnapshotResponseResolvers { + export interface Resolvers { + /** Nodes of type host, container or pod grouped by 0, 1 or 2 terms */ + nodes?: NodesResolver; + } + + export type NodesResolver< + R = InfraSnapshotNode[], + Parent = InfraSnapshotResponse, + Context = InfraContext + > = Resolver; + export interface NodesArgs { + type: InfraNodeType; + + groupBy: InfraSnapshotGroupbyInput[]; + + metric: InfraSnapshotMetricInput; + } +} + +export namespace InfraSnapshotNodeResolvers { + export interface Resolvers { + path?: PathResolver; + + metric?: MetricResolver; + } + + export type PathResolver< + R = InfraSnapshotNodePath[], + Parent = InfraSnapshotNode, + Context = InfraContext + > = Resolver; + export type MetricResolver< + R = InfraSnapshotNodeMetric, + Parent = InfraSnapshotNode, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSnapshotNodePathResolvers { + export interface Resolvers { + value?: ValueResolver; + + label?: LabelResolver; + + ip?: IpResolver; + } + + export type ValueResolver< + R = string, + Parent = InfraSnapshotNodePath, + Context = InfraContext + > = Resolver; + export type LabelResolver< + R = string, + Parent = InfraSnapshotNodePath, + Context = InfraContext + > = Resolver; + export type 
IpResolver< + R = string | null, + Parent = InfraSnapshotNodePath, + Context = InfraContext + > = Resolver; +} + +export namespace InfraSnapshotNodeMetricResolvers { + export interface Resolvers { + name?: NameResolver; + + value?: ValueResolver; + + avg?: AvgResolver; + + max?: MaxResolver; + } + + export type NameResolver< + R = InfraSnapshotMetricType, + Parent = InfraSnapshotNodeMetric, + Context = InfraContext + > = Resolver; + export type ValueResolver< + R = number | null, + Parent = InfraSnapshotNodeMetric, + Context = InfraContext + > = Resolver; + export type AvgResolver< + R = number | null, + Parent = InfraSnapshotNodeMetric, + Context = InfraContext + > = Resolver; + export type MaxResolver< + R = number | null, + Parent = InfraSnapshotNodeMetric, + Context = InfraContext + > = Resolver; +} + +export namespace InfraMetricDataResolvers { + export interface Resolvers { + id?: IdResolver; + + series?: SeriesResolver; + } + + export type IdResolver< + R = InfraMetric | null, + Parent = InfraMetricData, + Context = InfraContext + > = Resolver; + export type SeriesResolver< + R = InfraDataSeries[], + Parent = InfraMetricData, + Context = InfraContext + > = Resolver; +} + +export namespace InfraDataSeriesResolvers { + export interface Resolvers { + id?: IdResolver; + + label?: LabelResolver; + + data?: DataResolver; + } + + export type IdResolver = Resolver< + R, + Parent, + Context + >; + export type LabelResolver< + R = string, + Parent = InfraDataSeries, + Context = InfraContext + > = Resolver; + export type DataResolver< + R = InfraDataPoint[], + Parent = InfraDataSeries, + Context = InfraContext + > = Resolver; +} + +export namespace InfraDataPointResolvers { + export interface Resolvers { + timestamp?: TimestampResolver; + + value?: ValueResolver; + } + + export type TimestampResolver< + R = number, + Parent = InfraDataPoint, + Context = InfraContext + > = Resolver; + export type ValueResolver< + R = number | null, + Parent = InfraDataPoint, + Context = 
InfraContext + > = Resolver; +} + +export namespace MutationResolvers { + export interface Resolvers { + /** Create a new source of infrastructure data */ + createSource?: CreateSourceResolver; + /** Modify an existing source */ + updateSource?: UpdateSourceResolver; + /** Delete a source of infrastructure data */ + deleteSource?: DeleteSourceResolver; + } + + export type CreateSourceResolver< + R = UpdateSourceResult, + Parent = never, + Context = InfraContext + > = Resolver; + export interface CreateSourceArgs { + /** The id of the source */ + id: string; + + sourceProperties: UpdateSourceInput; + } + + export type UpdateSourceResolver< + R = UpdateSourceResult, + Parent = never, + Context = InfraContext + > = Resolver; + export interface UpdateSourceArgs { + /** The id of the source */ + id: string; + /** The properties to update the source with */ + sourceProperties: UpdateSourceInput; + } + + export type DeleteSourceResolver< + R = DeleteSourceResult, + Parent = never, + Context = InfraContext + > = Resolver; + export interface DeleteSourceArgs { + /** The id of the source */ + id: string; + } +} +/** The result of a successful source update */ +export namespace UpdateSourceResultResolvers { + export interface Resolvers { + /** The source that was updated */ + source?: SourceResolver; + } + + export type SourceResolver< + R = InfraSource, + Parent = UpdateSourceResult, + Context = InfraContext + > = Resolver; +} +/** The result of a source deletion operations */ +export namespace DeleteSourceResultResolvers { + export interface Resolvers { + /** The id of the source that was deleted */ + id?: IdResolver; + } + + export type IdResolver< + R = string, + Parent = DeleteSourceResult, + Context = InfraContext + > = Resolver; +} diff --git a/x-pack/plugins/infra/server/infra_server.ts b/x-pack/plugins/infra/server/infra_server.ts new file mode 100644 index 0000000000000..108e1b1e3f392 --- /dev/null +++ b/x-pack/plugins/infra/server/infra_server.ts @@ -0,0 +1,52 @@ 
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { IResolvers, makeExecutableSchema } from 'graphql-tools'; +import { initIpToHostName } from './routes/ip_to_hostname'; +import { schemas } from './graphql'; +import { createLogEntriesResolvers } from './graphql/log_entries'; +import { createSourceStatusResolvers } from './graphql/source_status'; +import { createSourcesResolvers } from './graphql/sources'; +import { InfraBackendLibs } from './lib/infra_types'; +import { + initGetLogEntryRateRoute, + initValidateLogAnalysisIndicesRoute, +} from './routes/log_analysis'; +import { initMetricExplorerRoute } from './routes/metrics_explorer'; +import { initMetadataRoute } from './routes/metadata'; +import { initSnapshotRoute } from './routes/snapshot'; +import { initNodeDetailsRoute } from './routes/node_details'; +import { + initLogEntriesSummaryRoute, + initLogEntriesSummaryHighlightsRoute, + initLogEntriesItemRoute, +} from './routes/log_entries'; +import { initInventoryMetaRoute } from './routes/inventory_metadata'; + +export const initInfraServer = (libs: InfraBackendLibs) => { + const schema = makeExecutableSchema({ + resolvers: [ + createLogEntriesResolvers(libs) as IResolvers, + createSourcesResolvers(libs) as IResolvers, + createSourceStatusResolvers(libs) as IResolvers, + ], + typeDefs: schemas, + }); + + libs.framework.registerGraphQLEndpoint('/graphql', schema); + + initIpToHostName(libs); + initGetLogEntryRateRoute(libs); + initSnapshotRoute(libs); + initNodeDetailsRoute(libs); + initValidateLogAnalysisIndicesRoute(libs); + initLogEntriesSummaryRoute(libs); + initLogEntriesSummaryHighlightsRoute(libs); + initLogEntriesItemRoute(libs); + initMetricExplorerRoute(libs); + initMetadataRoute(libs); + initInventoryMetaRoute(libs); +}; diff --git 
a/x-pack/plugins/infra/server/kibana.index.ts b/x-pack/plugins/infra/server/kibana.index.ts new file mode 100644 index 0000000000000..b4301b3edf367 --- /dev/null +++ b/x-pack/plugins/infra/server/kibana.index.ts @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { Server } from 'hapi'; +import JoiNamespace from 'joi'; + +export interface KbnServer extends Server { + usage: any; +} + +// NP_TODO: this is only used in the root index file AFAICT, can remove after migrating to NP +export const getConfigSchema = (Joi: typeof JoiNamespace) => { + const InfraDefaultSourceConfigSchema = Joi.object({ + metricAlias: Joi.string(), + logAlias: Joi.string(), + fields: Joi.object({ + container: Joi.string(), + host: Joi.string(), + message: Joi.array() + .items(Joi.string()) + .single(), + pod: Joi.string(), + tiebreaker: Joi.string(), + timestamp: Joi.string(), + }), + }); + + // NP_TODO: make sure this is all represented in the NP config schema + const InfraRootConfigSchema = Joi.object({ + enabled: Joi.boolean().default(true), + query: Joi.object({ + partitionSize: Joi.number(), + partitionFactor: Joi.number(), + }).default(), + sources: Joi.object() + .keys({ + default: InfraDefaultSourceConfigSchema, + }) + .default(), + }).default(); + + return InfraRootConfigSchema; +}; diff --git a/x-pack/plugins/infra/server/lib/adapters/fields/adapter_types.ts b/x-pack/plugins/infra/server/lib/adapters/fields/adapter_types.ts new file mode 100644 index 0000000000000..3aaa23b378096 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/fields/adapter_types.ts @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { RequestHandlerContext } from 'src/core/server'; + +export interface FieldsAdapter { + getIndexFields( + requestContext: RequestHandlerContext, + indices: string, + timefield: string + ): Promise; +} + +export interface IndexFieldDescriptor { + name: string; + type: string; + searchable: boolean; + aggregatable: boolean; + displayable: boolean; +} diff --git a/x-pack/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts new file mode 100644 index 0000000000000..834c991d5c6a4 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { startsWith, uniq, first } from 'lodash'; +import { RequestHandlerContext } from 'src/core/server'; +import { InfraDatabaseSearchResponse } from '../framework'; +import { KibanaFramework } from '../framework/kibana_framework_adapter'; +import { FieldsAdapter, IndexFieldDescriptor } from './adapter_types'; +import { getAllowedListForPrefix } from '../../../../common/ecs_allowed_list'; +import { getAllCompositeData } from '../../../utils/get_all_composite_data'; +import { createAfterKeyHandler } from '../../../utils/create_afterkey_handler'; + +interface Bucket { + key: { dataset: string }; + doc_count: number; +} + +interface DataSetResponse { + datasets: { + buckets: Bucket[]; + after_key: { + dataset: string; + }; + }; +} + +export class FrameworkFieldsAdapter implements FieldsAdapter { + private framework: KibanaFramework; + + constructor(framework: KibanaFramework) { + this.framework = framework; + } + + public async getIndexFields( + requestContext: RequestHandlerContext, + indices: string, + timefield: string + ): Promise { + const indexPatternsService = this.framework.getIndexPatternsService(requestContext); + const response = await indexPatternsService.getFieldsForWildcard({ + pattern: indices, + }); + const { dataSets, modules } = await this.getDataSetsAndModules( + requestContext, + indices, + timefield + ); + const allowedList = modules.reduce( + (acc, name) => uniq([...acc, ...getAllowedListForPrefix(name)]), + [] as string[] + ); + const dataSetsWithAllowedList = [...allowedList, ...dataSets]; + return response.map(field => ({ + ...field, + displayable: dataSetsWithAllowedList.some(name => startsWith(field.name, name)), + })); + } + + private async getDataSetsAndModules( + requestContext: RequestHandlerContext, + indices: string, + timefield: string + ): Promise<{ dataSets: string[]; modules: string[] }> { + const params = { + index: indices, + allowNoIndices: true, + ignoreUnavailable: true, + body: { + size: 0, + query: { + bool: { + 
filter: [ + { + range: { + [timefield]: { + gte: 'now-24h', + lte: 'now', + }, + }, + }, + ], + }, + }, + aggs: { + datasets: { + composite: { + sources: [ + { + dataset: { + terms: { + field: 'event.dataset', + }, + }, + }, + ], + }, + }, + }, + }, + }; + + const bucketSelector = (response: InfraDatabaseSearchResponse<{}, DataSetResponse>) => + (response.aggregations && response.aggregations.datasets.buckets) || []; + const handleAfterKey = createAfterKeyHandler( + 'body.aggs.datasets.composite.after', + input => input?.aggregations?.datasets?.after_key + ); + + const buckets = await getAllCompositeData( + this.framework, + requestContext, + params, + bucketSelector, + handleAfterKey + ); + const dataSets = buckets.map(bucket => bucket.key.dataset); + const modules = dataSets.reduce((acc, dataset) => { + const module = first(dataset.split(/\./)); + return module ? uniq([...acc, module]) : acc; + }, [] as string[]); + return { modules, dataSets }; + } +} diff --git a/x-pack/plugins/infra/server/lib/adapters/fields/index.ts b/x-pack/plugins/infra/server/lib/adapters/fields/index.ts new file mode 100644 index 0000000000000..4e09b5d0e9e2d --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/fields/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './adapter_types'; diff --git a/x-pack/plugins/infra/server/lib/adapters/framework/adapter_types.ts b/x-pack/plugins/infra/server/lib/adapters/framework/adapter_types.ts new file mode 100644 index 0000000000000..b14536275cec3 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/framework/adapter_types.ts @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { SearchResponse, GenericParams } from 'elasticsearch'; +import { Lifecycle } from 'hapi'; +import { UsageCollectionSetup } from 'src/plugins/usage_collection/server'; +import { RouteMethod, RouteConfig } from '../../../../../../../../src/core/server'; +import { PluginSetupContract as FeaturesPluginSetup } from '../../../../../../../plugins/features/server'; +import { SpacesPluginSetup } from '../../../../../../../plugins/spaces/server'; +import { VisTypeTimeseriesSetup } from '../../../../../../../../src/plugins/vis_type_timeseries/server'; +import { APMPluginContract } from '../../../../../../../plugins/apm/server'; +import { HomeServerPluginSetup } from '../../../../../../../../src/plugins/home/server'; + +// NP_TODO: Compose real types from plugins we depend on, no "any" +export interface InfraServerPluginDeps { + home: HomeServerPluginSetup; + spaces: SpacesPluginSetup; + usageCollection: UsageCollectionSetup; + metrics: VisTypeTimeseriesSetup; + indexPatterns: { + indexPatternsServiceFactory: any; + }; + features: FeaturesPluginSetup; + apm: APMPluginContract; +} + +export interface CallWithRequestParams extends GenericParams { + max_concurrent_shard_requests?: number; + name?: string; + index?: string | string[]; + ignore_unavailable?: boolean; + allow_no_indices?: boolean; + size?: number; + terminate_after?: number; + fields?: string | string[]; +} + +export type InfraResponse = Lifecycle.ReturnValue; + +export interface InfraFrameworkPluginOptions { + register: any; + options: any; +} + +export interface InfraDatabaseResponse { + took: number; + timeout: boolean; +} + +export interface InfraDatabaseSearchResponse + extends InfraDatabaseResponse { + _shards: { + total: number; + successful: number; + skipped: number; + failed: number; + }; + aggregations?: Aggregations; + hits: { + total: { + value: number; + relation: string; + 
}; + hits: Hit[]; + }; +} + +export interface InfraDatabaseMultiResponse extends InfraDatabaseResponse { + responses: Array>; +} + +export interface InfraDatabaseFieldCapsResponse extends InfraDatabaseResponse { + indices: string[]; + fields: InfraFieldsResponse; +} + +export interface InfraDatabaseGetIndicesAliasResponse { + [indexName: string]: { + aliases: { + [aliasName: string]: any; + }; + }; +} + +export interface InfraDatabaseGetIndicesResponse { + [indexName: string]: { + aliases: { + [aliasName: string]: any; + }; + mappings: { + _meta: object; + dynamic_templates: any[]; + date_detection: boolean; + properties: { + [fieldName: string]: any; + }; + }; + settings: { index: object }; + }; +} + +export type SearchHit = SearchResponse['hits']['hits'][0]; + +export interface SortedSearchHit extends SearchHit { + sort: any[]; + _source: { + [field: string]: any; + }; +} + +export type InfraDateRangeAggregationBucket = { + from?: number; + to?: number; + doc_count: number; + key: string; +} & NestedAggregation; + +export interface InfraDateRangeAggregationResponse { + buckets: Array>; +} + +export interface InfraTopHitsAggregationResponse { + hits: { + hits: []; + }; +} + +export interface InfraMetadataAggregationBucket { + key: string; +} + +export interface InfraMetadataAggregationResponse { + buckets: InfraMetadataAggregationBucket[]; +} + +export interface InfraFieldsResponse { + [name: string]: InfraFieldDef; +} + +export interface InfraFieldDetails { + searchable: boolean; + aggregatable: boolean; + type: string; +} + +export interface InfraFieldDef { + [type: string]: InfraFieldDetails; +} + +export interface InfraTSVBResponse { + [key: string]: InfraTSVBPanel; +} + +export interface InfraTSVBPanel { + id: string; + series: InfraTSVBSeries[]; +} + +export interface InfraTSVBSeries { + id: string; + label: string; + data: InfraTSVBDataPoint[]; +} + +export type InfraTSVBDataPoint = [number, number]; + +export type InfraRouteConfig = { + method: 
RouteMethod; +} & RouteConfig; diff --git a/x-pack/plugins/infra/server/lib/adapters/framework/index.ts b/x-pack/plugins/infra/server/lib/adapters/framework/index.ts new file mode 100644 index 0000000000000..4e09b5d0e9e2d --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/framework/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './adapter_types'; diff --git a/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts new file mode 100644 index 0000000000000..4409667d8390a --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts @@ -0,0 +1,259 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +/* eslint-disable @typescript-eslint/array-type */ + +import { GenericParams } from 'elasticsearch'; +import { GraphQLSchema } from 'graphql'; +import { Legacy } from 'kibana'; +import { runHttpQuery } from 'apollo-server-core'; +import { schema, TypeOf } from '@kbn/config-schema'; +import { + InfraRouteConfig, + InfraTSVBResponse, + InfraServerPluginDeps, + CallWithRequestParams, + InfraDatabaseSearchResponse, + InfraDatabaseMultiResponse, + InfraDatabaseFieldCapsResponse, + InfraDatabaseGetIndicesResponse, + InfraDatabaseGetIndicesAliasResponse, +} from './adapter_types'; +import { TSVBMetricModel } from '../../../../common/inventory_models/types'; +import { + CoreSetup, + IRouter, + KibanaRequest, + RequestHandlerContext, + KibanaResponseFactory, + RouteMethod, +} from '../../../../../../../../src/core/server'; +import { RequestHandler } from '../../../../../../../../src/core/server'; +import { InfraConfig } from '../../../../../../../plugins/infra/server'; + +export class KibanaFramework { + public router: IRouter; + public plugins: InfraServerPluginDeps; + + constructor(core: CoreSetup, config: InfraConfig, plugins: InfraServerPluginDeps) { + this.router = core.http.createRouter(); + this.plugins = plugins; + } + + public registerRoute( + config: InfraRouteConfig, + handler: RequestHandler + ) { + const defaultOptions = { + tags: ['access:infra'], + }; + const routeConfig = { + path: config.path, + validate: config.validate, + // Currently we have no use of custom options beyond tags, this can be extended + // beyond defaultOptions if it's needed. 
+ options: defaultOptions, + }; + switch (config.method) { + case 'get': + this.router.get(routeConfig, handler); + break; + case 'post': + this.router.post(routeConfig, handler); + break; + case 'delete': + this.router.delete(routeConfig, handler); + break; + case 'put': + this.router.put(routeConfig, handler); + break; + } + } + + public registerGraphQLEndpoint(routePath: string, gqlSchema: GraphQLSchema) { + // These endpoints are validated by GraphQL at runtime and with GraphQL generated types + const body = schema.object({}, { allowUnknowns: true }); + type Body = TypeOf; + + const routeOptions = { + path: `/api/infra${routePath}`, + validate: { + body, + }, + options: { + tags: ['access:infra'], + }, + }; + async function handler( + context: RequestHandlerContext, + request: KibanaRequest, + response: KibanaResponseFactory + ) { + try { + const query = + request.route.method === 'post' + ? (request.body as Record) + : (request.query as Record); + + const gqlResponse = await runHttpQuery([context, request], { + method: request.route.method.toUpperCase(), + options: (req: RequestHandlerContext, rawReq: KibanaRequest) => ({ + context: { req, rawReq }, + schema: gqlSchema, + }), + query, + }); + + return response.ok({ + body: gqlResponse, + headers: { + 'content-type': 'application/json', + }, + }); + } catch (error) { + const errorBody = { + message: error.message, + }; + + if ('HttpQueryError' !== error.name) { + return response.internalError({ + body: errorBody, + }); + } + + if (error.isGraphQLError === true) { + return response.customError({ + statusCode: error.statusCode, + body: errorBody, + headers: { + 'Content-Type': 'application/json', + }, + }); + } + + const { headers = [], statusCode = 500 } = error; + return response.customError({ + statusCode, + headers, + body: errorBody, + }); + } + } + this.router.post(routeOptions, handler); + this.router.get(routeOptions, handler); + } + + callWithRequest( + requestContext: RequestHandlerContext, + endpoint: 
'search', + options?: CallWithRequestParams + ): Promise>; + callWithRequest( + requestContext: RequestHandlerContext, + endpoint: 'msearch', + options?: CallWithRequestParams + ): Promise>; + callWithRequest( + requestContext: RequestHandlerContext, + endpoint: 'fieldCaps', + options?: CallWithRequestParams + ): Promise; + callWithRequest( + requestContext: RequestHandlerContext, + endpoint: 'indices.existsAlias', + options?: CallWithRequestParams + ): Promise; + callWithRequest( + requestContext: RequestHandlerContext, + method: 'indices.getAlias', + options?: object + ): Promise; + callWithRequest( + requestContext: RequestHandlerContext, + method: 'indices.get' | 'ml.getBuckets', + options?: object + ): Promise; + callWithRequest( + requestContext: RequestHandlerContext, + endpoint: string, + options?: CallWithRequestParams + ): Promise; + + public async callWithRequest( + requestContext: RequestHandlerContext, + endpoint: string, + params: CallWithRequestParams + ) { + const { elasticsearch, uiSettings } = requestContext.core; + + const includeFrozen = await uiSettings.client.get('search:includeFrozen'); + if (endpoint === 'msearch') { + const maxConcurrentShardRequests = await uiSettings.client.get( + 'courier:maxConcurrentShardRequests' + ); + if (maxConcurrentShardRequests > 0) { + params = { ...params, max_concurrent_shard_requests: maxConcurrentShardRequests }; + } + } + + const frozenIndicesParams = ['search', 'msearch'].includes(endpoint) + ? 
{ + ignore_throttled: !includeFrozen, + } + : {}; + + return elasticsearch.dataClient.callAsCurrentUser(endpoint, { + ...params, + ...frozenIndicesParams, + }); + } + + public getIndexPatternsService( + requestContext: RequestHandlerContext + ): Legacy.IndexPatternsService { + return this.plugins.indexPatterns.indexPatternsServiceFactory({ + callCluster: async (method: string, args: [GenericParams], ...rest: any[]) => { + const fieldCaps = await this.callWithRequest(requestContext, method, { + ...args, + allowNoIndices: true, + } as GenericParams); + return fieldCaps; + }, + }); + } + + public getSpaceId(request: KibanaRequest): string { + const spacesPlugin = this.plugins.spaces; + + if ( + spacesPlugin && + spacesPlugin.spacesService && + typeof spacesPlugin.spacesService.getSpaceId === 'function' + ) { + return spacesPlugin.spacesService.getSpaceId(request); + } else { + return 'default'; + } + } + + public async makeTSVBRequest( + requestContext: RequestHandlerContext, + model: TSVBMetricModel, + timerange: { min: number; max: number }, + filters: any[] + ): Promise { + const { getVisData } = this.plugins.metrics; + if (typeof getVisData !== 'function') { + throw new Error('TSVB is not available'); + } + const options = { + timerange, + panels: [model], + filters, + }; + return getVisData(requestContext, options); + } +} diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts new file mode 100644 index 0000000000000..41bc2aa258807 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts @@ -0,0 +1,5 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/index.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/index.ts new file mode 100644 index 0000000000000..41bc2aa258807 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/index.ts @@ -0,0 +1,5 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts new file mode 100644 index 0000000000000..ec45171baa7b0 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts @@ -0,0 +1,390 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +/* eslint-disable @typescript-eslint/no-empty-interface */ + +import { timeMilliseconds } from 'd3-time'; +import * as runtimeTypes from 'io-ts'; +import first from 'lodash/fp/first'; +import get from 'lodash/fp/get'; +import has from 'lodash/fp/has'; +import zip from 'lodash/fp/zip'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { map, fold } from 'fp-ts/lib/Either'; +import { identity, constant } from 'fp-ts/lib/function'; +import { RequestHandlerContext } from 'src/core/server'; +import { compareTimeKeys, isTimeKey, TimeKey } from '../../../../common/time'; +import { JsonObject } from '../../../../common/typed_json'; +import { + LogEntriesAdapter, + LogEntryDocument, + LogEntryQuery, + LogSummaryBucket, +} from '../../domains/log_entries_domain'; +import { InfraSourceConfiguration } from '../../sources'; +import { SortedSearchHit } from '../framework'; +import { KibanaFramework } from '../framework/kibana_framework_adapter'; + +const DAY_MILLIS = 24 * 60 * 60 * 1000; +const LOOKUP_OFFSETS = [0, 1, 7, 30, 365, 10000, Infinity].map(days => days * DAY_MILLIS); +const TIMESTAMP_FORMAT = 'epoch_millis'; + +interface LogItemHit { + _index: string; + _id: string; + _source: JsonObject; + sort: [number, number]; +} + +export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { + constructor(private readonly framework: KibanaFramework) {} + + public async getAdjacentLogEntryDocuments( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + fields: string[], + start: TimeKey, + direction: 'asc' | 'desc', + maxCount: number, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise { + if (maxCount <= 0) { + return []; + } + + const intervals = getLookupIntervals(start.time, direction); + + let documents: LogEntryDocument[] = []; + for (const [intervalStart, intervalEnd] of intervals) { + if (documents.length >= maxCount) { + break; + } + + const documentsInInterval = await 
this.getLogEntryDocumentsBetween( + requestContext, + sourceConfiguration, + fields, + intervalStart, + intervalEnd, + documents.length > 0 ? documents[documents.length - 1].key : start, + maxCount - documents.length, + filterQuery, + highlightQuery + ); + + documents = [...documents, ...documentsInInterval]; + } + + return direction === 'asc' ? documents : documents.reverse(); + } + + public async getContainedLogEntryDocuments( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + fields: string[], + start: TimeKey, + end: TimeKey, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise { + const documents = await this.getLogEntryDocumentsBetween( + requestContext, + sourceConfiguration, + fields, + start.time, + end.time, + start, + 10000, + filterQuery, + highlightQuery + ); + + return documents.filter(document => compareTimeKeys(document.key, end) < 0); + } + + public async getContainedLogSummaryBuckets( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + start: number, + end: number, + bucketSize: number, + filterQuery?: LogEntryQuery + ): Promise { + const bucketIntervalStarts = timeMilliseconds(new Date(start), new Date(end), bucketSize); + + const query = { + allowNoIndices: true, + index: sourceConfiguration.logAlias, + ignoreUnavailable: true, + body: { + aggregations: { + count_by_date: { + date_range: { + field: sourceConfiguration.fields.timestamp, + format: TIMESTAMP_FORMAT, + ranges: bucketIntervalStarts.map(bucketIntervalStart => ({ + from: bucketIntervalStart.getTime(), + to: bucketIntervalStart.getTime() + bucketSize, + })), + }, + aggregations: { + top_hits_by_key: { + top_hits: { + size: 1, + sort: [ + { [sourceConfiguration.fields.timestamp]: 'asc' }, + { [sourceConfiguration.fields.tiebreaker]: 'asc' }, + ], + _source: false, + }, + }, + }, + }, + }, + query: { + bool: { + filter: [ + ...createQueryFilterClauses(filterQuery), + { + range: { + 
[sourceConfiguration.fields.timestamp]: { + gte: start, + lte: end, + format: TIMESTAMP_FORMAT, + }, + }, + }, + ], + }, + }, + size: 0, + track_total_hits: false, + }, + }; + + const response = await this.framework.callWithRequest(requestContext, 'search', query); + + return pipe( + LogSummaryResponseRuntimeType.decode(response), + map(logSummaryResponse => + logSummaryResponse.aggregations.count_by_date.buckets.map( + convertDateRangeBucketToSummaryBucket + ) + ), + fold(constant([]), identity) + ); + } + + public async getLogItem( + requestContext: RequestHandlerContext, + id: string, + sourceConfiguration: InfraSourceConfiguration + ) { + const search = (searchOptions: object) => + this.framework.callWithRequest(requestContext, 'search', searchOptions); + + const params = { + index: sourceConfiguration.logAlias, + terminate_after: 1, + body: { + size: 1, + sort: [ + { [sourceConfiguration.fields.timestamp]: 'desc' }, + { [sourceConfiguration.fields.tiebreaker]: 'desc' }, + ], + query: { + ids: { + values: [id], + }, + }, + }, + }; + + const response = await search(params); + const document = first(response.hits.hits); + if (!document) { + throw new Error('Document not found'); + } + return document; + } + + private async getLogEntryDocumentsBetween( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + fields: string[], + start: number, + end: number, + after: TimeKey | null, + maxCount: number, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise { + if (maxCount <= 0) { + return []; + } + + const sortDirection: 'asc' | 'desc' = start <= end ? 'asc' : 'desc'; + + const startRange = { + [sortDirection === 'asc' ? 'gte' : 'lte']: start, + }; + const endRange = + end === Infinity + ? {} + : { + [sortDirection === 'asc' ? 'lte' : 'gte']: end, + }; + + const highlightClause = highlightQuery + ? 
{ + highlight: { + boundary_scanner: 'word', + fields: fields.reduce( + (highlightFieldConfigs, fieldName) => ({ + ...highlightFieldConfigs, + [fieldName]: {}, + }), + {} + ), + fragment_size: 1, + number_of_fragments: 100, + post_tags: [''], + pre_tags: [''], + highlight_query: highlightQuery, + }, + } + : {}; + + const searchAfterClause = isTimeKey(after) + ? { + search_after: [after.time, after.tiebreaker], + } + : {}; + + const query = { + allowNoIndices: true, + index: sourceConfiguration.logAlias, + ignoreUnavailable: true, + body: { + query: { + bool: { + filter: [ + ...createQueryFilterClauses(filterQuery), + { + range: { + [sourceConfiguration.fields.timestamp]: { + ...startRange, + ...endRange, + format: TIMESTAMP_FORMAT, + }, + }, + }, + ], + }, + }, + ...highlightClause, + ...searchAfterClause, + _source: fields, + size: maxCount, + sort: [ + { [sourceConfiguration.fields.timestamp]: sortDirection }, + { [sourceConfiguration.fields.tiebreaker]: sortDirection }, + ], + track_total_hits: false, + }, + }; + + const response = await this.framework.callWithRequest( + requestContext, + 'search', + query + ); + const hits = response.hits.hits; + const documents = hits.map(convertHitToLogEntryDocument(fields)); + + return documents; + } +} + +function getLookupIntervals(start: number, direction: 'asc' | 'desc'): Array<[number, number]> { + const offsetSign = direction === 'asc' ? 1 : -1; + const translatedOffsets = LOOKUP_OFFSETS.map(offset => start + offset * offsetSign); + const intervals = zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< + [number, number] + >; + return intervals; +} + +const convertHitToLogEntryDocument = (fields: string[]) => ( + hit: SortedSearchHit +): LogEntryDocument => ({ + gid: hit._id, + fields: fields.reduce( + (flattenedFields, fieldName) => + has(fieldName, hit._source) + ? 
{ + ...flattenedFields, + [fieldName]: get(fieldName, hit._source), + } + : flattenedFields, + {} as { [fieldName: string]: string | number | boolean | null } + ), + highlights: hit.highlight || {}, + key: { + time: hit.sort[0], + tiebreaker: hit.sort[1], + }, +}); + +const convertDateRangeBucketToSummaryBucket = ( + bucket: LogSummaryDateRangeBucket +): LogSummaryBucket => ({ + entriesCount: bucket.doc_count, + start: bucket.from || 0, + end: bucket.to || 0, + topEntryKeys: bucket.top_hits_by_key.hits.hits.map(hit => ({ + tiebreaker: hit.sort[1], + time: hit.sort[0], + })), +}); + +const createQueryFilterClauses = (filterQuery: LogEntryQuery | undefined) => + filterQuery ? [filterQuery] : []; + +const LogSummaryDateRangeBucketRuntimeType = runtimeTypes.intersection([ + runtimeTypes.type({ + doc_count: runtimeTypes.number, + key: runtimeTypes.string, + top_hits_by_key: runtimeTypes.type({ + hits: runtimeTypes.type({ + hits: runtimeTypes.array( + runtimeTypes.type({ + sort: runtimeTypes.tuple([runtimeTypes.number, runtimeTypes.number]), + }) + ), + }), + }), + }), + runtimeTypes.partial({ + from: runtimeTypes.number, + to: runtimeTypes.number, + }), +]); + +export interface LogSummaryDateRangeBucket + extends runtimeTypes.TypeOf {} + +const LogSummaryResponseRuntimeType = runtimeTypes.type({ + aggregations: runtimeTypes.type({ + count_by_date: runtimeTypes.type({ + buckets: runtimeTypes.array(LogSummaryDateRangeBucketRuntimeType), + }), + }), +}); + +export interface LogSummaryResponse + extends runtimeTypes.TypeOf {} diff --git a/x-pack/plugins/infra/server/lib/adapters/metrics/adapter_types.ts b/x-pack/plugins/infra/server/lib/adapters/metrics/adapter_types.ts new file mode 100644 index 0000000000000..844eaf7604927 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/metrics/adapter_types.ts @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { RequestHandlerContext, KibanaRequest } from 'src/core/server'; +import { + InfraMetric, + InfraMetricData, + InfraNodeType, + InfraTimerangeInput, +} from '../../../graphql/types'; +import { InfraSourceConfiguration } from '../../sources'; + +export interface InfraMetricsRequestOptions { + nodeIds: { + nodeId: string; + cloudId?: string | null; + }; + nodeType: InfraNodeType; + sourceConfiguration: InfraSourceConfiguration; + timerange: InfraTimerangeInput; + metrics: InfraMetric[]; +} + +export interface InfraMetricsAdapter { + getMetrics( + requestContext: RequestHandlerContext, + options: InfraMetricsRequestOptions, + request: KibanaRequest + ): Promise; +} + +export enum InfraMetricModelQueryType { + lucene = 'lucene', + kuery = 'kuery', +} + +export enum InfraMetricModelMetricType { + avg = 'avg', + max = 'max', + min = 'min', + calculation = 'calculation', + cardinality = 'cardinality', + series_agg = 'series_agg', // eslint-disable-line @typescript-eslint/camelcase + positive_only = 'positive_only', // eslint-disable-line @typescript-eslint/camelcase + derivative = 'derivative', + count = 'count', + sum = 'sum', + cumulative_sum = 'cumulative_sum', // eslint-disable-line @typescript-eslint/camelcase +} + +export interface InfraMetricModel { + id: InfraMetric; + requires: string[]; + index_pattern: string | string[]; + interval: string; + time_field: string; + type: string; + series: InfraMetricModelSeries[]; + filter?: string; + map_field_to?: string; + id_type?: 'cloud' | 'node'; +} + +export interface InfraMetricModelSeries { + id: string; + metrics: InfraMetricModelMetric[]; + split_mode: string; + terms_field?: string; + terms_size?: number; + terms_order_by?: string; + filter?: { query: string; language: InfraMetricModelQueryType }; +} + +export interface InfraMetricModelBasicMetric { + id: string; + field?: string | null; + 
type: InfraMetricModelMetricType; +} + +export interface InfraMetricModelSeriesAgg { + id: string; + function: string; + type: InfraMetricModelMetricType.series_agg; // eslint-disable-line @typescript-eslint/camelcase +} + +export interface InfraMetricModelDerivative { + id: string; + field: string; + unit: string; + type: InfraMetricModelMetricType; +} + +export interface InfraMetricModelBucketScriptVariable { + field: string; + id: string; + name: string; +} + +export interface InfraMetricModelCount { + id: string; + type: InfraMetricModelMetricType.count; +} + +export interface InfraMetricModelBucketScript { + id: string; + script: string; + type: InfraMetricModelMetricType.calculation; + variables: InfraMetricModelBucketScriptVariable[]; +} + +export type InfraMetricModelMetric = + | InfraMetricModelCount + | InfraMetricModelBasicMetric + | InfraMetricModelBucketScript + | InfraMetricModelDerivative + | InfraMetricModelSeriesAgg; + +export type InfraMetricModelCreator = ( + timeField: string, + indexPattern: string | string[], + interval: string +) => InfraMetricModel; diff --git a/x-pack/plugins/infra/server/lib/adapters/metrics/index.ts b/x-pack/plugins/infra/server/lib/adapters/metrics/index.ts new file mode 100644 index 0000000000000..4e09b5d0e9e2d --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/metrics/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './adapter_types'; diff --git a/x-pack/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts new file mode 100644 index 0000000000000..6acb8afbfb249 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { i18n } from '@kbn/i18n'; +import { flatten, get } from 'lodash'; +import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; +import { InfraMetric, InfraMetricData } from '../../../graphql/types'; +import { KibanaFramework } from '../framework/kibana_framework_adapter'; +import { InfraMetricsAdapter, InfraMetricsRequestOptions } from './adapter_types'; +import { checkValidNode } from './lib/check_valid_node'; +import { metrics, findInventoryFields } from '../../../../common/inventory_models'; +import { TSVBMetricModelCreator } from '../../../../common/inventory_models/types'; +import { calculateMetricInterval } from '../../../utils/calculate_metric_interval'; + +export class KibanaMetricsAdapter implements InfraMetricsAdapter { + private framework: KibanaFramework; + + constructor(framework: KibanaFramework) { + this.framework = framework; + } + + public async getMetrics( + requestContext: RequestHandlerContext, + options: InfraMetricsRequestOptions, + rawRequest: KibanaRequest + ): Promise { + const indexPattern = `${options.sourceConfiguration.metricAlias},${options.sourceConfiguration.logAlias}`; + const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); + const nodeField = fields.id; + + const search = (searchOptions: object) => + this.framework.callWithRequest<{}, Aggregation>(requestContext, 
'search', searchOptions); + + const validNode = await checkValidNode(search, indexPattern, nodeField, options.nodeIds.nodeId); + if (!validNode) { + throw new Error( + i18n.translate('xpack.infra.kibanaMetrics.nodeDoesNotExistErrorMessage', { + defaultMessage: '{nodeId} does not exist.', + values: { + nodeId: options.nodeIds.nodeId, + }, + }) + ); + } + + const requests = options.metrics.map(metricId => + this.makeTSVBRequest(metricId, options, nodeField, requestContext) + ); + + return Promise.all(requests) + .then(results => { + return results.map(result => { + const metricIds = Object.keys(result).filter( + k => !['type', 'uiRestrictions'].includes(k) + ); + + return metricIds.map((id: string) => { + const infraMetricId: InfraMetric = (InfraMetric as any)[id]; + if (!infraMetricId) { + throw new Error( + i18n.translate('xpack.infra.kibanaMetrics.invalidInfraMetricErrorMessage', { + defaultMessage: '{id} is not a valid InfraMetric', + values: { + id, + }, + }) + ); + } + const panel = result[infraMetricId]; + return { + id: infraMetricId, + series: panel.series.map(series => { + return { + id: series.id, + label: series.label, + data: series.data.map(point => ({ timestamp: point[0], value: point[1] })), + }; + }), + }; + }); + }); + }) + .then(result => flatten(result)); + } + + async makeTSVBRequest( + metricId: InfraMetric, + options: InfraMetricsRequestOptions, + nodeField: string, + requestContext: RequestHandlerContext + ) { + const createTSVBModel = get(metrics, ['tsvb', metricId]) as TSVBMetricModelCreator | undefined; + if (!createTSVBModel) { + throw new Error( + i18n.translate('xpack.infra.metrics.missingTSVBModelError', { + defaultMessage: 'The TSVB model for {metricId} does not exist for {nodeType}', + values: { + metricId, + nodeType: options.nodeType, + }, + }) + ); + } + + const indexPattern = `${options.sourceConfiguration.metricAlias},${options.sourceConfiguration.logAlias}`; + const timerange = { + min: options.timerange.from, + max: 
options.timerange.to, + }; + + const model = createTSVBModel( + options.sourceConfiguration.fields.timestamp, + indexPattern, + options.timerange.interval + ); + const calculatedInterval = await calculateMetricInterval( + this.framework, + requestContext, + { + indexPattern: `${options.sourceConfiguration.logAlias},${options.sourceConfiguration.metricAlias}`, + timestampField: options.sourceConfiguration.fields.timestamp, + timerange: options.timerange, + }, + model.requires + ); + + if (calculatedInterval) { + model.interval = `>=${calculatedInterval}s`; + } + + if (model.id_type === 'cloud' && !options.nodeIds.cloudId) { + throw new Error( + i18n.translate('xpack.infra.kibanaMetrics.cloudIdMissingErrorMessage', { + defaultMessage: + 'Model for {metricId} requires a cloudId, but none was given for {nodeId}.', + values: { + metricId, + nodeId: options.nodeIds.nodeId, + }, + }) + ); + } + const id = + model.id_type === 'cloud' ? (options.nodeIds.cloudId as string) : options.nodeIds.nodeId; + const filters = model.map_field_to + ? [{ match: { [model.map_field_to]: id } }] + : [{ match: { [nodeField]: id } }]; + + return this.framework.makeTSVBRequest(requestContext, model, timerange, filters); + } +} diff --git a/x-pack/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts b/x-pack/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts new file mode 100644 index 0000000000000..bca509334b692 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { InfraDatabaseSearchResponse } from '../../framework'; + +export const checkValidNode = async ( + search: (options: object) => Promise>, + indexPattern: string | string[], + field: string, + id: string +): Promise => { + const params = { + allowNoIndices: true, + ignoreUnavailable: true, + index: indexPattern, + terminateAfter: 1, + body: { + size: 0, + query: { + match: { + [field]: id, + }, + }, + }, + }; + + const result = await search(params); + return result && result.hits && result.hits.total && result.hits.total.value > 0; +}; diff --git a/x-pack/plugins/infra/server/lib/adapters/metrics/lib/errors.ts b/x-pack/plugins/infra/server/lib/adapters/metrics/lib/errors.ts new file mode 100644 index 0000000000000..750858f3ce1fa --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/metrics/lib/errors.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { ApolloError } from 'apollo-server-errors'; +import { InfraMetricsErrorCodes } from '../../../../../common/errors'; + +export class InvalidNodeError extends ApolloError { + constructor(message: string) { + super(message, InfraMetricsErrorCodes.invalid_node); + Object.defineProperty(this, 'name', { value: 'InvalidNodeError' }); + } +} diff --git a/x-pack/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts new file mode 100644 index 0000000000000..635f6ff9762c5 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { RequestHandlerContext } from 'src/core/server'; +import { InfraSourceStatusAdapter } from '../../source_status'; +import { InfraDatabaseGetIndicesResponse } from '../framework'; +import { KibanaFramework } from '../framework/kibana_framework_adapter'; + +export class InfraElasticsearchSourceStatusAdapter implements InfraSourceStatusAdapter { + constructor(private readonly framework: KibanaFramework) {} + + public async getIndexNames(requestContext: RequestHandlerContext, aliasName: string) { + const indexMaps = await Promise.all([ + this.framework + .callWithRequest(requestContext, 'indices.getAlias', { + name: aliasName, + filterPath: '*.settings.index.uuid', // to keep the response size as small as possible + }) + .catch(withDefaultIfNotFound({})), + this.framework + .callWithRequest(requestContext, 'indices.get', { + index: aliasName, + filterPath: '*.settings.index.uuid', // to keep the response size as small as possible + }) + .catch(withDefaultIfNotFound({})), + ]); + + return indexMaps.reduce( + (indexNames, indexMap) => [...indexNames, ...Object.keys(indexMap)], + [] as string[] + ); + } + + public async hasAlias(requestContext: RequestHandlerContext, aliasName: string) { + return await this.framework.callWithRequest(requestContext, 'indices.existsAlias', { + name: aliasName, + }); + } + + public async hasIndices(requestContext: RequestHandlerContext, indexNames: string) { + return await this.framework + .callWithRequest(requestContext, 'search', { + ignore_unavailable: true, + allow_no_indices: true, + index: indexNames, + size: 0, + terminate_after: 1, + }) + .then( + response => response._shards.total > 0, + err => { + if (err.status === 404) { + return false; + } + throw err; + } + ); + } +} + +const withDefaultIfNotFound = (defaultValue: DefaultValue) => ( + error: any +): DefaultValue => { + if (error && error.status === 
404) { + return defaultValue; + } + throw error; +}; diff --git a/x-pack/plugins/infra/server/lib/adapters/source_status/index.ts b/x-pack/plugins/infra/server/lib/adapters/source_status/index.ts new file mode 100644 index 0000000000000..f5adfe190f805 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/adapters/source_status/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export { InfraElasticsearchSourceStatusAdapter } from './elasticsearch_source_status_adapter'; diff --git a/x-pack/plugins/infra/server/lib/compose/kibana.ts b/x-pack/plugins/infra/server/lib/compose/kibana.ts new file mode 100644 index 0000000000000..305841aa52d36 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/compose/kibana.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { FrameworkFieldsAdapter } from '../adapters/fields/framework_fields_adapter'; +import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; +import { InfraKibanaLogEntriesAdapter } from '../adapters/log_entries/kibana_log_entries_adapter'; +import { KibanaMetricsAdapter } from '../adapters/metrics/kibana_metrics_adapter'; +import { InfraElasticsearchSourceStatusAdapter } from '../adapters/source_status'; +import { InfraFieldsDomain } from '../domains/fields_domain'; +import { InfraLogEntriesDomain } from '../domains/log_entries_domain'; +import { InfraMetricsDomain } from '../domains/metrics_domain'; +import { InfraBackendLibs, InfraDomainLibs } from '../infra_types'; +import { InfraLogAnalysis } from '../log_analysis'; +import { InfraSnapshot } from '../snapshot'; +import { InfraSourceStatus } from '../source_status'; +import { InfraSources } from '../sources'; +import { InfraConfig } from '../../../../../../plugins/infra/server'; +import { CoreSetup } from '../../../../../../../src/core/server'; +import { InfraServerPluginDeps } from '../adapters/framework/adapter_types'; + +export function compose(core: CoreSetup, config: InfraConfig, plugins: InfraServerPluginDeps) { + const framework = new KibanaFramework(core, config, plugins); + const sources = new InfraSources({ + config, + }); + const sourceStatus = new InfraSourceStatus(new InfraElasticsearchSourceStatusAdapter(framework), { + sources, + }); + const snapshot = new InfraSnapshot({ sources, framework }); + const logAnalysis = new InfraLogAnalysis({ framework }); + + // TODO: separate these out individually and do away with "domains" as a temporary group + const domainLibs: InfraDomainLibs = { + fields: new InfraFieldsDomain(new FrameworkFieldsAdapter(framework), { + sources, + }), + logEntries: new InfraLogEntriesDomain(new InfraKibanaLogEntriesAdapter(framework), { + sources, + }), + metrics: new InfraMetricsDomain(new KibanaMetricsAdapter(framework)), + }; + + const 
libs: InfraBackendLibs = { + configuration: config, // NP_TODO: Do we ever use this anywhere? + framework, + logAnalysis, + snapshot, + sources, + sourceStatus, + ...domainLibs, + }; + + return libs; +} diff --git a/x-pack/plugins/infra/server/lib/constants.ts b/x-pack/plugins/infra/server/lib/constants.ts new file mode 100644 index 0000000000000..0765256c4160c --- /dev/null +++ b/x-pack/plugins/infra/server/lib/constants.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const CLOUD_METRICS_MODULES = ['aws']; diff --git a/x-pack/plugins/infra/server/lib/domains/fields_domain.ts b/x-pack/plugins/infra/server/lib/domains/fields_domain.ts new file mode 100644 index 0000000000000..a00c76216da4c --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/fields_domain.ts @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { InfraIndexField, InfraIndexType } from '../../graphql/types'; +import { FieldsAdapter } from '../adapters/fields'; +import { InfraSources } from '../sources'; + +export class InfraFieldsDomain { + constructor( + private readonly adapter: FieldsAdapter, + private readonly libs: { sources: InfraSources } + ) {} + + public async getFields( + requestContext: RequestHandlerContext, + sourceId: string, + indexType: InfraIndexType + ): Promise { + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const includeMetricIndices = [InfraIndexType.ANY, InfraIndexType.METRICS].includes(indexType); + const includeLogIndices = [InfraIndexType.ANY, InfraIndexType.LOGS].includes(indexType); + + const fields = await this.adapter.getIndexFields( + requestContext, + `${includeMetricIndices ? configuration.metricAlias : ''},${ + includeLogIndices ? configuration.logAlias : '' + }`, + configuration.fields.timestamp + ); + + return fields; + } +} diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts new file mode 100644 index 0000000000000..367ae6a0cae89 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts @@ -0,0 +1,263 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('Apache2 Access', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'apache.access', + 'event.module': 'apache', + 'fileset.name': 'access', + 'http.request.method': 'GET', + 'http.request.referrer': '-', + 'http.response.body.bytes': 499, + 'http.response.status_code': 404, + 'http.version': '1.1', + 'input.type': 'log', + 'log.offset': 73, + 'service.type': 'apache', + 'source.address': '192.168.33.1', + 'source.ip': '192.168.33.1', + 'url.original': '/hello', + 'user.name': '-', + 'user_agent.device': 'Other', + 'user_agent.major': '50', + 'user_agent.minor': '0', + 'user_agent.name': 'Firefox', + 'user_agent.original': + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0', + 'user_agent.os.full_name': 'Mac OS X 10.12', + 'user_agent.os.major': '10', + 'user_agent.os.minor': '12', + 'user_agent.os.name': 'Mac OS X', + }; + const highlights = { + 'http.request.method': ['GET'], + }; + + expect(format(flattenedDocument, highlights)).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.module", + "highlights": Array [], + "value": "apache", + }, + Object { + "constant": "][access] ", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "192.168.33.1", + }, + Object { + "constant": " ", + }, + Object { + "field": "user.name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "http.request.method", + "highlights": Array [ + "GET", + ], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "url.original", + "highlights": Array [], + "value": "/hello", + }, + 
Object { + "constant": " HTTP/", + }, + Object { + "field": "http.version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "http.response.status_code", + "highlights": Array [], + "value": "404", + }, + Object { + "constant": " ", + }, + Object { + "field": "http.response.body.bytes", + "highlights": Array [], + "value": "499", + }, +] +`); + }); + + test('Apache2 Error', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:08.000Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'apache.error', + 'event.module': 'apache', + 'fileset.name': 'error', + 'input.type': 'log', + 'log.level': 'error', + 'log.offset': 0, + message: 'File does not exist: /var/www/favicon.ico', + 'service.type': 'apache', + 'source.address': '192.168.33.1', + 'source.ip': '192.168.33.1', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[apache][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "error", + }, + Object { + "constant": "] ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "File does not exist: /var/www/favicon.ico", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('Apache2 Access', () => { + const flattenedDocument = { + 'apache2.access': true, + 'apache2.access.remote_ip': '192.168.1.42', + 'apache2.access.user_name': 'admin', + 'apache2.access.method': 'GET', + 'apache2.access.url': '/faqs', + 'apache2.access.http_version': '1.1', + 'apache2.access.response_code': '200', + 'apache2.access.body_sent.bytes': 1024, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[apache][access] ", + }, + Object { + "field": "apache2.access.remote_ip", + "highlights": Array [], + "value": "192.168.1.42", + }, + Object { + "constant": " ", + }, + Object { + "field": "apache2.access.user_name", + "highlights": Array [], 
+ "value": "admin", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "apache2.access.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "apache2.access.url", + "highlights": Array [], + "value": "/faqs", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "apache2.access.http_version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "apache2.access.response_code", + "highlights": Array [], + "value": "200", + }, + Object { + "constant": " ", + }, + Object { + "field": "apache2.access.body_sent.bytes", + "highlights": Array [], + "value": "1024", + }, +] +`); + }); + + test('Apache2 Error', () => { + const flattenedDocument = { + 'apache2.error.message': + 'AH00489: Apache/2.4.18 (Ubuntu) configured -- resuming normal operations', + 'apache2.error.level': 'notice', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[apache][", + }, + Object { + "field": "apache2.error.level", + "highlights": Array [], + "value": "notice", + }, + Object { + "constant": "] ", + }, + Object { + "field": "apache2.error.message", + "highlights": Array [], + "value": "AH00489: Apache/2.4.18 (Ubuntu) configured -- resuming normal operations", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts new file mode 100644 index 0000000000000..fe7ebffe91329 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const filebeatApache2Rules = [ + { + // pre-ECS + when: { + exists: ['apache2.access'], + }, + format: [ + { + constant: '[apache][access] ', + }, + { + field: 'apache2.access.remote_ip', + }, + { + constant: ' ', + }, + { + field: 'apache2.access.user_name', + }, + { + constant: ' "', + }, + { + field: 'apache2.access.method', + }, + { + constant: ' ', + }, + { + field: 'apache2.access.url', + }, + { + constant: ' HTTP/', + }, + { + field: 'apache2.access.http_version', + }, + { + constant: '" ', + }, + { + field: 'apache2.access.response_code', + }, + { + constant: ' ', + }, + { + field: 'apache2.access.body_sent.bytes', + }, + ], + }, + { + // ECS + when: { + values: { + 'event.dataset': 'apache.error', + }, + }, + format: [ + { + constant: '[apache][', + }, + { + field: 'log.level', + }, + { + constant: '] ', + }, + { + field: 'message', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['apache2.error.message'], + }, + format: [ + { + constant: '[apache][', + }, + { + field: 'apache2.error.level', + }, + { + constant: '] ', + }, + { + field: 'apache2.error.message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts new file mode 100644 index 0000000000000..aa490c595d9fd --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts @@ -0,0 +1,359 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { compileFormattingRules } from '../message'; +import { filebeatAuditdRules } from './filebeat_auditd'; + +const { format } = compileFormattingRules(filebeatAuditdRules); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('auditd log with outcome', () => { + const flattenedDocument = { + '@timestamp': '2016-12-07T02:17:21.515Z', + 'auditd.log': { + addr: '96.241.146.97', + cipher: 'chacha20-poly1305@openssh.com', + direction: 'from-server', + ksize: '512', + laddr: '10.142.0.2', + lport: '22', + pfs: 'curve25519-sha256@libssh.org', + rport: '63927', + sequence: 406, + ses: '4294967295', + spid: '1299', + subj: 'system_u:system_r:sshd_t:s0-s0:c0.c1023', + }, + 'ecs.version': '1.0.0-beta2', + 'event.action': 'crypto_session', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'event.outcome': 'success', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.offset': 783, + message: 'op=start', + process: { executable: '/usr/sbin/sshd', pid: 1298 }, + 'service.type': 'auditd', + user: { 'audit.id': '4294967295', id: '0', 'saved.id': '74' }, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[AuditD][", + }, + Object { + "field": "event.action", + "highlights": Array [], + "value": "crypto_session", + }, + Object { + "constant": "]", + }, + Object { + "constant": " ", + }, + Object { + "field": "event.outcome", + "highlights": Array [], + "value": "success", + }, + Object { + "constant": " ", + }, + Object { + "constant": "user", + }, + Object { + "constant": "=", + }, + Object { + "field": "user", + "highlights": Array [], + "value": "{\\"audit.id\\":\\"4294967295\\",\\"id\\":\\"0\\",\\"saved.id\\":\\"74\\"}", + }, + Object { + "constant": " ", + }, + Object { + "constant": "process", + }, + Object { + "constant": "=", + }, + Object { + "field": "process", + "highlights": Array [], + "value": "{\\"executable\\":\\"/usr/sbin/sshd\\",\\"pid\\":1298}", + }, + 
Object { + "constant": " ", + }, + Object { + "field": "auditd.log", + "highlights": Array [], + "value": "{\\"addr\\":\\"96.241.146.97\\",\\"cipher\\":\\"chacha20-poly1305@openssh.com\\",\\"direction\\":\\"from-server\\",\\"ksize\\":\\"512\\",\\"laddr\\":\\"10.142.0.2\\",\\"lport\\":\\"22\\",\\"pfs\\":\\"curve25519-sha256@libssh.org\\",\\"rport\\":\\"63927\\",\\"sequence\\":406,\\"ses\\":\\"4294967295\\",\\"spid\\":\\"1299\\",\\"subj\\":\\"system_u:system_r:sshd_t:s0-s0:c0.c1023\\"}", + }, + Object { + "constant": " ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "op=start", + }, +] +`); + }); + + test('auditd log without outcome', () => { + const flattenedDocument = { + '@timestamp': '2017-01-31T20:17:14.891Z', + 'auditd.log': { + a0: '9', + a1: '7f564b2672a0', + a2: 'b8', + a3: '0', + exit: '184', + items: '0', + sequence: 18877199, + ses: '4294967295', + success: 'yes', + syscall: '44', + tty: '(none)', + }, + 'ecs.version': '1.0.0-beta2', + 'event.action': 'syscall', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'fileset.name': 'log', + 'host.architecture': 'x86_64', + 'input.type': 'log', + 'log.offset': 174, + process: { + executable: '/usr/libexec/strongswan/charon (deleted)', + name: 'charon', + pid: 1281, + ppid: 1240, + }, + 'service.type': 'auditd', + user: { + 'audit.id': '4294967295', + 'effective.group.id': '0', + 'effective.id': '0', + 'filesystem.group.id': '0', + 'filesystem.id': '0', + 'group.id': '0', + id: '0', + 'saved.group.id': '0', + 'saved.id': '0', + }, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[AuditD][", + }, + Object { + "field": "event.action", + "highlights": Array [], + "value": "syscall", + }, + Object { + "constant": "]", + }, + Object { + "constant": " ", + }, + Object { + "constant": "user", + }, + Object { + "constant": "=", + }, + Object { + "field": "user", + "highlights": Array [], + "value": 
"{\\"audit.id\\":\\"4294967295\\",\\"effective.group.id\\":\\"0\\",\\"effective.id\\":\\"0\\",\\"filesystem.group.id\\":\\"0\\",\\"filesystem.id\\":\\"0\\",\\"group.id\\":\\"0\\",\\"id\\":\\"0\\",\\"saved.group.id\\":\\"0\\",\\"saved.id\\":\\"0\\"}", + }, + Object { + "constant": " ", + }, + Object { + "constant": "process", + }, + Object { + "constant": "=", + }, + Object { + "field": "process", + "highlights": Array [], + "value": "{\\"executable\\":\\"/usr/libexec/strongswan/charon (deleted)\\",\\"name\\":\\"charon\\",\\"pid\\":1281,\\"ppid\\":1240}", + }, + Object { + "constant": " ", + }, + Object { + "field": "auditd.log", + "highlights": Array [], + "value": "{\\"a0\\":\\"9\\",\\"a1\\":\\"7f564b2672a0\\",\\"a2\\":\\"b8\\",\\"a3\\":\\"0\\",\\"exit\\":\\"184\\",\\"items\\":\\"0\\",\\"sequence\\":18877199,\\"ses\\":\\"4294967295\\",\\"success\\":\\"yes\\",\\"syscall\\":\\"44\\",\\"tty\\":\\"(none)\\"}", + }, + Object { + "constant": " ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "undefined", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('auditd IPSEC rule', () => { + const event = { + '@timestamp': '2017-01-31T20:17:14.891Z', + 'auditd.log.auid': '4294967295', + 'auditd.log.dst': '192.168.0.0', + 'auditd.log.dst_prefixlen': '16', + 'auditd.log.op': 'SPD-delete', + 'auditd.log.record_type': 'MAC_IPSEC_EVENT', + 'auditd.log.res': '1', + 'auditd.log.sequence': 18877201, + 'auditd.log.ses': '4294967295', + 'auditd.log.src': '192.168.2.0', + 'auditd.log.src_prefixlen': '24', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.offset': 0, + }; + const message = format(event, {}); + expect(message).toEqual([ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type', highlights: [], value: 'MAC_IPSEC_EVENT' }, + { constant: '] src:' }, + { field: 'auditd.log.src', highlights: [], value: '192.168.2.0' }, + { 
constant: ' dst:' }, + { field: 'auditd.log.dst', highlights: [], value: '192.168.0.0' }, + { constant: ' op:' }, + { field: 'auditd.log.op', highlights: [], value: 'SPD-delete' }, + ]); + }); + + test('AuditD SYSCALL rule', () => { + const event = { + '@timestamp': '2017-01-31T20:17:14.891Z', + 'auditd.log.a0': '9', + 'auditd.log.a1': '7f564b2672a0', + 'auditd.log.a2': 'b8', + 'auditd.log.a3': '0', + 'auditd.log.arch': 'x86_64', + 'auditd.log.auid': '4294967295', + 'auditd.log.comm': 'charon', + 'auditd.log.egid': '0', + 'auditd.log.euid': '0', + 'auditd.log.exe': '/usr/libexec/strongswan/charon (deleted)', + 'auditd.log.exit': '184', + 'auditd.log.fsgid': '0', + 'auditd.log.fsuid': '0', + 'auditd.log.gid': '0', + 'auditd.log.items': '0', + 'auditd.log.pid': '1281', + 'auditd.log.ppid': '1240', + 'auditd.log.record_type': 'SYSCALL', + 'auditd.log.sequence': 18877199, + 'auditd.log.ses': '4294967295', + 'auditd.log.sgid': '0', + 'auditd.log.success': 'yes', + 'auditd.log.suid': '0', + 'auditd.log.syscall': '44', + 'auditd.log.tty': '(none)', + 'auditd.log.uid': '0', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.offset': 174, + }; + const message = format(event, {}); + expect(message).toEqual([ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type', highlights: [], value: 'SYSCALL' }, + { constant: '] exe:' }, + { + field: 'auditd.log.exe', + highlights: [], + value: '/usr/libexec/strongswan/charon (deleted)', + }, + { constant: ' gid:' }, + { field: 'auditd.log.gid', highlights: [], value: '0' }, + { constant: ' uid:' }, + { field: 'auditd.log.uid', highlights: [], value: '0' }, + { constant: ' tty:' }, + { field: 'auditd.log.tty', highlights: [], value: '(none)' }, + { constant: ' pid:' }, + { field: 'auditd.log.pid', highlights: [], value: '1281' }, + { constant: ' ppid:' }, + { field: 'auditd.log.ppid', highlights: [], value: '1240' }, + ]); + }); + + 
test('AuditD events with msg rule', () => { + const event = { + '@timestamp': '2017-01-31T20:17:14.891Z', + 'auditd.log.auid': '4294967295', + 'auditd.log.record_type': 'EXAMPLE', + 'auditd.log.msg': 'some kind of message', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.offset': 174, + }; + const message = format(event, {}); + expect(message).toEqual([ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type', highlights: [], value: 'EXAMPLE' }, + { constant: '] ' }, + { + field: 'auditd.log.msg', + highlights: [], + value: 'some kind of message', + }, + ]); + }); + + test('AuditD catchall rule', () => { + const event = { + '@timestamp': '2017-01-31T20:17:14.891Z', + 'auditd.log.auid': '4294967295', + 'auditd.log.record_type': 'EXAMPLE', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'auditd.log', + 'event.module': 'auditd', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.offset': 174, + }; + const message = format(event, {}); + expect(message).toEqual([ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type', highlights: [], value: 'EXAMPLE' }, + { constant: '] Event without message.' }, + ]); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts new file mode 100644 index 0000000000000..d2557cf1599ce --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { labelField } from './helpers'; + +const commonActionField = [{ constant: '[AuditD][' }, { field: 'event.action' }, { constant: ']' }]; +const commonOutcomeField = [{ constant: ' ' }, { field: 'event.outcome' }]; + +export const filebeatAuditdRules = [ + { + // ECS format with outcome + when: { + exists: ['ecs.version', 'event.action', 'event.outcome', 'auditd.log'], + }, + format: [ + ...commonActionField, + ...commonOutcomeField, + ...labelField('user', 'user'), + ...labelField('process', 'process'), + { constant: ' ' }, + { field: 'auditd.log' }, + { constant: ' ' }, + { field: 'message' }, + ], + }, + { + // ECS format without outcome + when: { + exists: ['ecs.version', 'event.action', 'auditd.log'], + }, + format: [ + ...commonActionField, + ...labelField('user', 'user'), + ...labelField('process', 'process'), + { constant: ' ' }, + { field: 'auditd.log' }, + { constant: ' ' }, + { field: 'message' }, + ], + }, + { + // pre-ECS IPSEC_EVENT Rule + when: { + exists: ['auditd.log.record_type', 'auditd.log.src', 'auditd.log.dst', 'auditd.log.op'], + values: { + 'auditd.log.record_type': 'MAC_IPSEC_EVENT', + }, + }, + format: [ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type' }, + { constant: '] src:' }, + { field: 'auditd.log.src' }, + { constant: ' dst:' }, + { field: 'auditd.log.dst' }, + { constant: ' op:' }, + { field: 'auditd.log.op' }, + ], + }, + { + // pre-ECS SYSCALL Rule + when: { + exists: [ + 'auditd.log.record_type', + 'auditd.log.exe', + 'auditd.log.gid', + 'auditd.log.uid', + 'auditd.log.tty', + 'auditd.log.pid', + 'auditd.log.ppid', + ], + values: { + 'auditd.log.record_type': 'SYSCALL', + }, + }, + format: [ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type' }, + { constant: '] exe:' }, + { field: 'auditd.log.exe' }, + { constant: ' gid:' }, + { field: 'auditd.log.gid' }, + { constant: ' uid:' }, + { field: 'auditd.log.uid' }, + { constant: ' tty:' }, + { field: 'auditd.log.tty' }, + { constant: ' 
pid:' }, + { field: 'auditd.log.pid' }, + { constant: ' ppid:' }, + { field: 'auditd.log.ppid' }, + ], + }, + { + // pre-ECS Events with `msg` Rule + when: { + exists: ['auditd.log.record_type', 'auditd.log.msg'], + }, + format: [ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type' }, + { constant: '] ' }, + { field: 'auditd.log.msg' }, + ], + }, + { + // pre-ECS Events with `msg` Rule + when: { + exists: ['auditd.log.record_type'], + }, + format: [ + { constant: '[AuditD][' }, + { field: 'auditd.log.record_type' }, + { constant: '] Event without message.' }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts new file mode 100644 index 0000000000000..752b61684887e --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts @@ -0,0 +1,791 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { compileFormattingRules } from '../message'; +import { filebeatHaproxyRules } from './filebeat_haproxy'; + +const { format } = compileFormattingRules(filebeatHaproxyRules); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('haproxy default log', () => { + const flattenedDocument = { + 'destination.ip': '1.2.3.4', + 'destination.port': 5000, + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'haproxy.log', + 'event.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.frontend_name': 'main', + 'haproxy.mode': 'HTTP', + 'haproxy.source': '1.2.3.4', + 'input.type': 'log', + 'log.offset': 0, + 'process.name': 'haproxy', + 'process.pid': 24551, + 'service.type': 'haproxy', + 'source.address': '1.2.3.4', + 'source.geo.continent_name': 'North America', + 'source.geo.country_iso_code': 'US', + 'source.geo.location.lat': 37.751, + 'source.geo.location.lon': -97.822, + 'source.ip': '1.2.3.4', + 'source.port': 40780, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy] ", + }, + Object { + "field": "source.address", + "highlights": Array [], + "value": "1.2.3.4", + }, + Object { + "constant": ":", + }, + Object { + "field": "source.port", + "highlights": Array [], + "value": "40780", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "main", + }, +] +`); + }); + + test('haproxy tcp log', () => { + const flattenedDocument = { + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'haproxy.log', + 'event.duration': 1000000, + 'event.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.backend_name': 'app', + 'haproxy.backend_queue': 0, + 'haproxy.bytes_read': 212, + 'haproxy.connection_wait_time_ms': -1, + 'haproxy.connections.active': 1, + 'haproxy.connections.backend': 0, + 'haproxy.connections.frontend': 1, + 'haproxy.connections.retries': 0, + 'haproxy.connections.server': 0, + 
'haproxy.frontend_name': 'main', + 'haproxy.server_name': '', + 'haproxy.server_queue': 0, + 'haproxy.source': '127.0.0.1', + 'haproxy.termination_state': 'SC', + 'haproxy.total_waiting_time_ms': -1, + 'input.type': 'log', + 'log.offset': 0, + 'process.name': 'haproxy', + 'process.pid': 25457, + 'service.type': 'haproxy', + 'source.address': '127.0.0.1', + 'source.ip': '127.0.0.1', + 'source.port': 40962, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy][tcp] ", + }, + Object { + "field": "source.address", + "highlights": Array [], + "value": "127.0.0.1", + }, + Object { + "constant": ":", + }, + Object { + "field": "source.port", + "highlights": Array [], + "value": "40962", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "main", + }, + Object { + "constant": " -> ", + }, + Object { + "field": "haproxy.backend_name", + "highlights": Array [], + "value": "app", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.server_name", + "highlights": Array [], + "value": "", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.connections.active", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.frontend", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.backend", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.server", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.retries", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.server_queue", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + 
"field": "haproxy.backend_queue", + "highlights": Array [], + "value": "0", + }, +] +`); + }); + + test('haproxy http log', () => { + const flattenedDocument = { + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'haproxy.log', + 'event.duration': 2000000, + 'event.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.backend_name': 'docs_microservice', + 'haproxy.backend_queue': 0, + 'haproxy.bytes_read': 168, + 'haproxy.connection_wait_time_ms': 1, + 'haproxy.connections.active': 6, + 'haproxy.connections.backend': 0, + 'haproxy.connections.frontend': 6, + 'haproxy.connections.retries': 0, + 'haproxy.connections.server': 0, + 'haproxy.frontend_name': 'incoming~', + 'haproxy.http.request.captured_cookie': '-', + 'haproxy.http.request.captured_headers': ['docs.example.internal'], + 'haproxy.http.request.raw_request_line': + 'GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1', + 'haproxy.http.request.time_wait_ms': 0, + 'haproxy.http.request.time_wait_without_data_ms': 0, + 'haproxy.http.response.captured_cookie': '-', + 'haproxy.http.response.captured_headers': [], + 'haproxy.server_name': 'docs', + 'haproxy.server_queue': 0, + 'haproxy.termination_state': '----', + 'haproxy.total_waiting_time_ms': 0, + 'http.response.bytes': 168, + 'http.response.status_code': 304, + 'input.type': 'log', + 'log.offset': 0, + 'process.name': 'haproxy', + 'process.pid': 32450, + 'service.type': 'haproxy', + 'source.address': '1.2.3.4', + 'source.geo.continent_name': 'North America', + 'source.geo.country_iso_code': 'US', + 'source.geo.location.lat': 37.751, + 'source.geo.location.lon': -97.822, + 'source.ip': '1.2.3.4', + 'source.port': 38862, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy][http] ", + }, + Object { + "field": "source.address", + "highlights": Array [], + "value": "1.2.3.4", + }, + Object { + "constant": ":", + }, + Object { + "field": "source.port", + "highlights": Array [], + 
"value": "38862", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "incoming~", + }, + Object { + "constant": " -> ", + }, + Object { + "field": "haproxy.backend_name", + "highlights": Array [], + "value": "docs_microservice", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.server_name", + "highlights": Array [], + "value": "docs", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "haproxy.http.request.raw_request_line", + "highlights": Array [], + "value": "GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "http.response.status_code", + "highlights": Array [], + "value": "304", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.http.request.time_wait_ms", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "event.duration", + "highlights": Array [], + "value": "2000000", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connection_wait_time_ms", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.http.request.time_wait_without_data_ms", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "event.duration", + "highlights": Array [], + "value": "2000000", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.connections.active", + "highlights": Array [], + "value": "6", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.frontend", + "highlights": Array [], + "value": "6", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.backend", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.server", + "highlights": Array [], + 
"value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.retries", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.server_queue", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.backend_queue", + "highlights": Array [], + "value": "0", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('haproxy default log', () => { + const flattenedDocument = { + 'event.dataset': 'haproxy.log', + 'fileset.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.client.ip': '1.2.3.4', + 'haproxy.client.port': '40780', + 'haproxy.destination.ip': '1.2.3.4', + 'haproxy.destination.port': '5000', + 'haproxy.frontend_name': 'main', + 'haproxy.geoip.continent_name': 'North America', + 'haproxy.geoip.country_iso_code': 'US', + 'haproxy.geoip.location.lat': 37.751, + 'haproxy.geoip.location.lon': -97.822, + 'haproxy.mode': 'HTTP', + 'haproxy.pid': '24551', + 'haproxy.process_name': 'haproxy', + 'haproxy.source': '1.2.3.4', + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy] ", + }, + Object { + "field": "haproxy.client.ip", + "highlights": Array [], + "value": "1.2.3.4", + }, + Object { + "constant": ":", + }, + Object { + "field": "haproxy.client.port", + "highlights": Array [], + "value": "40780", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "main", + }, +] +`); + }); + + test('haproxy tcp log', () => { + const flattenedDocument = { + 'event.dataset': 'haproxy.log', + 'fileset.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.backend_name': 'app', + 'haproxy.backend_queue': 0, + 'haproxy.bytes_read': 212, + 'haproxy.client.ip': '127.0.0.1', + 'haproxy.client.port': 40962, + 
'haproxy.connection_wait_time_ms': -1, + 'haproxy.connections.active': 1, + 'haproxy.connections.backend': 0, + 'haproxy.connections.frontend': 1, + 'haproxy.connections.retries': 0, + 'haproxy.connections.server': 0, + 'haproxy.frontend_name': 'main', + 'haproxy.pid': 25457, + 'haproxy.process_name': 'haproxy', + 'haproxy.server_name': '', + 'haproxy.server_queue': 0, + 'haproxy.source': '127.0.0.1', + 'haproxy.tcp.processing_time_ms': 0, + 'haproxy.termination_state': 'SC', + 'haproxy.total_waiting_time_ms': -1, + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy][tcp] ", + }, + Object { + "field": "haproxy.client.ip", + "highlights": Array [], + "value": "127.0.0.1", + }, + Object { + "constant": ":", + }, + Object { + "field": "haproxy.client.port", + "highlights": Array [], + "value": "40962", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "main", + }, + Object { + "constant": " -> ", + }, + Object { + "field": "haproxy.backend_name", + "highlights": Array [], + "value": "app", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.server_name", + "highlights": Array [], + "value": "", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.connections.active", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.frontend", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.backend", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.server", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.retries", + "highlights": Array [], + "value": 
"0", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.server_queue", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.backend_queue", + "highlights": Array [], + "value": "0", + }, +] +`); + }); + + test('haproxy http log', () => { + const flattenedDocument = { + 'event.dataset': 'haproxy.log', + 'fileset.module': 'haproxy', + 'fileset.name': 'log', + 'haproxy.backend_name': 'docs_microservice', + 'haproxy.backend_queue': 0, + 'haproxy.bytes_read': 168, + 'haproxy.client.ip': '1.2.3.4', + 'haproxy.client.port': 38862, + 'haproxy.connection_wait_time_ms': 1, + 'haproxy.connections.active': 6, + 'haproxy.connections.backend': 0, + 'haproxy.connections.frontend': 6, + 'haproxy.connections.retries': 0, + 'haproxy.connections.server': 0, + 'haproxy.frontend_name': 'incoming~', + 'haproxy.geoip.continent_name': 'North America', + 'haproxy.geoip.country_iso_code': 'US', + 'haproxy.geoip.location.lat': 37.751, + 'haproxy.geoip.location.lon': -97.822, + 'haproxy.http.request.captured_cookie': '-', + 'haproxy.http.request.raw_request_line': + 'GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1', + 'haproxy.http.request.time_active_ms': 2, + 'haproxy.http.request.time_wait_ms': 0, + 'haproxy.http.request.time_wait_without_data_ms': 0, + 'haproxy.http.response.captured_cookie': '-', + 'haproxy.http.response.status_code': 304, + 'haproxy.pid': 32450, + 'haproxy.process_name': 'haproxy', + 'haproxy.server_name': 'docs', + 'haproxy.server_queue': 0, + 'haproxy.termination_state': '----', + 'haproxy.total_waiting_time_ms': 0, + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[HAProxy][http] ", + }, + Object { + "field": "haproxy.client.ip", + "highlights": Array [], + "value": "1.2.3.4", + }, + Object { + "constant": ":", + }, + Object { + "field": 
"haproxy.client.port", + "highlights": Array [], + "value": "38862", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.frontend_name", + "highlights": Array [], + "value": "incoming~", + }, + Object { + "constant": " -> ", + }, + Object { + "field": "haproxy.backend_name", + "highlights": Array [], + "value": "docs_microservice", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.server_name", + "highlights": Array [], + "value": "docs", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "haproxy.http.request.raw_request_line", + "highlights": Array [], + "value": "GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "haproxy.http.response.status_code", + "highlights": Array [], + "value": "304", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.http.request.time_wait_ms", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.total_waiting_time_ms", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connection_wait_time_ms", + "highlights": Array [], + "value": "1", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.http.request.time_wait_without_data_ms", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.http.request.time_active_ms", + "highlights": Array [], + "value": "2", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.connections.active", + "highlights": Array [], + "value": "6", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.frontend", + "highlights": Array [], + "value": "6", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.backend", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + 
}, + Object { + "field": "haproxy.connections.server", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.connections.retries", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": " ", + }, + Object { + "field": "haproxy.server_queue", + "highlights": Array [], + "value": "0", + }, + Object { + "constant": "/", + }, + Object { + "field": "haproxy.backend_queue", + "highlights": Array [], + "value": "0", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts new file mode 100644 index 0000000000000..97836b0a8186f --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts @@ -0,0 +1,329 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +const ecsFrontendFields = [ + { + field: 'source.address', + }, + { + constant: ':', + }, + { + field: 'source.port', + }, + { + constant: ' ', + }, + { + field: 'haproxy.frontend_name', + }, +]; + +const preEcsFrontendFields = [ + { + field: 'haproxy.client.ip', + }, + { + constant: ':', + }, + { + field: 'haproxy.client.port', + }, + { + constant: ' ', + }, + { + field: 'haproxy.frontend_name', + }, +]; + +const commonBackendFields = [ + { + constant: ' -> ', + }, + { + field: 'haproxy.backend_name', + }, + { + constant: '/', + }, + { + field: 'haproxy.server_name', + }, +]; + +const commonConnectionStatsFields = [ + { + field: 'haproxy.connections.active', + }, + { + constant: '/', + }, + { + field: 'haproxy.connections.frontend', + }, + { + constant: '/', + }, + { + field: 'haproxy.connections.backend', + }, + { + constant: '/', + }, + { + field: 'haproxy.connections.server', + }, + { + constant: '/', + }, + { + field: 'haproxy.connections.retries', + }, +]; + +const commonQueueStatsFields = [ + { + field: 'haproxy.server_queue', + }, + { + constant: '/', + }, + { + field: 'haproxy.backend_queue', + }, +]; + +export const filebeatHaproxyRules = [ + { + // ECS + when: { + exists: ['ecs.version', 'haproxy.http.request.raw_request_line'], + }, + format: [ + { + constant: '[HAProxy][http] ', + }, + ...ecsFrontendFields, + ...commonBackendFields, + { + constant: ' "', + }, + { + field: 'haproxy.http.request.raw_request_line', + }, + { + constant: '" ', + }, + { + field: 'http.response.status_code', + }, + { + constant: ' ', + }, + { + field: 'haproxy.http.request.time_wait_ms', + }, + { + constant: '/', + }, + { + field: 'event.duration', + }, + { + constant: '/', + }, + { + field: 'haproxy.connection_wait_time_ms', + }, + { + constant: '/', + }, + { + field: 'haproxy.http.request.time_wait_without_data_ms', + }, + { + constant: '/', + }, + { + field: 'event.duration', + }, + { + constant: ' ', + }, + ...commonConnectionStatsFields, + { + constant: ' ', + }, 
+ ...commonQueueStatsFields, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'haproxy.connections.active'], + }, + format: [ + { + constant: '[HAProxy][tcp] ', + }, + ...ecsFrontendFields, + ...commonBackendFields, + { + constant: ' ', + }, + ...commonConnectionStatsFields, + { + constant: ' ', + }, + ...commonQueueStatsFields, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'haproxy.error_message'], + }, + format: [ + { + constant: '[HAProxy] ', + }, + ...ecsFrontendFields, + { + constant: ' ', + }, + { + field: 'haproxy.error_message', + }, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'haproxy.frontend_name'], + }, + format: [ + { + constant: '[HAProxy] ', + }, + ...ecsFrontendFields, + ], + }, + { + // pre-ECS + when: { + exists: ['haproxy.http.request.raw_request_line'], + }, + format: [ + { + constant: '[HAProxy][http] ', + }, + ...preEcsFrontendFields, + ...commonBackendFields, + { + constant: ' "', + }, + { + field: 'haproxy.http.request.raw_request_line', + }, + { + constant: '" ', + }, + { + field: 'haproxy.http.response.status_code', + }, + { + constant: ' ', + }, + { + field: 'haproxy.http.request.time_wait_ms', + }, + { + constant: '/', + }, + { + field: 'haproxy.total_waiting_time_ms', + }, + { + constant: '/', + }, + { + field: 'haproxy.connection_wait_time_ms', + }, + { + constant: '/', + }, + { + field: 'haproxy.http.request.time_wait_without_data_ms', + }, + { + constant: '/', + }, + { + field: 'haproxy.http.request.time_active_ms', + }, + { + constant: ' ', + }, + ...commonConnectionStatsFields, + { + constant: ' ', + }, + ...commonQueueStatsFields, + ], + }, + { + // pre-ECS + when: { + exists: ['haproxy.connections.active'], + }, + format: [ + { + constant: '[HAProxy][tcp] ', + }, + ...preEcsFrontendFields, + ...commonBackendFields, + { + constant: ' ', + }, + ...commonConnectionStatsFields, + { + constant: ' ', + }, + ...commonQueueStatsFields, + ], + }, + { + // pre-ECS + when: { + exists: 
['haproxy.error_message'], + }, + format: [ + { + constant: '[HAProxy] ', + }, + ...preEcsFrontendFields, + { + constant: ' ', + }, + { + field: 'haproxy.error_message', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['haproxy.frontend_name'], + }, + format: [ + { + constant: '[HAProxy] ', + }, + ...preEcsFrontendFields, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts new file mode 100644 index 0000000000000..120137f15b883 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { compileFormattingRules } from '../message'; +import { filebeatIcingaRules } from './filebeat_icinga'; + +const { format } = compileFormattingRules(filebeatIcingaRules); + +describe('Filebeat Rules', () => { + describe('in pre-ECS format', () => { + test('icinga debug log', () => { + const flattenedDocument = { + '@timestamp': '2017-04-04T11:43:09.000Z', + 'event.dataset': 'icinga.debug', + 'fileset.module': 'icinga', + 'fileset.name': 'debug', + 'icinga.debug.facility': 'GraphiteWriter', + 'icinga.debug.message': + "Add to metric list:'icinga2.demo.services.procs.procs.perfdata.procs.warn 250 1491306189'.", + 'icinga.debug.severity': 'debug', + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Icinga][", + }, + Object { + "field": "icinga.debug.facility", + "highlights": Array [], + "value": "GraphiteWriter", + }, + Object { + "constant": "][", + }, + Object { + 
"field": "icinga.debug.severity", + "highlights": Array [], + "value": "debug", + }, + Object { + "constant": "] ", + }, + Object { + "field": "icinga.debug.message", + "highlights": Array [], + "value": "Add to metric list:'icinga2.demo.services.procs.procs.perfdata.procs.warn 250 1491306189'.", + }, +] +`); + }); + + test('icinga main log', () => { + const flattenedDocument = { + '@timestamp': '2017-04-04T09:16:34.000Z', + 'event.dataset': 'icinga.main', + 'fileset.module': 'icinga', + 'fileset.name': 'main', + 'icinga.main.facility': 'Notification', + 'icinga.main.message': + "Sending 'Recovery' notification 'demo!load!mail-icingaadmin for user 'on-call'", + 'icinga.main.severity': 'information', + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Icinga][", + }, + Object { + "field": "icinga.main.facility", + "highlights": Array [], + "value": "Notification", + }, + Object { + "constant": "][", + }, + Object { + "field": "icinga.main.severity", + "highlights": Array [], + "value": "information", + }, + Object { + "constant": "] ", + }, + Object { + "field": "icinga.main.message", + "highlights": Array [], + "value": "Sending 'Recovery' notification 'demo!load!mail-icingaadmin for user 'on-call'", + }, +] +`); + }); + + test('icinga startup log', () => { + const flattenedDocument = { + 'event.dataset': 'icinga.startup', + 'fileset.module': 'icinga', + 'fileset.name': 'startup', + 'icinga.startup.facility': 'cli', + 'icinga.startup.message': 'Icinga application loader (version: r2.6.3-1)', + 'icinga.startup.severity': 'information', + 'input.type': 'log', + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Icinga][", + }, + Object { + "field": "icinga.startup.facility", + "highlights": Array [], + "value": "cli", + }, + Object { + "constant": "][", + 
}, + Object { + "field": "icinga.startup.severity", + "highlights": Array [], + "value": "information", + }, + Object { + "constant": "] ", + }, + Object { + "field": "icinga.startup.message", + "highlights": Array [], + "value": "Icinga application loader (version: r2.6.3-1)", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts new file mode 100644 index 0000000000000..c04a746e6bf41 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const filebeatIcingaRules = [ + { + // pre-ECS + when: { + exists: ['icinga.main.message'], + }, + format: [ + { + constant: '[Icinga][', + }, + { + field: 'icinga.main.facility', + }, + { + constant: '][', + }, + { + field: 'icinga.main.severity', + }, + { + constant: '] ', + }, + { + field: 'icinga.main.message', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['icinga.debug.message'], + }, + format: [ + { + constant: '[Icinga][', + }, + { + field: 'icinga.debug.facility', + }, + { + constant: '][', + }, + { + field: 'icinga.debug.severity', + }, + { + constant: '] ', + }, + { + field: 'icinga.debug.message', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['icinga.startup.message'], + }, + format: [ + { + constant: '[Icinga][', + }, + { + field: 'icinga.startup.facility', + }, + { + constant: '][', + }, + { + field: 'icinga.startup.severity', + }, + { + constant: '] ', + }, + { + field: 'icinga.startup.message', + }, + ], + }, +]; diff --git 
a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts new file mode 100644 index 0000000000000..72449c03b63a6 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts @@ -0,0 +1,562 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('iis access log', () => { + const flattenedDocument = { + '@timestamp': '2018-01-01T10:11:12.000Z', + 'destination.address': '127.0.0.1', + 'destination.domain': 'example.com', + 'destination.ip': '127.0.0.1', + 'destination.port': 80, + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'iis.access', + 'event.duration': 789000000, + 'event.module': 'iis', + 'fileset.name': 'access', + 'http.request.body.bytes': 456, + 'http.request.method': 'GET', + 'http.request.referrer': '-', + 'http.response.body.bytes': 123, + 'http.response.status_code': 200, + 'http.version': '1.1', + 'iis.access.cookie': '-', + 'iis.access.server_name': 'MACHINE-NAME', + 'iis.access.site_name': 'W3SVC1', + 'iis.access.sub_status': 0, + 'iis.access.win32_status': 0, + 'input.type': 'log', + 'log.offset': 1204, + 'service.type': 'iis', + 'source.address': '85.181.35.98', + 'source.geo.city_name': 'Berlin', + 'source.geo.continent_name': 'Europe', + 'source.geo.country_iso_code': 'DE', + 'source.geo.location.lat': 52.4908, + 'source.geo.location.lon': 13.3275, + 'source.geo.region_iso_code': 'DE-BE', + 'source.geo.region_name': 'Land Berlin', 
+ 'source.ip': '85.181.35.98', + 'url.path': '/', + 'url.query': 'q=100', + 'user.name': '-', + 'user_agent.device.name': 'Other', + 'user_agent.name': 'Chrome', + 'user_agent.original': + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36', + 'user_agent.os.full': 'Mac OS X 10.14.0', + 'user_agent.os.name': 'Mac OS X', + 'user_agent.os.version': '10.14.0', + 'user_agent.version': '70.0.3538', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.module", + "highlights": Array [], + "value": "iis", + }, + Object { + "constant": "][access] ", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "85.181.35.98", + }, + Object { + "constant": " ", + }, + Object { + "field": "user.name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "http.request.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "url.path", + "highlights": Array [], + "value": "/", + }, + Object { + "constant": "?", + }, + Object { + "field": "url.query", + "highlights": Array [], + "value": "q=100", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "http.version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "http.response.status_code", + "highlights": Array [], + "value": "200", + }, + Object { + "constant": " ", + }, + Object { + "field": "http.response.body.bytes", + "highlights": Array [], + "value": "123", + }, +] +`); + }); + + test('iis 7.5 access log', () => { + const flattenedDocument = { + '@timestamp': '2018-08-28T18:24:25.000Z', + 'destination.address': '10.100.220.70', + 'destination.ip': '10.100.220.70', + 'destination.port': 80, + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'iis.access', + 
'event.duration': 792000000, + 'event.module': 'iis', + 'fileset.name': 'access', + 'http.request.method': 'GET', + 'http.response.status_code': 404, + 'iis.access.sub_status': 4, + 'iis.access.win32_status': 2, + 'input.type': 'log', + 'log.offset': 244, + 'service.type': 'iis', + 'source.address': '10.100.118.31', + 'source.ip': '10.100.118.31', + 'url.path': '/', + 'url.query': 'q=100', + 'user.name': '-', + 'user_agent.device.name': 'Other', + 'user_agent.name': 'IE', + 'user_agent.original': + 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; WOW64; Trident/7.0; .NET4.0E; .NET4.0C; .NET CLR 3.5.30729; .NET CLR[ 2.0.50727](tel: 2050727); .NET CLR 3.0.30729)', + 'user_agent.os.name': 'Windows 8.1', + 'user_agent.version': '7.0', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.module", + "highlights": Array [], + "value": "iis", + }, + Object { + "constant": "][access] ", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "10.100.118.31", + }, + Object { + "constant": " ", + }, + Object { + "field": "user.name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "http.request.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "url.path", + "highlights": Array [], + "value": "/", + }, + Object { + "constant": "?", + }, + Object { + "field": "url.query", + "highlights": Array [], + "value": "q=100", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "http.version", + "highlights": Array [], + "value": "undefined", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "http.response.status_code", + "highlights": Array [], + "value": "404", + }, + Object { + "constant": " ", + }, + Object { + "field": "http.response.body.bytes", + "highlights": Array [], + "value": "undefined", + }, +] +`); + }); + + 
test('iis error log', () => { + const flattenedDocument = { + '@timestamp': '2018-01-01T08:09:10.000Z', + 'destination.address': '172.31.77.6', + 'destination.ip': '172.31.77.6', + 'destination.port': 80, + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'iis.error', + 'event.module': 'iis', + 'fileset.name': 'error', + 'http.request.method': 'GET', + 'http.response.status_code': 503, + 'http.version': '1.1', + 'iis.error.queue_name': '-', + 'iis.error.reason_phrase': 'ConnLimit', + 'input.type': 'log', + 'log.offset': 186, + 'service.type': 'iis', + 'source.address': '172.31.77.6', + 'source.ip': '172.31.77.6', + 'source.port': 2094, + 'url.original': '/qos/1kbfile.txt', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[iis][error] ", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "172.31.77.6", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.error.reason_phrase", + "highlights": Array [], + "value": "ConnLimit", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('iis access log', () => { + const flattenedDocument = { + '@timestamp': '2018-01-01T08:09:10.000Z', + 'event.dataset': 'iis.access', + 'fileset.module': 'iis', + 'fileset.name': 'access', + 'iis.access.geoip.city_name': 'Berlin', + 'iis.access.geoip.continent_name': 'Europe', + 'iis.access.geoip.country_iso_code': 'DE', + 'iis.access.geoip.location.lat': 52.4908, + 'iis.access.geoip.location.lon': 13.3275, + 'iis.access.geoip.region_iso_code': 'DE-BE', + 'iis.access.geoip.region_name': 'Land Berlin', + 'iis.access.method': 'GET', + 'iis.access.port': '80', + 'iis.access.query_string': 'q=100', + 'iis.access.referrer': '-', + 'iis.access.remote_ip': '85.181.35.98', + 'iis.access.request_time_ms': '123', + 'iis.access.response_code': '200', + 'iis.access.server_ip': '127.0.0.1', + 'iis.access.sub_status': '0', + 'iis.access.url': '/', + 'iis.access.user_agent.device': 'Other', + 
'iis.access.user_agent.major': '57', + 'iis.access.user_agent.minor': '0', + 'iis.access.user_agent.name': 'Firefox', + 'iis.access.user_agent.original': + 'Mozilla/5.0+(Windows+NT+6.1;+Win64;+x64;+rv:57.0)+Gecko/20100101+Firefox/57.0', + 'iis.access.user_agent.os': 'Windows', + 'iis.access.user_agent.os_name': 'Windows', + 'iis.access.user_name': '-', + 'iis.access.win32_status': '0', + 'input.type': 'log', + offset: 257, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[iis][access] ", + }, + Object { + "field": "iis.access.remote_ip", + "highlights": Array [], + "value": "85.181.35.98", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.user_name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "iis.access.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.url", + "highlights": Array [], + "value": "/", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "iis.access.http_version", + "highlights": Array [], + "value": "undefined", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "iis.access.response_code", + "highlights": Array [], + "value": "200", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.body_sent.bytes", + "highlights": Array [], + "value": "undefined", + }, +] +`); + }); + + test('iis 7.5 access log', () => { + const flattenedDocument = { + '@timestamp': '2018-08-28T18:24:25.000Z', + 'event.dataset': 'iis.access', + 'fileset.module': 'iis', + 'fileset.name': 'access', + 'iis.access.method': 'GET', + 'iis.access.port': '80', + 'iis.access.query_string': '-', + 'iis.access.remote_ip': '10.100.118.31', + 'iis.access.request_time_ms': '792', + 'iis.access.response_code': '404', + 'iis.access.server_ip': '10.100.220.70', + 'iis.access.sub_status': '4', + 
'iis.access.url': '/', + 'iis.access.user_agent.device': 'Other', + 'iis.access.user_agent.name': 'Other', + 'iis.access.user_agent.original': + 'Mozilla/4.0+(compatible;+MSIE+7.0;+Windows+NT+6.3;+WOW64;+Trident/7.0;+.NET4.0E;+.NET4.0C;+.NET+CLR+3.5.30729;+.NET+CLR[+2.0.50727](tel:+2050727);+.NET+CLR+3.0.30729)', + 'iis.access.user_agent.os': 'Windows', + 'iis.access.user_agent.os_name': 'Windows', + 'iis.access.user_name': '-', + 'iis.access.win32_status': '2', + 'input.type': 'log', + offset: 244, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[iis][access] ", + }, + Object { + "field": "iis.access.remote_ip", + "highlights": Array [], + "value": "10.100.118.31", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.user_name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "iis.access.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.url", + "highlights": Array [], + "value": "/", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "iis.access.http_version", + "highlights": Array [], + "value": "undefined", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "iis.access.response_code", + "highlights": Array [], + "value": "404", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.access.body_sent.bytes", + "highlights": Array [], + "value": "undefined", + }, +] +`); + }); + + test('iis error log', () => { + const flattenedDocument = { + '@timestamp': '2018-01-01T08:09:10.000Z', + 'event.dataset': 'iis.error', + 'fileset.module': 'iis', + 'fileset.name': 'error', + 'iis.error.http_version': '1.1', + 'iis.error.method': 'GET', + 'iis.error.queue_name': '-', + 'iis.error.reason_phrase': 'ConnLimit', + 'iis.error.remote_ip': '172.31.77.6', + 'iis.error.remote_port': '2094', + 
'iis.error.response_code': '503', + 'iis.error.server_ip': '172.31.77.6', + 'iis.error.server_port': '80', + 'iis.error.url': '/qos/1kbfile.txt', + 'input.type': 'log', + offset: 186, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[iis][error] ", + }, + Object { + "field": "iis.error.remote_ip", + "highlights": Array [], + "value": "172.31.77.6", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "iis.error.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.error.url", + "highlights": Array [], + "value": "/qos/1kbfile.txt", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "iis.error.http_version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "iis.error.response_code", + "highlights": Array [], + "value": "503", + }, + Object { + "constant": " ", + }, + Object { + "field": "iis.error.reason_phrase", + "highlights": Array [], + "value": "ConnLimit", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts new file mode 100644 index 0000000000000..ea3485440bb74 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatIisRules = [ + { + // pre-ECS + when: { + exists: ['iis.access.method'], + }, + format: [ + { + constant: '[iis][access] ', + }, + { + field: 'iis.access.remote_ip', + }, + { + constant: ' ', + }, + { + field: 'iis.access.user_name', + }, + { + constant: ' "', + }, + { + field: 'iis.access.method', + }, + { + constant: ' ', + }, + { + field: 'iis.access.url', + }, + { + constant: ' HTTP/', + }, + { + field: 'iis.access.http_version', + }, + { + constant: '" ', + }, + { + field: 'iis.access.response_code', + }, + { + constant: ' ', + }, + { + field: 'iis.access.body_sent.bytes', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['iis.error.url'], + }, + format: [ + { + constant: '[iis][error] ', + }, + { + field: 'iis.error.remote_ip', + }, + { + constant: ' "', + }, + { + field: 'iis.error.method', + }, + { + constant: ' ', + }, + { + field: 'iis.error.url', + }, + { + constant: ' HTTP/', + }, + { + field: 'iis.error.http_version', + }, + { + constant: '" ', + }, + { + field: 'iis.error.response_code', + }, + { + constant: ' ', + }, + { + field: 'iis.error.reason_phrase', + }, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'iis.error.reason_phrase'], + }, + format: [ + { + constant: '[iis][error] ', + }, + { + field: 'source.ip', + }, + { + constant: ' ', + }, + { + field: 'iis.error.reason_phrase', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['iis.error.reason_phrase'], + }, + format: [ + { + constant: '[iis][error] ', + }, + { + field: 'iis.error.remote_ip', + }, + { + constant: ' ', + }, + { + field: 'iis.error.reason_phrase', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts new file mode 100644 index 0000000000000..19cb5f6e31118 --- /dev/null +++ 
b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('kafka log', () => { + const flattenedDocument = { + '@timestamp': '2017-08-04T10:48:21.063Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'kafka.log', + 'event.module': 'kafka', + 'fileset.name': 'log', + 'input.type': 'log', + 'kafka.log.class': 'kafka.controller.KafkaController', + 'kafka.log.component': 'Controller 0', + 'log.level': 'INFO', + 'log.offset': 131, + message: '0 successfully elected as the controller', + 'service.type': 'kafka', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.dataset", + "highlights": Array [], + "value": "kafka.log", + }, + Object { + "constant": "][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "INFO", + }, + Object { + "constant": "] ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "0 successfully elected as the controller", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts new file mode 100644 index 0000000000000..edc534d9c345f --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts @@ -0,0 +1,206 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('logstash log', () => { + const flattenedDocument = { + '@timestamp': '2017-10-23T14:20:12.046Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'logstash.log', + 'event.module': 'logstash', + 'fileset.name': 'log', + 'input.type': 'log', + 'log.level': 'INFO', + 'log.offset': 0, + 'logstash.log.module': 'logstash.modules.scaffold', + message: + 'Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}', + 'service.type': 'logstash', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.dataset", + "highlights": Array [], + "value": "logstash.log", + }, + Object { + "constant": "][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "INFO", + }, + Object { + "constant": "] ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "Initializing module {:module_name=>\\"fb_apache\\", :directory=>\\"/usr/share/logstash/modules/fb_apache/configuration\\"}", + }, +] +`); + }); + + test('logstash slowlog', () => { + const flattenedDocument = { + '@timestamp': '2017-10-30T09:57:58.243Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'logstash.slowlog', + 'event.duration': 3027675106, + 'event.module': 'logstash', + 'fileset.name': 'slowlog', + 'input.type': 'log', + 'log.level': 'WARN', + 'log.offset': 0, + 'logstash.slowlog': { + event: + 
'"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"', + module: 'slowlog.logstash.filters.sleep', + plugin_name: 'sleep', + plugin_params: + '{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}', + plugin_type: 'filters', + took_in_millis: 3027, + }, + 'service.type': 'logstash', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Logstash][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "WARN", + }, + Object { + "constant": "] ", + }, + Object { + "field": "logstash.slowlog", + "highlights": Array [], + "value": "{\\"event\\":\\"\\\\\\"{\\\\\\\\\\\\\\"@version\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"1\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"@timestamp\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"2017-10-30T13:57:55.130Z\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"host\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"sashimi\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"sequence\\\\\\\\\\\\\\":0,\\\\\\\\\\\\\\"message\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"Hello world!\\\\\\\\\\\\\\"}\\\\\\"\\",\\"module\\":\\"slowlog.logstash.filters.sleep\\",\\"plugin_name\\":\\"sleep\\",\\"plugin_params\\":\\"{\\\\\\"time\\\\\\"=>3, \\\\\\"id\\\\\\"=>\\\\\\"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c\\\\\\"}\\",\\"plugin_type\\":\\"filters\\",\\"took_in_millis\\":3027}", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('logstash log', () => { + const flattenedDocument = { + '@timestamp': '2017-10-23T14:20:12.046Z', + 'event.dataset': 'logstash.log', + 'fileset.module': 'logstash', + 'fileset.name': 'log', + 'input.type': 'log', + 'logstash.log.level': 'INFO', + 'logstash.log.message': + 'Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}', + 'logstash.log.module': 'logstash.modules.scaffold', + offset: 0, + 'prospector.type': 'log', + }; + + 
expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Logstash][", + }, + Object { + "field": "logstash.log.level", + "highlights": Array [], + "value": "INFO", + }, + Object { + "constant": "] ", + }, + Object { + "field": "logstash.log.module", + "highlights": Array [], + "value": "logstash.modules.scaffold", + }, + Object { + "constant": " - ", + }, + Object { + "field": "logstash.log.message", + "highlights": Array [], + "value": "Initializing module {:module_name=>\\"fb_apache\\", :directory=>\\"/usr/share/logstash/modules/fb_apache/configuration\\"}", + }, +] +`); + }); + + test('logstash slowlog', () => { + const flattenedDocument = { + '@timestamp': '2017-10-30T09:57:58.243Z', + 'event.dataset': 'logstash.slowlog', + 'fileset.module': 'logstash', + 'fileset.name': 'slowlog', + 'input.type': 'log', + 'logstash.slowlog.event': + '"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"', + 'logstash.slowlog.level': 'WARN', + 'logstash.slowlog.message': + 'event processing time {:plugin_params=>{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}, :took_in_nanos=>3027675106, :took_in_millis=>3027, :event=>"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"}', + 'logstash.slowlog.module': 'slowlog.logstash.filters.sleep', + 'logstash.slowlog.plugin_name': 'sleep', + 'logstash.slowlog.plugin_params': + '{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}', + 'logstash.slowlog.plugin_type': 'filters', + 'logstash.slowlog.took_in_millis': 3027, + 'logstash.slowlog.took_in_nanos': 3027675106, + offset: 0, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Logstash][", + }, + Object { + 
"field": "logstash.slowlog.level", + "highlights": Array [], + "value": "WARN", + }, + Object { + "constant": "] ", + }, + Object { + "field": "logstash.slowlog.module", + "highlights": Array [], + "value": "slowlog.logstash.filters.sleep", + }, + Object { + "constant": " - ", + }, + Object { + "field": "logstash.slowlog.message", + "highlights": Array [], + "value": "event processing time {:plugin_params=>{\\"time\\"=>3, \\"id\\"=>\\"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c\\"}, :took_in_nanos=>3027675106, :took_in_millis=>3027, :event=>\\"{\\\\\\"@version\\\\\\":\\\\\\"1\\\\\\",\\\\\\"@timestamp\\\\\\":\\\\\\"2017-10-30T13:57:55.130Z\\\\\\",\\\\\\"host\\\\\\":\\\\\\"sashimi\\\\\\",\\\\\\"sequence\\\\\\":0,\\\\\\"message\\\\\\":\\\\\\"Hello world!\\\\\\"}\\"}", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts new file mode 100644 index 0000000000000..39b2058ca7cdb --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatLogstashRules = [ + { + // pre-ECS + when: { + exists: ['logstash.log.message'], + }, + format: [ + { + constant: '[Logstash][', + }, + { + field: 'logstash.log.level', + }, + { + constant: '] ', + }, + { + field: 'logstash.log.module', + }, + { + constant: ' - ', + }, + { + field: 'logstash.log.message', + }, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'logstash.slowlog'], + }, + format: [ + { + constant: '[Logstash][', + }, + { + field: 'log.level', + }, + { + constant: '] ', + }, + { + field: 'logstash.slowlog', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['logstash.slowlog.message'], + }, + format: [ + { + constant: '[Logstash][', + }, + { + field: 'logstash.slowlog.level', + }, + { + constant: '] ', + }, + { + field: 'logstash.slowlog.module', + }, + { + constant: ' - ', + }, + { + field: 'logstash.slowlog.message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts new file mode 100644 index 0000000000000..3df7ebec241cc --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { compileFormattingRules } from '../message'; +import { filebeatMongodbRules } from './filebeat_mongodb'; + +const { format } = compileFormattingRules(filebeatMongodbRules); + +describe('Filebeat Rules', () => { + describe('in pre-ECS format', () => { + test('mongodb log', () => { + const flattenedDocument = { + '@timestamp': '2018-02-05T12:44:56.677Z', + 'event.dataset': 'mongodb.log', + 'fileset.module': 'mongodb', + 'fileset.name': 'log', + 'input.type': 'log', + 'mongodb.log.component': 'STORAGE', + 'mongodb.log.context': 'initandlisten', + 'mongodb.log.message': + 'wiredtiger_open config: create,cache_size=8G,session_max=20000,eviction=(threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),', + 'mongodb.log.severity': 'I', + offset: 281, + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[MongoDB][", + }, + Object { + "field": "mongodb.log.component", + "highlights": Array [], + "value": "STORAGE", + }, + Object { + "constant": "] ", + }, + Object { + "field": "mongodb.log.message", + "highlights": Array [], + "value": "wiredtiger_open config: create,cache_size=8G,session_max=20000,eviction=(threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts new file mode 100644 index 0000000000000..06a4964875898 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts @@ -0,0 +1,28 @@ 
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const filebeatMongodbRules = [ + { + // pre-ECS + when: { + exists: ['mongodb.log.message'], + }, + format: [ + { + constant: '[MongoDB][', + }, + { + field: 'mongodb.log.component', + }, + { + constant: '] ', + }, + { + field: 'mongodb.log.message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts new file mode 100644 index 0000000000000..0329d53f92d08 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('mysql error log', () => { + const flattenedDocument = { + '@timestamp': '2016-12-09T12:08:33.335Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'mysql.error', + 'event.module': 'mysql', + 'fileset.name': 'error', + 'input.type': 'log', + 'log.level': 'Warning', + 'log.offset': 92, + message: + 'TIMESTAMP with implicit DEFAULT value is deprecated. 
Please use --explicit_defaults_for_timestamp server option (see documentation for more details).', + 'mysql.thread_id': 0, + 'service.type': 'mysql', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.dataset", + "highlights": Array [], + "value": "mysql.error", + }, + Object { + "constant": "][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "Warning", + }, + Object { + "constant": "] ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).", + }, +] +`); + }); + + test('mysql slowlog', () => { + const flattenedDocument = { + '@timestamp': '2018-08-07T08:27:47.000Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'mysql.slowlog', + 'event.duration': 4071491000, + 'event.module': 'mysql', + 'fileset.name': 'slowlog', + 'input.type': 'log', + 'log.flags': ['multiline'], + 'log.offset': 526, + 'mysql.slowlog.current_user': 'appuser', + 'mysql.slowlog.lock_time.sec': 0.000212, + 'mysql.slowlog.query': + 'SELECT mcu.mcu_guid, mcu.cus_guid, mcu.mcu_url, mcu.mcu_crawlelements, mcu.mcu_order, GROUP_CONCAT(mca.mca_guid SEPARATOR ";") as mca_guid\n FROM kat_mailcustomerurl mcu, kat_customer cus, kat_mailcampaign mca\n WHERE cus.cus_guid = mcu.cus_guid\n AND cus.pro_code = \'CYB\'\n AND cus.cus_offline = 0\n AND mca.cus_guid = cus.cus_guid\n AND (mcu.mcu_date IS NULL OR mcu.mcu_date < CURDATE())\n AND mcu.mcu_crawlelements IS NOT NULL\n GROUP BY mcu.mcu_guid\n ORDER BY mcu.mcu_order ASC\n LIMIT 1000;', + 'mysql.slowlog.rows_examined': 1489615, + 'mysql.slowlog.rows_sent': 1000, + 'mysql.thread_id': 10997316, + 'service.type': 'mysql', + 'source.domain': 'apphost', + 'source.ip': '1.1.1.1', + 'user.name': 'appuser', + }; + + expect(format(flattenedDocument, 
{})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[MySQL][slowlog] ", + }, + Object { + "field": "user.name", + "highlights": Array [], + "value": "appuser", + }, + Object { + "constant": "@", + }, + Object { + "field": "source.domain", + "highlights": Array [], + "value": "apphost", + }, + Object { + "constant": " [", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "1.1.1.1", + }, + Object { + "constant": "] ", + }, + Object { + "constant": " - ", + }, + Object { + "field": "event.duration", + "highlights": Array [], + "value": "4071491000", + }, + Object { + "constant": " ns - ", + }, + Object { + "field": "mysql.slowlog.query", + "highlights": Array [], + "value": "SELECT mcu.mcu_guid, mcu.cus_guid, mcu.mcu_url, mcu.mcu_crawlelements, mcu.mcu_order, GROUP_CONCAT(mca.mca_guid SEPARATOR \\";\\") as mca_guid + FROM kat_mailcustomerurl mcu, kat_customer cus, kat_mailcampaign mca + WHERE cus.cus_guid = mcu.cus_guid + AND cus.pro_code = 'CYB' + AND cus.cus_offline = 0 + AND mca.cus_guid = cus.cus_guid + AND (mcu.mcu_date IS NULL OR mcu.mcu_date < CURDATE()) + AND mcu.mcu_crawlelements IS NOT NULL + GROUP BY mcu.mcu_guid + ORDER BY mcu.mcu_order ASC + LIMIT 1000;", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('mysql error log', () => { + const errorDoc = { + 'mysql.error.message': + "Access denied for user 'petclinicdd'@'47.153.152.234' (using password: YES)", + }; + const message = format(errorDoc, {}); + expect(message).toEqual([ + { + constant: '[MySQL][error] ', + }, + { + field: 'mysql.error.message', + highlights: [], + value: "Access denied for user 'petclinicdd'@'47.153.152.234' (using password: YES)", + }, + ]); + }); + + test('mysql slow log', () => { + const errorDoc = { + 'mysql.slowlog.query': 'select * from hosts', + 'mysql.slowlog.query_time.sec': 5, + 'mysql.slowlog.user': 'admin', + 'mysql.slowlog.ip': '192.168.1.42', + 'mysql.slowlog.host': 'webserver-01', + }; + const message = 
format(errorDoc, {}); + expect(message).toEqual([ + { + constant: '[MySQL][slowlog] ', + }, + { + field: 'mysql.slowlog.user', + highlights: [], + value: 'admin', + }, + { + constant: '@', + }, + { + field: 'mysql.slowlog.host', + highlights: [], + value: 'webserver-01', + }, + { + constant: ' [', + }, + { + field: 'mysql.slowlog.ip', + highlights: [], + value: '192.168.1.42', + }, + { + constant: '] ', + }, + { + constant: ' - ', + }, + { + field: 'mysql.slowlog.query_time.sec', + highlights: [], + value: '5', + }, + { + constant: ' s - ', + }, + { + field: 'mysql.slowlog.query', + highlights: [], + value: 'select * from hosts', + }, + ]); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts new file mode 100644 index 0000000000000..e90977f9bf8fa --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatMySQLRules = [ + { + // pre-ECS + when: { + exists: ['mysql.error.message'], + }, + format: [ + { + constant: '[MySQL][error] ', + }, + { + field: 'mysql.error.message', + }, + ], + }, + { + // ECS + when: { + exists: ['ecs.version', 'mysql.slowlog.query'], + }, + format: [ + { + constant: '[MySQL][slowlog] ', + }, + { + field: 'user.name', + }, + { + constant: '@', + }, + { + field: 'source.domain', + }, + { + constant: ' [', + }, + { + field: 'source.ip', + }, + { + constant: '] ', + }, + { + constant: ' - ', + }, + { + field: 'event.duration', + }, + { + constant: ' ns - ', + }, + { + field: 'mysql.slowlog.query', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['mysql.slowlog.user', 'mysql.slowlog.query_time.sec', 'mysql.slowlog.query'], + }, + format: [ + { + constant: '[MySQL][slowlog] ', + }, + { + field: 'mysql.slowlog.user', + }, + { + constant: '@', + }, + { + field: 'mysql.slowlog.host', + }, + { + constant: ' [', + }, + { + field: 'mysql.slowlog.ip', + }, + { + constant: '] ', + }, + { + constant: ' - ', + }, + { + field: 'mysql.slowlog.query_time.sec', + }, + { + constant: ' s - ', + }, + { + field: 'mysql.slowlog.query', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts new file mode 100644 index 0000000000000..0bc8ae1e907b8 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts @@ -0,0 +1,264 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules(getBuiltinRules([])); + +describe('Filebeat Rules', () => { + describe('in ECS format', () => { + test('Nginx Access', () => { + const flattenedDocument = { + '@timestamp': '2017-05-29T19:02:48.000Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'nginx.access', + 'event.module': 'nginx', + 'fileset.name': 'access', + 'http.request.method': 'GET', + 'http.request.referrer': '-', + 'http.response.body.bytes': 612, + 'http.response.status_code': 404, + 'http.version': '1.1', + 'input.type': 'log', + 'log.offset': 183, + 'service.type': 'nginx', + 'source.ip': '172.17.0.1', + 'url.original': '/stringpatch', + 'user.name': '-', + 'user_agent.device': 'Other', + 'user_agent.major': '15', + 'user_agent.minor': '0', + 'user_agent.name': 'Firefox Alpha', + 'user_agent.original': + 'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', + 'user_agent.os.full_name': 'Windows 7', + 'user_agent.os.name': 'Windows 7', + 'user_agent.patch': 'a2', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.module", + "highlights": Array [], + "value": "nginx", + }, + Object { + "constant": "][access] ", + }, + Object { + "field": "source.ip", + "highlights": Array [], + "value": "172.17.0.1", + }, + Object { + "constant": " ", + }, + Object { + "field": "user.name", + "highlights": Array [], + "value": "-", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "http.request.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "url.original", + "highlights": Array [], + "value": "/stringpatch", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "http.version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + 
Object { + "field": "http.response.status_code", + "highlights": Array [], + "value": "404", + }, + Object { + "constant": " ", + }, + Object { + "field": "http.response.body.bytes", + "highlights": Array [], + "value": "612", + }, +] +`); + }); + + test('Nginx Error', () => { + const flattenedDocument = { + '@timestamp': '2016-10-25T14:49:34.000Z', + 'ecs.version': '1.0.0-beta2', + 'event.dataset': 'nginx.error', + 'event.module': 'nginx', + 'fileset.name': 'error', + 'input.type': 'log', + 'log.level': 'error', + 'log.offset': 0, + message: + 'open() "/usr/local/Cellar/nginx/1.10.2_1/html/favicon.ico" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/"', + 'nginx.error.connection_id': 1, + 'process.pid': 54053, + 'process.thread.id': 0, + 'service.type': 'nginx', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[nginx]", + }, + Object { + "constant": "[", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "error", + }, + Object { + "constant": "] ", + }, + Object { + "field": "message", + "highlights": Array [], + "value": "open() \\"/usr/local/Cellar/nginx/1.10.2_1/html/favicon.ico\\" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: \\"GET /favicon.ico HTTP/1.1\\", host: \\"localhost:8080\\", referrer: \\"http://localhost:8080/\\"", + }, +] +`); + }); + }); + + describe('in pre-ECS format', () => { + test('Nginx Access', () => { + const flattenedDocument = { + 'nginx.access': true, + 'nginx.access.remote_ip': '192.168.1.42', + 'nginx.access.user_name': 'admin', + 'nginx.access.method': 'GET', + 'nginx.access.url': '/faq', + 'nginx.access.http_version': '1.1', + 'nginx.access.body_sent.bytes': 1024, + 'nginx.access.response_code': 200, + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { 
+ "constant": "[nginx][access] ", + }, + Object { + "field": "nginx.access.remote_ip", + "highlights": Array [], + "value": "192.168.1.42", + }, + Object { + "constant": " ", + }, + Object { + "field": "nginx.access.user_name", + "highlights": Array [], + "value": "admin", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "nginx.access.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "nginx.access.url", + "highlights": Array [], + "value": "/faq", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "nginx.access.http_version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "nginx.access.response_code", + "highlights": Array [], + "value": "200", + }, + Object { + "constant": " ", + }, + Object { + "field": "nginx.access.body_sent.bytes", + "highlights": Array [], + "value": "1024", + }, +] +`); + }); + + test('Nginx Error', () => { + const flattenedDocument = { + 'nginx.error.message': + 'connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: localhost, request: "GET /php-status?json= HTTP/1.1", upstream: "fastcgi://[::1]:9000", host: "localhost"', + 'nginx.error.level': 'error', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[nginx]", + }, + Object { + "constant": "[", + }, + Object { + "field": "nginx.error.level", + "highlights": Array [], + "value": "error", + }, + Object { + "constant": "] ", + }, + Object { + "field": "nginx.error.message", + "highlights": Array [], + "value": "connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: localhost, request: \\"GET /php-status?json= HTTP/1.1\\", upstream: \\"fastcgi://[::1]:9000\\", host: \\"localhost\\"", + }, +] +`); + }); + }); +}); diff --git 
a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts new file mode 100644 index 0000000000000..0fd70dc25bb88 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const filebeatNginxRules = [ + { + // pre-ECS + when: { + exists: ['nginx.access.method'], + }, + format: [ + { + constant: '[nginx][access] ', + }, + { + field: 'nginx.access.remote_ip', + }, + { + constant: ' ', + }, + { + field: 'nginx.access.user_name', + }, + { + constant: ' "', + }, + { + field: 'nginx.access.method', + }, + { + constant: ' ', + }, + { + field: 'nginx.access.url', + }, + { + constant: ' HTTP/', + }, + { + field: 'nginx.access.http_version', + }, + { + constant: '" ', + }, + { + field: 'nginx.access.response_code', + }, + { + constant: ' ', + }, + { + field: 'nginx.access.body_sent.bytes', + }, + ], + }, + { + // ECS + when: { + values: { + 'event.dataset': 'nginx.error', + }, + }, + format: [ + { + constant: '[nginx]', + }, + { + constant: '[', + }, + { + field: 'log.level', + }, + { + constant: '] ', + }, + { + field: 'message', + }, + ], + }, + { + // pre-ECS + when: { + exists: ['nginx.error.message'], + }, + format: [ + { + constant: '[nginx]', + }, + { + constant: '[', + }, + { + field: 'nginx.error.level', + }, + { + constant: '] ', + }, + { + field: 'nginx.error.message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts new file mode 100644 
index 0000000000000..8dc70053e2022 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { compileFormattingRules } from '../message'; +import { filebeatOsqueryRules } from './filebeat_osquery'; + +const { format } = compileFormattingRules(filebeatOsqueryRules); + +describe('Filebeat Rules', () => { + describe('in pre-ECS format', () => { + test('osquery result log', () => { + const flattenedDocument = { + '@timestamp': '2017-12-28T14:40:08.000Z', + 'event.dataset': 'osquery.result', + 'fileset.module': 'osquery', + 'fileset.name': 'result', + 'input.type': 'log', + offset: 0, + 'osquery.result.action': 'removed', + 'osquery.result.calendar_time': 'Thu Dec 28 14:40:08 2017 UTC', + 'osquery.result.columns': { + blocks: '122061322', + blocks_available: '75966945', + blocks_free: '121274885', + blocks_size: '4096', + device: '/dev/disk1s4', + device_alias: '/dev/disk1s4', + flags: '345018372', + inodes: '9223372036854775807', + inodes_free: '9223372036854775804', + path: '/private/var/vm', + type: 'apfs', + }, + 'osquery.result.counter': '1', + 'osquery.result.decorations.host_uuid': '4AB2906D-5516-5794-AF54-86D1D7F533F3', + 'osquery.result.decorations.username': 'tsg', + 'osquery.result.epoch': '0', + 'osquery.result.host_identifier': '192-168-0-4.rdsnet.ro', + 'osquery.result.name': 'pack_it-compliance_mounts', + 'osquery.result.unix_time': '1514472008', + 'prospector.type': 'log', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[Osquery][", + }, + Object { + "field": "osquery.result.action", + "highlights": Array [], + "value": "removed", + }, + Object { + "constant": "] ", + 
}, + Object { + "field": "osquery.result.host_identifier", + "highlights": Array [], + "value": "192-168-0-4.rdsnet.ro", + }, + Object { + "constant": " ", + }, + Object { + "field": "osquery.result.columns", + "highlights": Array [], + "value": "{\\"blocks\\":\\"122061322\\",\\"blocks_available\\":\\"75966945\\",\\"blocks_free\\":\\"121274885\\",\\"blocks_size\\":\\"4096\\",\\"device\\":\\"/dev/disk1s4\\",\\"device_alias\\":\\"/dev/disk1s4\\",\\"flags\\":\\"345018372\\",\\"inodes\\":\\"9223372036854775807\\",\\"inodes_free\\":\\"9223372036854775804\\",\\"path\\":\\"/private/var/vm\\",\\"type\\":\\"apfs\\"}", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts new file mode 100644 index 0000000000000..b3a6ee8c5cb47 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatOsqueryRules = [ + { + // pre-ECS + when: { + exists: ['osquery.result.name'], + }, + format: [ + { + constant: '[Osquery][', + }, + { + field: 'osquery.result.action', + }, + { + constant: '] ', + }, + { + field: 'osquery.result.host_identifier', + }, + { + constant: ' ', + }, + { + field: 'osquery.result.columns', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts new file mode 100644 index 0000000000000..788c65f92c4b4 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export const filebeatRedisRules = [ + { + when: { + exists: ['redis.log.message'], + }, + format: [ + { + constant: '[Redis]', + }, + { + constant: '[', + }, + { + field: 'redis.log.level', + }, + { + constant: '] ', + }, + { + field: 'redis.log.message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts new file mode 100644 index 0000000000000..cb695abcccdc8 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatSystemRules = [ + { + when: { + exists: ['system.syslog.message'], + }, + format: [ + { + constant: '[System][syslog] ', + }, + { + field: 'system.syslog.program', + }, + { + constant: ' - ', + }, + { + field: 'system.syslog.message', + }, + ], + }, + { + when: { + exists: ['system.auth.message'], + }, + format: [ + { + constant: '[System][auth] ', + }, + { + field: 'system.auth.program', + }, + { + constant: ' - ', + }, + { + field: 'system.auth.message', + }, + ], + }, + { + when: { + exists: ['system.auth.ssh.event'], + }, + format: [ + { + constant: '[System][auth][ssh]', + }, + { + constant: ' ', + }, + { + field: 'system.auth.ssh.event', + }, + { + constant: ' user ', + }, + { + field: 'system.auth.user', + }, + { + constant: ' from ', + }, + { + field: 'system.auth.ssh.ip', + }, + ], + }, + { + when: { + exists: ['system.auth.ssh.dropped_ip'], + }, + format: [ + { + constant: '[System][auth][ssh]', + }, + { + constant: ' Dropped connection from ', + }, + { + field: 'system.auth.ssh.dropped_ip', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts new file mode 100644 index 0000000000000..b19124558fdd0 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { compileFormattingRules } from '../message'; +import { filebeatTraefikRules } from './filebeat_traefik'; + +const { format } = compileFormattingRules(filebeatTraefikRules); + +describe('Filebeat Rules', () => { + describe('in pre-ECS format', () => { + test('traefik access log', () => { + const flattenedDocument = { + '@timestamp': '2017-10-02T20:22:08.000Z', + 'event.dataset': 'traefik.access', + 'fileset.module': 'traefik', + 'fileset.name': 'access', + 'input.type': 'log', + offset: 280, + 'prospector.type': 'log', + 'traefik.access.backend_url': 'http://172.19.0.3:5601', + 'traefik.access.body_sent.bytes': 0, + 'traefik.access.duration': 3, + 'traefik.access.frontend_name': 'Host-host1', + 'traefik.access.geoip.city_name': 'Berlin', + 'traefik.access.geoip.continent_name': 'Europe', + 'traefik.access.geoip.country_iso_code': 'DE', + 'traefik.access.geoip.location.lat': 52.4908, + 'traefik.access.geoip.location.lon': 13.3275, + 'traefik.access.geoip.region_iso_code': 'DE-BE', + 'traefik.access.geoip.region_name': 'Land Berlin', + 'traefik.access.http_version': '1.1', + 'traefik.access.method': 'GET', + 'traefik.access.referrer': 'http://example.com/login', + 'traefik.access.remote_ip': '85.181.35.98', + 'traefik.access.request_count': 271, + 'traefik.access.response_code': '304', + 'traefik.access.url': '/ui/favicons/favicon.ico', + 'traefik.access.user_agent.device': 'Other', + 'traefik.access.user_agent.major': '61', + 'traefik.access.user_agent.minor': '0', + 'traefik.access.user_agent.name': 'Chrome', + 'traefik.access.user_agent.original': + 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', + 'traefik.access.user_agent.os': 'Linux', + 'traefik.access.user_agent.os_name': 'Linux', + 'traefik.access.user_agent.patch': '3163', + 'traefik.access.user_identifier': '-', + 'traefik.access.user_name': '-', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + 
Object { + "constant": "[traefik][access] ", + }, + Object { + "field": "traefik.access.remote_ip", + "highlights": Array [], + "value": "85.181.35.98", + }, + Object { + "constant": " ", + }, + Object { + "field": "traefik.access.frontend_name", + "highlights": Array [], + "value": "Host-host1", + }, + Object { + "constant": " -> ", + }, + Object { + "field": "traefik.access.backend_url", + "highlights": Array [], + "value": "http://172.19.0.3:5601", + }, + Object { + "constant": " \\"", + }, + Object { + "field": "traefik.access.method", + "highlights": Array [], + "value": "GET", + }, + Object { + "constant": " ", + }, + Object { + "field": "traefik.access.url", + "highlights": Array [], + "value": "/ui/favicons/favicon.ico", + }, + Object { + "constant": " HTTP/", + }, + Object { + "field": "traefik.access.http_version", + "highlights": Array [], + "value": "1.1", + }, + Object { + "constant": "\\" ", + }, + Object { + "field": "traefik.access.response_code", + "highlights": Array [], + "value": "304", + }, + Object { + "constant": " ", + }, + Object { + "field": "traefik.access.body_sent.bytes", + "highlights": Array [], + "value": "0", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts new file mode 100644 index 0000000000000..e62c688b9c22f --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const filebeatTraefikRules = [ + { + // pre-ECS + when: { + exists: ['traefik.access.method'], + }, + format: [ + { + constant: '[traefik][access] ', + }, + { + field: 'traefik.access.remote_ip', + }, + { + constant: ' ', + }, + { + field: 'traefik.access.frontend_name', + }, + { + constant: ' -> ', + }, + { + field: 'traefik.access.backend_url', + }, + { + constant: ' "', + }, + { + field: 'traefik.access.method', + }, + { + constant: ' ', + }, + { + field: 'traefik.access.url', + }, + { + constant: ' HTTP/', + }, + { + field: 'traefik.access.http_version', + }, + { + constant: '" ', + }, + { + field: 'traefik.access.response_code', + }, + { + constant: ' ', + }, + { + field: 'traefik.access.body_sent.bytes', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts new file mode 100644 index 0000000000000..d168273626cfa --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { getBuiltinRules } from '.'; +import { compileFormattingRules } from '../message'; + +const { format } = compileFormattingRules( + getBuiltinRules(['first_generic_message', 'second_generic_message']) +); + +describe('Generic Rules', () => { + describe('configurable message rules', () => { + test('includes the event.dataset and log.level if present', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + 'event.dataset': 'generic.test', + 'log.level': 'TEST_LEVEL', + first_generic_message: 'TEST_MESSAGE', + }; + const highlights = { + first_generic_message: ['TEST'], + }; + + expect(format(flattenedDocument, highlights)).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.dataset", + "highlights": Array [], + "value": "generic.test", + }, + Object { + "constant": "][", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "TEST_LEVEL", + }, + Object { + "constant": "] ", + }, + Object { + "field": "first_generic_message", + "highlights": Array [ + "TEST", + ], + "value": "TEST_MESSAGE", + }, +] +`); + }); + + test('includes the log.level if present', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + 'log.level': 'TEST_LEVEL', + first_generic_message: 'TEST_MESSAGE', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "log.level", + "highlights": Array [], + "value": "TEST_LEVEL", + }, + Object { + "constant": "] ", + }, + Object { + "field": "first_generic_message", + "highlights": Array [], + "value": "TEST_MESSAGE", + }, +] +`); + }); + + test('includes the message', () => { + const firstFlattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + first_generic_message: 'FIRST_TEST_MESSAGE', + }; + + expect(format(firstFlattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "field": "first_generic_message", + 
"highlights": Array [], + "value": "FIRST_TEST_MESSAGE", + }, +] +`); + + const secondFlattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + second_generic_message: 'SECOND_TEST_MESSAGE', + }; + + expect(format(secondFlattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "field": "second_generic_message", + "highlights": Array [], + "value": "SECOND_TEST_MESSAGE", + }, +] +`); + }); + }); + + describe('log.original fallback', () => { + test('includes the event.dataset if present', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + 'event.dataset': 'generic.test', + 'log.original': 'TEST_MESSAGE', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "constant": "[", + }, + Object { + "field": "event.dataset", + "highlights": Array [], + "value": "generic.test", + }, + Object { + "constant": "] ", + }, + Object { + "field": "log.original", + "highlights": Array [], + "value": "TEST_MESSAGE", + }, +] +`); + }); + + test('includes the original message', () => { + const flattenedDocument = { + '@timestamp': '2016-12-26T16:22:13.000Z', + 'log.original': 'TEST_MESSAGE', + }; + + expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` +Array [ + Object { + "field": "log.original", + "highlights": Array [], + "value": "TEST_MESSAGE", + }, +] +`); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts new file mode 100644 index 0000000000000..941cfc72afce6 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { LogMessageFormattingRule } from '../rule_types'; + +const BUILTIN_GENERIC_MESSAGE_FIELDS = ['message', '@message']; + +export const getGenericRules = (genericMessageFields: string[]) => [ + ...Array.from(new Set([...genericMessageFields, ...BUILTIN_GENERIC_MESSAGE_FIELDS])).reduce< + LogMessageFormattingRule[] + >((genericRules, fieldName) => [...genericRules, ...createGenericRulesForField(fieldName)], []), + { + when: { + exists: ['event.dataset', 'log.original'], + }, + format: [ + { + constant: '[', + }, + { + field: 'event.dataset', + }, + { + constant: '] ', + }, + { + field: 'log.original', + }, + ], + }, + { + when: { + exists: ['log.original'], + }, + format: [ + { + field: 'log.original', + }, + ], + }, +]; + +const createGenericRulesForField = (fieldName: string) => [ + { + when: { + exists: ['event.dataset', 'log.level', fieldName], + }, + format: [ + { + constant: '[', + }, + { + field: 'event.dataset', + }, + { + constant: '][', + }, + { + field: 'log.level', + }, + { + constant: '] ', + }, + { + field: fieldName, + }, + ], + }, + { + when: { + exists: ['log.level', fieldName], + }, + format: [ + { + constant: '[', + }, + { + field: 'log.level', + }, + { + constant: '] ', + }, + { + field: fieldName, + }, + ], + }, + { + when: { + exists: [fieldName], + }, + format: [ + { + field: fieldName, + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts new file mode 100644 index 0000000000000..50f38ad0515b2 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +const commonPrefixFields = [ + { constant: '[' }, + { field: 'event.module' }, + { constant: '][access] ' }, +]; + +export const genericWebserverRules = [ + { + // ECS with parsed url + when: { + exists: ['ecs.version', 'http.response.status_code', 'url.path'], + }, + format: [ + ...commonPrefixFields, + { + field: 'source.ip', + }, + { + constant: ' ', + }, + { + field: 'user.name', + }, + { + constant: ' "', + }, + { + field: 'http.request.method', + }, + { + constant: ' ', + }, + { + field: 'url.path', + }, + { + constant: '?', + }, + { + field: 'url.query', + }, + { + constant: ' HTTP/', + }, + { + field: 'http.version', + }, + { + constant: '" ', + }, + { + field: 'http.response.status_code', + }, + { + constant: ' ', + }, + { + field: 'http.response.body.bytes', + }, + ], + }, + { + // ECS with original url + when: { + exists: ['ecs.version', 'http.response.status_code'], + }, + format: [ + ...commonPrefixFields, + { + field: 'source.ip', + }, + { + constant: ' ', + }, + { + field: 'user.name', + }, + { + constant: ' "', + }, + { + field: 'http.request.method', + }, + { + constant: ' ', + }, + { + field: 'url.original', + }, + { + constant: ' HTTP/', + }, + { + field: 'http.version', + }, + { + constant: '" ', + }, + { + field: 'http.response.status_code', + }, + { + constant: ' ', + }, + { + field: 'http.response.body.bytes', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts new file mode 100644 index 0000000000000..9a6fa30e17e89 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export const labelField = (label: string, field: string) => [ + { constant: ' ' }, + { constant: label }, + { constant: '=' }, + { field }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts new file mode 100644 index 0000000000000..3f4d7eb901212 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { filebeatApache2Rules } from './filebeat_apache2'; +import { filebeatAuditdRules } from './filebeat_auditd'; +import { filebeatHaproxyRules } from './filebeat_haproxy'; +import { filebeatIcingaRules } from './filebeat_icinga'; +import { filebeatIisRules } from './filebeat_iis'; +import { filebeatLogstashRules } from './filebeat_logstash'; +import { filebeatMongodbRules } from './filebeat_mongodb'; +import { filebeatMySQLRules } from './filebeat_mysql'; +import { filebeatNginxRules } from './filebeat_nginx'; +import { filebeatOsqueryRules } from './filebeat_osquery'; +import { filebeatRedisRules } from './filebeat_redis'; +import { filebeatSystemRules } from './filebeat_system'; +import { filebeatTraefikRules } from './filebeat_traefik'; + +import { getGenericRules } from './generic'; +import { genericWebserverRules } from './generic_webserver'; + +export const getBuiltinRules = (genericMessageFields: string[]) => [ + ...filebeatApache2Rules, + ...filebeatNginxRules, + ...filebeatRedisRules, + ...filebeatSystemRules, + ...filebeatMySQLRules, + ...filebeatAuditdRules, + ...filebeatHaproxyRules, + ...filebeatIcingaRules, + ...filebeatIisRules, + ...filebeatLogstashRules, + ...filebeatMongodbRules, + 
...filebeatOsqueryRules, + ...filebeatTraefikRules, + ...genericWebserverRules, + ...getGenericRules(genericMessageFields), + { + when: { + exists: ['log.path'], + }, + format: [ + { + constant: 'failed to format message from ', + }, + { + field: 'log.path', + }, + ], + }, + { + when: { + exists: [], + }, + format: [ + { + constant: 'failed to find message', + }, + ], + }, +]; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts new file mode 100644 index 0000000000000..98d1e2cd89b01 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { convertDocumentSourceToLogItemFields } from './convert_document_source_to_log_item_fields'; + +describe('convertDocumentSourceToLogItemFields', () => { + test('should convert document', () => { + const doc = { + agent: { + hostname: 'demo-stack-client-01', + id: '7adef8b6-2ab7-45cd-a0d5-b3baad735f1b', + type: 'filebeat', + ephemeral_id: 'a0c8164b-3564-4e32-b0bf-f4db5a7ae566', + version: '7.0.0', + }, + tags: ['prod', 'web'], + metadata: [ + { key: 'env', value: 'prod' }, + { key: 'stack', value: 'web' }, + ], + host: { + hostname: 'packer-virtualbox-iso-1546820004', + name: 'demo-stack-client-01', + }, + }; + + const fields = convertDocumentSourceToLogItemFields(doc); + expect(fields).toEqual([ + { + field: 'agent.hostname', + value: 'demo-stack-client-01', + }, + { + field: 'agent.id', + value: '7adef8b6-2ab7-45cd-a0d5-b3baad735f1b', + }, + { + field: 'agent.type', + value: 'filebeat', + }, + { + field: 'agent.ephemeral_id', + value: 'a0c8164b-3564-4e32-b0bf-f4db5a7ae566', + }, + { + field: 'agent.version', + value: '7.0.0', + }, + { + field: 'tags', + value: '["prod","web"]', + }, + { + field: 'metadata', + value: '[{"key":"env","value":"prod"},{"key":"stack","value":"web"}]', + }, + { + field: 'host.hostname', + value: 'packer-virtualbox-iso-1546820004', + }, + { + field: 'host.name', + value: 'demo-stack-client-01', + }, + ]); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts new file mode 100644 index 0000000000000..099e7c3b5038c --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import stringify from 'json-stable-stringify'; +import { isArray, isPlainObject } from 'lodash'; + +import { JsonObject } from '../../../../common/typed_json'; +import { LogEntriesItemField } from '../../../../common/http_api'; + +const isJsonObject = (subject: any): subject is JsonObject => { + return isPlainObject(subject); +}; + +const serializeValue = (value: any): string => { + if (isArray(value) || isPlainObject(value)) { + return stringify(value); + } + return `${value}`; +}; + +export const convertDocumentSourceToLogItemFields = ( + source: JsonObject, + path: string[] = [], + fields: LogEntriesItemField[] = [] +): LogEntriesItemField[] => { + return Object.keys(source).reduce((acc, key) => { + const value = source[key]; + const nextPath = [...path, key]; + if (isJsonObject(value)) { + return convertDocumentSourceToLogItemFields(value, nextPath, acc); + } + const field = { field: nextPath.join('.'), value: serializeValue(value) }; + return [...acc, field]; + }, fields); +}; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/index.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/index.ts new file mode 100644 index 0000000000000..2cb8140febdcd --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './log_entries_domain'; diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts new file mode 100644 index 0000000000000..347f0dcf795bc --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts @@ -0,0 +1,436 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import stringify from 'json-stable-stringify'; +import { sortBy } from 'lodash'; + +import { RequestHandlerContext } from 'src/core/server'; +import { TimeKey } from '../../../../common/time'; +import { JsonObject } from '../../../../common/typed_json'; +import { + LogEntriesSummaryBucket, + LogEntriesSummaryHighlightsBucket, + LogEntriesItem, +} from '../../../../common/http_api'; +import { InfraLogEntry, InfraLogMessageSegment } from '../../../graphql/types'; +import { + InfraSourceConfiguration, + InfraSources, + SavedSourceConfigurationFieldColumnRuntimeType, + SavedSourceConfigurationMessageColumnRuntimeType, + SavedSourceConfigurationTimestampColumnRuntimeType, +} from '../../sources'; +import { getBuiltinRules } from './builtin_rules'; +import { convertDocumentSourceToLogItemFields } from './convert_document_source_to_log_item_fields'; +import { + CompiledLogMessageFormattingRule, + Fields, + Highlights, + compileFormattingRules, +} from './message'; + +export class InfraLogEntriesDomain { + constructor( + private readonly adapter: LogEntriesAdapter, + private readonly libs: { sources: InfraSources } + ) {} + + public async getLogEntriesAround( + requestContext: RequestHandlerContext, + sourceId: string, + key: TimeKey, + maxCountBefore: number, + maxCountAfter: number, + filterQuery?: LogEntryQuery, + highlightQuery?: 
LogEntryQuery + ): Promise<{ entriesBefore: InfraLogEntry[]; entriesAfter: InfraLogEntry[] }> { + if (maxCountBefore <= 0 && maxCountAfter <= 0) { + return { + entriesBefore: [], + entriesAfter: [], + }; + } + + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const messageFormattingRules = compileFormattingRules( + getBuiltinRules(configuration.fields.message) + ); + const requiredFields = getRequiredFields(configuration, messageFormattingRules); + + const documentsBefore = await this.adapter.getAdjacentLogEntryDocuments( + requestContext, + configuration, + requiredFields, + key, + 'desc', + Math.max(maxCountBefore, 1), + filterQuery, + highlightQuery + ); + const lastKeyBefore = + documentsBefore.length > 0 + ? documentsBefore[documentsBefore.length - 1].key + : { + time: key.time - 1, + tiebreaker: 0, + }; + + const documentsAfter = await this.adapter.getAdjacentLogEntryDocuments( + requestContext, + configuration, + requiredFields, + lastKeyBefore, + 'asc', + maxCountAfter, + filterQuery, + highlightQuery + ); + + return { + entriesBefore: (maxCountBefore > 0 ? 
documentsBefore : []).map( + convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) + ), + entriesAfter: documentsAfter.map( + convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) + ), + }; + } + + public async getLogEntriesBetween( + requestContext: RequestHandlerContext, + sourceId: string, + startKey: TimeKey, + endKey: TimeKey, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise { + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const messageFormattingRules = compileFormattingRules( + getBuiltinRules(configuration.fields.message) + ); + const requiredFields = getRequiredFields(configuration, messageFormattingRules); + const documents = await this.adapter.getContainedLogEntryDocuments( + requestContext, + configuration, + requiredFields, + startKey, + endKey, + filterQuery, + highlightQuery + ); + const entries = documents.map( + convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) + ); + return entries; + } + + public async getLogEntryHighlights( + requestContext: RequestHandlerContext, + sourceId: string, + startKey: TimeKey, + endKey: TimeKey, + highlights: Array<{ + query: string; + countBefore: number; + countAfter: number; + }>, + filterQuery?: LogEntryQuery + ): Promise { + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const messageFormattingRules = compileFormattingRules( + getBuiltinRules(configuration.fields.message) + ); + const requiredFields = getRequiredFields(configuration, messageFormattingRules); + + const documentSets = await Promise.all( + highlights.map(async highlight => { + const highlightQuery = createHighlightQueryDsl(highlight.query, requiredFields); + const query = filterQuery + ? 
{ + bool: { + filter: [filterQuery, highlightQuery], + }, + } + : highlightQuery; + const [documentsBefore, documents, documentsAfter] = await Promise.all([ + this.adapter.getAdjacentLogEntryDocuments( + requestContext, + configuration, + requiredFields, + startKey, + 'desc', + highlight.countBefore, + query, + highlightQuery + ), + this.adapter.getContainedLogEntryDocuments( + requestContext, + configuration, + requiredFields, + startKey, + endKey, + query, + highlightQuery + ), + this.adapter.getAdjacentLogEntryDocuments( + requestContext, + configuration, + requiredFields, + endKey, + 'asc', + highlight.countAfter, + query, + highlightQuery + ), + ]); + const entries = [...documentsBefore, ...documents, ...documentsAfter].map( + convertLogDocumentToEntry( + sourceId, + configuration.logColumns, + messageFormattingRules.format + ) + ); + + return entries; + }) + ); + + return documentSets; + } + + public async getLogSummaryBucketsBetween( + requestContext: RequestHandlerContext, + sourceId: string, + start: number, + end: number, + bucketSize: number, + filterQuery?: LogEntryQuery + ): Promise { + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const dateRangeBuckets = await this.adapter.getContainedLogSummaryBuckets( + requestContext, + configuration, + start, + end, + bucketSize, + filterQuery + ); + return dateRangeBuckets; + } + + public async getLogSummaryHighlightBucketsBetween( + requestContext: RequestHandlerContext, + sourceId: string, + start: number, + end: number, + bucketSize: number, + highlightQueries: string[], + filterQuery?: LogEntryQuery + ): Promise { + const { configuration } = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const messageFormattingRules = compileFormattingRules( + getBuiltinRules(configuration.fields.message) + ); + const requiredFields = getRequiredFields(configuration, messageFormattingRules); + + const summaries = await 
Promise.all( + highlightQueries.map(async highlightQueryPhrase => { + const highlightQuery = createHighlightQueryDsl(highlightQueryPhrase, requiredFields); + const query = filterQuery + ? { + bool: { + must: [filterQuery, highlightQuery], + }, + } + : highlightQuery; + const summaryBuckets = await this.adapter.getContainedLogSummaryBuckets( + requestContext, + configuration, + start, + end, + bucketSize, + query + ); + const summaryHighlightBuckets = summaryBuckets + .filter(logSummaryBucketHasEntries) + .map(convertLogSummaryBucketToSummaryHighlightBucket); + return summaryHighlightBuckets; + }) + ); + + return summaries; + } + + public async getLogItem( + requestContext: RequestHandlerContext, + id: string, + sourceConfiguration: InfraSourceConfiguration + ): Promise { + const document = await this.adapter.getLogItem(requestContext, id, sourceConfiguration); + const defaultFields = [ + { field: '_index', value: document._index }, + { field: '_id', value: document._id }, + ]; + + return { + id: document._id, + index: document._index, + key: { + time: document.sort[0], + tiebreaker: document.sort[1], + }, + fields: sortBy( + [...defaultFields, ...convertDocumentSourceToLogItemFields(document._source)], + 'field' + ), + }; + } +} + +interface LogItemHit { + _index: string; + _id: string; + _source: JsonObject; + sort: [number, number]; +} + +export interface LogEntriesAdapter { + getAdjacentLogEntryDocuments( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + fields: string[], + start: TimeKey, + direction: 'asc' | 'desc', + maxCount: number, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise; + + getContainedLogEntryDocuments( + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + fields: string[], + start: TimeKey, + end: TimeKey, + filterQuery?: LogEntryQuery, + highlightQuery?: LogEntryQuery + ): Promise; + + getContainedLogSummaryBuckets( + requestContext: 
RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + start: number, + end: number, + bucketSize: number, + filterQuery?: LogEntryQuery + ): Promise; + + getLogItem( + requestContext: RequestHandlerContext, + id: string, + source: InfraSourceConfiguration + ): Promise; +} + +export type LogEntryQuery = JsonObject; + +export interface LogEntryDocument { + fields: Fields; + gid: string; + highlights: Highlights; + key: TimeKey; +} + +export interface LogSummaryBucket { + entriesCount: number; + start: number; + end: number; + topEntryKeys: TimeKey[]; +} + +const convertLogDocumentToEntry = ( + sourceId: string, + logColumns: InfraSourceConfiguration['logColumns'], + formatLogMessage: (fields: Fields, highlights: Highlights) => InfraLogMessageSegment[] +) => (document: LogEntryDocument): InfraLogEntry => ({ + key: document.key, + gid: document.gid, + source: sourceId, + columns: logColumns.map(logColumn => { + if (SavedSourceConfigurationTimestampColumnRuntimeType.is(logColumn)) { + return { + columnId: logColumn.timestampColumn.id, + timestamp: document.key.time, + }; + } else if (SavedSourceConfigurationMessageColumnRuntimeType.is(logColumn)) { + return { + columnId: logColumn.messageColumn.id, + message: formatLogMessage(document.fields, document.highlights), + }; + } else { + return { + columnId: logColumn.fieldColumn.id, + field: logColumn.fieldColumn.field, + highlights: document.highlights[logColumn.fieldColumn.field] || [], + value: stringify(document.fields[logColumn.fieldColumn.field] || null), + }; + } + }), +}); + +const logSummaryBucketHasEntries = (bucket: LogSummaryBucket) => + bucket.entriesCount > 0 && bucket.topEntryKeys.length > 0; + +const convertLogSummaryBucketToSummaryHighlightBucket = ( + bucket: LogSummaryBucket +): LogEntriesSummaryHighlightsBucket => ({ + entriesCount: bucket.entriesCount, + start: bucket.start, + end: bucket.end, + representativeKey: bucket.topEntryKeys[0], +}); + +const getRequiredFields = ( + 
configuration: InfraSourceConfiguration, + messageFormattingRules: CompiledLogMessageFormattingRule +): string[] => { + const fieldsFromCustomColumns = configuration.logColumns.reduce( + (accumulatedFields, logColumn) => { + if (SavedSourceConfigurationFieldColumnRuntimeType.is(logColumn)) { + return [...accumulatedFields, logColumn.fieldColumn.field]; + } + return accumulatedFields; + }, + [] + ); + const fieldsFromFormattingRules = messageFormattingRules.requiredFields; + + return Array.from(new Set([...fieldsFromCustomColumns, ...fieldsFromFormattingRules])); +}; + +const createHighlightQueryDsl = (phrase: string, fields: string[]) => ({ + multi_match: { + fields, + lenient: true, + query: phrase, + type: 'phrase', + }, +}); diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/message.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/message.ts new file mode 100644 index 0000000000000..58cffc7584979 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/message.ts @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import stringify from 'json-stable-stringify'; + +import { InfraLogMessageSegment } from '../../../graphql/types'; +import { + LogMessageFormattingCondition, + LogMessageFormattingInstruction, + LogMessageFormattingRule, +} from './rule_types'; + +export function compileFormattingRules( + rules: LogMessageFormattingRule[] +): CompiledLogMessageFormattingRule { + const compiledRules = rules.map(compileRule); + + return { + requiredFields: Array.from( + new Set( + compiledRules.reduce( + (combinedRequiredFields, { requiredFields }) => [ + ...combinedRequiredFields, + ...requiredFields, + ], + [] as string[] + ) + ) + ), + format(fields, highlights): InfraLogMessageSegment[] { + for (const compiledRule of compiledRules) { + if (compiledRule.fulfillsCondition(fields)) { + return compiledRule.format(fields, highlights); + } + } + + return []; + }, + fulfillsCondition() { + return true; + }, + }; +} + +const compileRule = (rule: LogMessageFormattingRule): CompiledLogMessageFormattingRule => { + const { conditionFields, fulfillsCondition } = compileCondition(rule.when); + const { formattingFields, format } = compileFormattingInstructions(rule.format); + + return { + requiredFields: [...conditionFields, ...formattingFields], + fulfillsCondition, + format, + }; +}; + +const compileCondition = ( + condition: LogMessageFormattingCondition +): CompiledLogMessageFormattingCondition => + [compileExistsCondition, compileFieldValueCondition].reduce( + (compiledCondition, compile) => compile(condition) || compiledCondition, + catchAllCondition + ); + +const catchAllCondition: CompiledLogMessageFormattingCondition = { + conditionFields: [] as string[], + fulfillsCondition: () => false, +}; + +const compileExistsCondition = (condition: LogMessageFormattingCondition) => + 'exists' in condition + ? 
{ + conditionFields: condition.exists, + fulfillsCondition: (fields: Fields) => + condition.exists.every(fieldName => fieldName in fields), + } + : null; + +const compileFieldValueCondition = (condition: LogMessageFormattingCondition) => + 'values' in condition + ? { + conditionFields: Object.keys(condition.values), + fulfillsCondition: (fields: Fields) => + Object.entries(condition.values).every( + ([fieldName, expectedValue]) => fields[fieldName] === expectedValue + ), + } + : null; + +const compileFormattingInstructions = ( + formattingInstructions: LogMessageFormattingInstruction[] +): CompiledLogMessageFormattingInstruction => + formattingInstructions.reduce( + (combinedFormattingInstructions, formattingInstruction) => { + const compiledFormattingInstruction = compileFormattingInstruction(formattingInstruction); + + return { + formattingFields: [ + ...combinedFormattingInstructions.formattingFields, + ...compiledFormattingInstruction.formattingFields, + ], + format: (fields: Fields, highlights: Highlights) => [ + ...combinedFormattingInstructions.format(fields, highlights), + ...compiledFormattingInstruction.format(fields, highlights), + ], + }; + }, + { + formattingFields: [], + format: () => [], + } as CompiledLogMessageFormattingInstruction + ); + +const compileFormattingInstruction = ( + formattingInstruction: LogMessageFormattingInstruction +): CompiledLogMessageFormattingInstruction => + [compileFieldReferenceFormattingInstruction, compileConstantFormattingInstruction].reduce( + (compiledFormattingInstruction, compile) => + compile(formattingInstruction) || compiledFormattingInstruction, + catchAllFormattingInstruction + ); + +const catchAllFormattingInstruction: CompiledLogMessageFormattingInstruction = { + formattingFields: [], + format: () => [ + { + constant: 'invalid format', + }, + ], +}; + +const compileFieldReferenceFormattingInstruction = ( + formattingInstruction: LogMessageFormattingInstruction +): CompiledLogMessageFormattingInstruction | 
null => + 'field' in formattingInstruction + ? { + formattingFields: [formattingInstruction.field], + format: (fields, highlights) => { + const value = fields[formattingInstruction.field]; + const highlightedValues = highlights[formattingInstruction.field]; + return [ + { + field: formattingInstruction.field, + value: typeof value === 'object' ? stringify(value) : `${value}`, + highlights: highlightedValues || [], + }, + ]; + }, + } + : null; + +const compileConstantFormattingInstruction = ( + formattingInstruction: LogMessageFormattingInstruction +): CompiledLogMessageFormattingInstruction | null => + 'constant' in formattingInstruction + ? { + formattingFields: [] as string[], + format: () => [ + { + constant: formattingInstruction.constant, + }, + ], + } + : null; + +export interface Fields { + [fieldName: string]: string | number | object | boolean | null; +} + +export interface Highlights { + [fieldName: string]: string[]; +} + +export interface CompiledLogMessageFormattingRule { + requiredFields: string[]; + fulfillsCondition(fields: Fields): boolean; + format(fields: Fields, highlights: Highlights): InfraLogMessageSegment[]; +} + +export interface CompiledLogMessageFormattingCondition { + conditionFields: string[]; + fulfillsCondition(fields: Fields): boolean; +} + +export interface CompiledLogMessageFormattingInstruction { + formattingFields: string[]; + format(fields: Fields, highlights: Highlights): InfraLogMessageSegment[]; +} diff --git a/x-pack/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts new file mode 100644 index 0000000000000..6107fc362f8e3 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export interface LogMessageFormattingRule { + when: LogMessageFormattingCondition; + format: LogMessageFormattingInstruction[]; +} + +export type LogMessageFormattingCondition = + | LogMessageFormattingExistsCondition + | LogMessageFormattingFieldValueCondition; + +export interface LogMessageFormattingExistsCondition { + exists: string[]; +} + +export interface LogMessageFormattingFieldValueCondition { + values: { + [fieldName: string]: string | number | boolean | null; + }; +} + +export type LogMessageFormattingInstruction = + | LogMessageFormattingFieldReference + | LogMessageFormattingConstant; + +export interface LogMessageFormattingFieldReference { + field: string; +} + +export interface LogMessageFormattingConstant { + constant: string; +} diff --git a/x-pack/plugins/infra/server/lib/domains/metrics_domain.ts b/x-pack/plugins/infra/server/lib/domains/metrics_domain.ts new file mode 100644 index 0000000000000..e53e45afae5c4 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/domains/metrics_domain.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; +import { InfraMetricData } from '../../graphql/types'; +import { InfraMetricsAdapter, InfraMetricsRequestOptions } from '../adapters/metrics/adapter_types'; + +export class InfraMetricsDomain { + private adapter: InfraMetricsAdapter; + + constructor(adapter: InfraMetricsAdapter) { + this.adapter = adapter; + } + + public async getMetrics( + requestContext: RequestHandlerContext, + options: InfraMetricsRequestOptions, + rawRequest: KibanaRequest + ): Promise<InfraMetricData[]> { + return await this.adapter.getMetrics(requestContext, options, rawRequest); + } +} diff --git a/x-pack/plugins/infra/server/lib/infra_types.ts b/x-pack/plugins/infra/server/lib/infra_types.ts new file mode 100644 index 0000000000000..46d32885600df --- /dev/null +++ b/x-pack/plugins/infra/server/lib/infra_types.ts @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +import { InfraSourceConfiguration } from '../../public/graphql/types'; +import { InfraFieldsDomain } from './domains/fields_domain'; +import { InfraLogEntriesDomain } from './domains/log_entries_domain'; +import { InfraMetricsDomain } from './domains/metrics_domain'; +import { InfraLogAnalysis } from './log_analysis/log_analysis'; +import { InfraSnapshot } from './snapshot'; +import { InfraSources } from './sources'; +import { InfraSourceStatus } from './source_status'; +import { InfraConfig } from '../../../../../plugins/infra/server'; +import { KibanaFramework } from './adapters/framework/kibana_framework_adapter'; + +// NP_TODO: We shouldn't need this context anymore but I am +// not sure how the graphql stuff uses it, so we can't remove it yet +export interface InfraContext { + req: any; + rawReq?: any; +} + +export interface InfraDomainLibs { + fields: InfraFieldsDomain; + logEntries: InfraLogEntriesDomain; + metrics: InfraMetricsDomain; +} + +export interface InfraBackendLibs extends InfraDomainLibs { + configuration: InfraConfig; + framework: KibanaFramework; + logAnalysis: InfraLogAnalysis; + snapshot: InfraSnapshot; + sources: InfraSources; + sourceStatus: InfraSourceStatus; +} + +export interface InfraConfiguration { + enabled: boolean; + query: { + partitionSize: number; + partitionFactor: number; + }; + sources: { + default: InfraSourceConfiguration; + }; +} diff --git a/x-pack/plugins/infra/server/lib/log_analysis/errors.ts b/x-pack/plugins/infra/server/lib/log_analysis/errors.ts new file mode 100644 index 0000000000000..dc5c87c61fdce --- /dev/null +++ b/x-pack/plugins/infra/server/lib/log_analysis/errors.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export class NoLogRateResultsIndexError extends Error { + constructor(message?: string) { + super(message); + Object.setPrototypeOf(this, new.target.prototype); + } +} diff --git a/x-pack/plugins/infra/server/lib/log_analysis/index.ts b/x-pack/plugins/infra/server/lib/log_analysis/index.ts new file mode 100644 index 0000000000000..0b58c71c1db7b --- /dev/null +++ b/x-pack/plugins/infra/server/lib/log_analysis/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './errors'; +export * from './log_analysis'; diff --git a/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts b/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts new file mode 100644 index 0000000000000..fac49a7980f26 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { pipe } from 'fp-ts/lib/pipeable'; +import { map, fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { getJobId } from '../../../common/log_analysis'; +import { throwErrors, createPlainError } from '../../../common/runtime_types'; +import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; +import { NoLogRateResultsIndexError } from './errors'; +import { + logRateModelPlotResponseRT, + createLogEntryRateQuery, + LogRateModelPlotBucket, + CompositeTimestampPartitionKey, +} from './queries'; +import { RequestHandlerContext, KibanaRequest } from '../../../../../../../src/core/server'; + +const COMPOSITE_AGGREGATION_BATCH_SIZE = 1000; + +export class InfraLogAnalysis { + constructor( + private readonly libs: { + framework: KibanaFramework; + } + ) {} + + public getJobIds(request: KibanaRequest, sourceId: string) { + return { + logEntryRate: getJobId(this.libs.framework.getSpaceId(request), sourceId, 'log-entry-rate'), + }; + } + + public async getLogEntryRateBuckets( + requestContext: RequestHandlerContext, + sourceId: string, + startTime: number, + endTime: number, + bucketDuration: number, + request: KibanaRequest + ) { + const logRateJobId = this.getJobIds(request, sourceId).logEntryRate; + let mlModelPlotBuckets: LogRateModelPlotBucket[] = []; + let afterLatestBatchKey: CompositeTimestampPartitionKey | undefined; + + while (true) { + const mlModelPlotResponse = await this.libs.framework.callWithRequest( + requestContext, + 'search', + createLogEntryRateQuery( + logRateJobId, + startTime, + endTime, + bucketDuration, + COMPOSITE_AGGREGATION_BATCH_SIZE, + afterLatestBatchKey + ) + ); + + if (mlModelPlotResponse._shards.total === 0) { + throw new NoLogRateResultsIndexError( + `Failed to find ml result index for job ${logRateJobId}.` + ); + } + + const { after_key: afterKey, buckets: latestBatchBuckets } = pipe( + logRateModelPlotResponseRT.decode(mlModelPlotResponse), + map(response => 
response.aggregations.timestamp_partition_buckets), + fold(throwErrors(createPlainError), identity) + ); + + mlModelPlotBuckets = [...mlModelPlotBuckets, ...latestBatchBuckets]; + afterLatestBatchKey = afterKey; + + if (latestBatchBuckets.length < COMPOSITE_AGGREGATION_BATCH_SIZE) { + break; + } + } + + return mlModelPlotBuckets.reduce< + Array<{ + partitions: Array<{ + analysisBucketCount: number; + anomalies: Array<{ + actualLogEntryRate: number; + anomalyScore: number; + duration: number; + startTime: number; + typicalLogEntryRate: number; + }>; + averageActualLogEntryRate: number; + maximumAnomalyScore: number; + numberOfLogEntries: number; + partitionId: string; + }>; + startTime: number; + }> + >((histogramBuckets, timestampPartitionBucket) => { + const previousHistogramBucket = histogramBuckets[histogramBuckets.length - 1]; + const partition = { + analysisBucketCount: timestampPartitionBucket.filter_model_plot.doc_count, + anomalies: timestampPartitionBucket.filter_records.top_hits_record.hits.hits.map( + ({ _source: record }) => ({ + actualLogEntryRate: record.actual[0], + anomalyScore: record.record_score, + duration: record.bucket_span * 1000, + startTime: record.timestamp, + typicalLogEntryRate: record.typical[0], + }) + ), + averageActualLogEntryRate: + timestampPartitionBucket.filter_model_plot.average_actual.value || 0, + maximumAnomalyScore: + timestampPartitionBucket.filter_records.maximum_record_score.value || 0, + numberOfLogEntries: timestampPartitionBucket.filter_model_plot.sum_actual.value || 0, + partitionId: timestampPartitionBucket.key.partition, + }; + if ( + previousHistogramBucket && + previousHistogramBucket.startTime === timestampPartitionBucket.key.timestamp + ) { + return [ + ...histogramBuckets.slice(0, -1), + { + ...previousHistogramBucket, + partitions: [...previousHistogramBucket.partitions, partition], + }, + ]; + } else { + return [ + ...histogramBuckets, + { + partitions: [partition], + startTime: 
timestampPartitionBucket.key.timestamp, + }, + ]; + } + }, []); + } +} diff --git a/x-pack/plugins/infra/server/lib/log_analysis/queries/index.ts b/x-pack/plugins/infra/server/lib/log_analysis/queries/index.ts new file mode 100644 index 0000000000000..1749421277719 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/log_analysis/queries/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_entry_rate'; diff --git a/x-pack/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts b/x-pack/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts new file mode 100644 index 0000000000000..2dd0880cbf8cb --- /dev/null +++ b/x-pack/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; + +const ML_ANOMALY_INDEX_PREFIX = '.ml-anomalies-'; + +export const createLogEntryRateQuery = ( + logRateJobId: string, + startTime: number, + endTime: number, + bucketDuration: number, + size: number, + afterKey?: CompositeTimestampPartitionKey +) => ({ + allowNoIndices: true, + body: { + query: { + bool: { + filter: [ + { + range: { + timestamp: { + gte: startTime, + lt: endTime, + }, + }, + }, + { + terms: { + result_type: ['model_plot', 'record'], + }, + }, + { + term: { + detector_index: { + value: 0, + }, + }, + }, + ], + }, + }, + aggs: { + timestamp_partition_buckets: { + composite: { + after: afterKey, + size, + sources: [ + { + timestamp: { + date_histogram: { + field: 'timestamp', + fixed_interval: `${bucketDuration}ms`, + order: 'asc', + }, + }, + }, + { + partition: { + terms: { + field: 'partition_field_value', + order: 'asc', + }, + }, + }, + ], + }, + aggs: { + filter_model_plot: { + filter: { + term: { + result_type: 'model_plot', + }, + }, + aggs: { + average_actual: { + avg: { + field: 'actual', + }, + }, + sum_actual: { + sum: { + field: 'actual', + }, + }, + }, + }, + filter_records: { + filter: { + term: { + result_type: 'record', + }, + }, + aggs: { + maximum_record_score: { + max: { + field: 'record_score', + }, + }, + top_hits_record: { + top_hits: { + _source: Object.keys(logRateMlRecordRT.props), + size: 100, + sort: [ + { + timestamp: 'asc', + }, + ], + }, + }, + }, + }, + }, + }, + }, + }, + ignoreUnavailable: true, + index: `${ML_ANOMALY_INDEX_PREFIX}${logRateJobId}`, + size: 0, + trackScores: false, + trackTotalHits: false, +}); + +const logRateMlRecordRT = rt.type({ + actual: rt.array(rt.number), + bucket_span: rt.number, + record_score: rt.number, + timestamp: rt.number, + typical: rt.array(rt.number), +}); + +const metricAggregationRT = rt.type({ + value: rt.union([rt.number, rt.null]), +}); + +const compositeTimestampPartitionKeyRT = rt.type({ + partition: rt.string, + timestamp: rt.number, 
+}); + +export type CompositeTimestampPartitionKey = rt.TypeOf<typeof compositeTimestampPartitionKeyRT>; + +export const logRateModelPlotBucketRT = rt.type({ + key: compositeTimestampPartitionKeyRT, + filter_records: rt.type({ + doc_count: rt.number, + maximum_record_score: metricAggregationRT, + top_hits_record: rt.type({ + hits: rt.type({ + hits: rt.array( + rt.type({ + _source: logRateMlRecordRT, + }) + ), + }), + }), + }), + filter_model_plot: rt.type({ + doc_count: rt.number, + average_actual: metricAggregationRT, + sum_actual: metricAggregationRT, + }), +}); + +export type LogRateModelPlotBucket = rt.TypeOf<typeof logRateModelPlotBucketRT>; + +export const logRateModelPlotResponseRT = rt.type({ + aggregations: rt.type({ + timestamp_partition_buckets: rt.intersection([ + rt.type({ + buckets: rt.array(logRateModelPlotBucketRT), + }), + rt.partial({ + after_key: compositeTimestampPartitionKeyRT, + }), + ]), + }), +}); diff --git a/x-pack/plugins/infra/server/lib/snapshot/constants.ts b/x-pack/plugins/infra/server/lib/snapshot/constants.ts new file mode 100644 index 0000000000000..0420878dbcf50 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/constants.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +// TODO: Make SNAPSHOT_COMPOSITE_REQUEST_SIZE configurable from kibana.yml + +export const SNAPSHOT_COMPOSITE_REQUEST_SIZE = 75; diff --git a/x-pack/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts b/x-pack/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts new file mode 100644 index 0000000000000..6c27e54a78bee --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { uniq } from 'lodash'; +import { RequestHandlerContext } from 'kibana/server'; +import { InfraSnapshotRequestOptions } from './types'; +import { InfraTimerangeInput } from '../../../public/graphql/types'; +import { getMetricsAggregations } from './query_helpers'; +import { calculateMetricInterval } from '../../utils/calculate_metric_interval'; +import { SnapshotModel, SnapshotModelMetricAggRT } from '../../../common/inventory_models/types'; +import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; + +export const createTimeRangeWithInterval = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + options: InfraSnapshotRequestOptions +): Promise<InfraTimerangeInput> => { + const aggregations = getMetricsAggregations(options); + const modules = aggregationsToModules(aggregations); + const interval = + (await calculateMetricInterval( + framework, + requestContext, + { + indexPattern: options.sourceConfiguration.metricAlias, + timestampField: options.sourceConfiguration.fields.timestamp, + timerange: { from: options.timerange.from, to: options.timerange.to }, + }, + modules, + options.nodeType + )) || 60000; + return { + interval: `${interval}s`, + from: options.timerange.to - interval * 5000, // We need at least 5 buckets worth of data + to: options.timerange.to, + }; +}; + +const aggregationsToModules = (aggregations: SnapshotModel): string[] => { + return uniq( + Object.values(aggregations) + .reduce((modules, agg) => { + if (SnapshotModelMetricAggRT.is(agg)) { + return modules.concat(Object.values(agg).map(a => a?.field)); + } + return modules; + }, [] as Array<string | undefined>) + .filter(v => v) + .map(field => + field!
+ .split(/\./) + .slice(0, 2) + .join('.') + ) + ) as string[]; +}; diff --git a/x-pack/plugins/infra/server/lib/snapshot/index.ts b/x-pack/plugins/infra/server/lib/snapshot/index.ts new file mode 100644 index 0000000000000..8db54da803648 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './snapshot'; diff --git a/x-pack/plugins/infra/server/lib/snapshot/query_helpers.ts b/x-pack/plugins/infra/server/lib/snapshot/query_helpers.ts new file mode 100644 index 0000000000000..44d32c7b915a8 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/query_helpers.ts @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { i18n } from '@kbn/i18n'; +import { findInventoryModel, findInventoryFields } from '../../../common/inventory_models/index'; +import { InfraSnapshotRequestOptions } from './types'; +import { getIntervalInSeconds } from '../../utils/get_interval_in_seconds'; +import { SnapshotModelRT, SnapshotModel } from '../../../common/inventory_models/types'; + +interface GroupBySource { + [id: string]: { + terms: { + field: string | null | undefined; + missing_bucket?: boolean; + }; + }; +} + +export const getFieldByNodeType = (options: InfraSnapshotRequestOptions) => { + const inventoryFields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); + return inventoryFields.id; +}; + +export const getGroupedNodesSources = (options: InfraSnapshotRequestOptions) => { + const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); + const sources: GroupBySource[] = options.groupBy.map(gb => { + return { [`${gb.field}`]: { terms: { field: gb.field } } }; + }); + sources.push({ + id: { + terms: { field: fields.id }, + }, + }); + sources.push({ + name: { terms: { field: fields.name, missing_bucket: true } }, + }); + return sources; +}; + +export const getMetricsSources = (options: InfraSnapshotRequestOptions) => { + const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); + return [{ id: { terms: { field: fields.id } } }]; +}; + +export const getMetricsAggregations = (options: InfraSnapshotRequestOptions): SnapshotModel => { + const inventoryModel = findInventoryModel(options.nodeType); + const aggregation = inventoryModel.metrics.snapshot?.[options.metric.type]; + if (!SnapshotModelRT.is(aggregation)) { + throw new Error( + i18n.translate('xpack.infra.snapshot.missingSnapshotMetricError', { + defaultMessage: 'The aggregation for {metric} for {nodeType} is not available.', + values: { + nodeType: options.nodeType, + metric: options.metric.type, + }, + }) + ); + } + return aggregation; +}; 
+ +export const getDateHistogramOffset = (options: InfraSnapshotRequestOptions): string => { + const { from, interval } = options.timerange; + const fromInSeconds = Math.floor(from / 1000); + const bucketSizeInSeconds = getIntervalInSeconds(interval); + + // negative offset to align buckets with full intervals (e.g. minutes) + const offset = (fromInSeconds % bucketSizeInSeconds) - bucketSizeInSeconds; + return `${offset}s`; +}; diff --git a/x-pack/plugins/infra/server/lib/snapshot/response_helpers.test.ts b/x-pack/plugins/infra/server/lib/snapshot/response_helpers.test.ts new file mode 100644 index 0000000000000..28146624a8a89 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/response_helpers.test.ts @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { isIPv4, getIPFromBucket, InfraSnapshotNodeGroupByBucket } from './response_helpers'; +import { InfraNodeType } from '../../graphql/types'; + +describe('InfraOps ResponseHelpers', () => { + describe('isIPv4', () => { + it('should return true for IPv4', () => { + expect(isIPv4('192.168.2.4')).toBe(true); + }); + it('should return false for anything else', () => { + expect(isIPv4('0:0:0:0:0:0:0:1')).toBe(false); + }); + }); + + describe('getIPFromBucket', () => { + it('should return IPv4 address', () => { + const bucket: InfraSnapshotNodeGroupByBucket = { + key: { + id: 'example-01', + name: 'example-01', + }, + ip: { + hits: { + total: { value: 1 }, + hits: [ + { + _index: 'metricbeat-2019-01-01', + _type: '_doc', + _id: '29392939', + _score: null, + sort: [], + _source: { + host: { + ip: ['2001:db8:85a3::8a2e:370:7334', '192.168.1.4'], + }, + }, + }, + ], + }, + }, + }; + expect(getIPFromBucket(InfraNodeType.host, bucket)).toBe('192.168.1.4'); + }); + it('should NOT return ipv6 address', 
() => { + const bucket: InfraSnapshotNodeGroupByBucket = { + key: { + id: 'example-01', + name: 'example-01', + }, + ip: { + hits: { + total: { value: 1 }, + hits: [ + { + _index: 'metricbeat-2019-01-01', + _type: '_doc', + _id: '29392939', + _score: null, + sort: [], + _source: { + host: { + ip: ['2001:db8:85a3::8a2e:370:7334'], + }, + }, + }, + ], + }, + }, + }; + expect(getIPFromBucket(InfraNodeType.host, bucket)).toBe(null); + }); + }); +}); diff --git a/x-pack/plugins/infra/server/lib/snapshot/response_helpers.ts b/x-pack/plugins/infra/server/lib/snapshot/response_helpers.ts new file mode 100644 index 0000000000000..d22f41ff152f7 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/response_helpers.ts @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { isNumber, last, max, sum, get } from 'lodash'; +import moment from 'moment'; + +import { + InfraSnapshotMetricType, + InfraSnapshotNodePath, + InfraSnapshotNodeMetric, + InfraNodeType, +} from '../../graphql/types'; +import { getIntervalInSeconds } from '../../utils/get_interval_in_seconds'; +import { InfraSnapshotRequestOptions } from './types'; +import { findInventoryModel } from '../../../common/inventory_models'; + +export interface InfraSnapshotNodeMetricsBucket { + key: { id: string }; + histogram: { + buckets: InfraSnapshotMetricsBucket[]; + }; +} + +// Jumping through TypeScript hoops here: +// We need an interface that has the known members 'key' and 'doc_count' and also +// an unknown number of members with unknown names but known format, containing the +// metrics. +// This union type is the only way I found to express this that TypeScript accepts. 
+export interface InfraSnapshotBucketWithKey { + key: string | number; + doc_count: number; +} + +export interface InfraSnapshotBucketWithValues { + [name: string]: { value: number; normalized_value?: number }; +} + +export type InfraSnapshotMetricsBucket = InfraSnapshotBucketWithKey & InfraSnapshotBucketWithValues; + +interface InfraSnapshotIpHit { + _index: string; + _type: string; + _id: string; + _score: number | null; + _source: { + host: { + ip: string[] | string; + }; + }; + sort: number[]; +} + +export interface InfraSnapshotNodeGroupByBucket { + key: { + id: string; + name: string; + [groupByField: string]: string; + }; + ip: { + hits: { + total: { value: number }; + hits: InfraSnapshotIpHit[]; + }; + }; +} + +export const isIPv4 = (subject: string) => /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$/.test(subject); + +export const getIPFromBucket = ( + nodeType: InfraNodeType, + bucket: InfraSnapshotNodeGroupByBucket +): string | null => { + const inventoryModel = findInventoryModel(nodeType); + if (!inventoryModel.fields.ip) { + return null; + } + const ip = get(bucket, `ip.hits.hits[0]._source.${inventoryModel.fields.ip}`, null) as + | string[] + | null; + if (Array.isArray(ip)) { + return ip.find(isIPv4) || null; + } else if (typeof ip === 'string') { + return ip; + } + + return null; +}; + +export const getNodePath = ( + groupBucket: InfraSnapshotNodeGroupByBucket, + options: InfraSnapshotRequestOptions +): InfraSnapshotNodePath[] => { + const node = groupBucket.key; + const path = options.groupBy.map(gb => { + return { value: node[`${gb.field}`], label: node[`${gb.field}`] } as InfraSnapshotNodePath; + }); + const ip = getIPFromBucket(options.nodeType, groupBucket); + path.push({ value: node.id, label: node.name || node.id, ip }); + return path; +}; + +interface NodeMetricsForLookup { + [nodeId: string]: InfraSnapshotMetricsBucket[]; +} + +export const getNodeMetricsForLookup = ( + metrics: InfraSnapshotNodeMetricsBucket[] +): NodeMetricsForLookup => { + return 
metrics.reduce((acc: NodeMetricsForLookup, metric) => { + acc[`${metric.key.id}`] = metric.histogram.buckets; + return acc; + }, {}); +}; + +// In the returned object, +// value contains the value from the last bucket spanning a full interval +// max and avg are calculated from all buckets returned for the timerange +export const getNodeMetrics = ( + nodeBuckets: InfraSnapshotMetricsBucket[], + options: InfraSnapshotRequestOptions +): InfraSnapshotNodeMetric => { + if (!nodeBuckets) { + return { + name: options.metric.type, + value: null, + max: null, + avg: null, + }; + } + const lastBucket = findLastFullBucket(nodeBuckets, options); + const result = { + name: options.metric.type, + value: getMetricValueFromBucket(options.metric.type, lastBucket), + max: calculateMax(nodeBuckets, options.metric.type), + avg: calculateAvg(nodeBuckets, options.metric.type), + }; + return result; +}; + +const findLastFullBucket = ( + buckets: InfraSnapshotMetricsBucket[], + options: InfraSnapshotRequestOptions +) => { + const to = moment.utc(options.timerange.to); + const bucketSize = getIntervalInSeconds(options.timerange.interval); + return buckets.reduce((current, item) => { + const itemKey = isNumber(item.key) ? 
item.key : parseInt(item.key, 10); + const date = moment.utc(itemKey + bucketSize * 1000); + if (!date.isAfter(to) && item.doc_count > 0) { + return item; + } + return current; + }, last(buckets)); +}; + +const getMetricValueFromBucket = ( + type: InfraSnapshotMetricType, + bucket: InfraSnapshotMetricsBucket +) => { + const metric = bucket[type]; + return (metric && (metric.normalized_value || metric.value)) || 0; +}; + +function calculateMax(buckets: InfraSnapshotMetricsBucket[], type: InfraSnapshotMetricType) { + return max(buckets.map(bucket => getMetricValueFromBucket(type, bucket))) || 0; +} + +function calculateAvg(buckets: InfraSnapshotMetricsBucket[], type: InfraSnapshotMetricType) { + return sum(buckets.map(bucket => getMetricValueFromBucket(type, bucket))) / buckets.length || 0; +} diff --git a/x-pack/plugins/infra/server/lib/snapshot/snapshot.ts b/x-pack/plugins/infra/server/lib/snapshot/snapshot.ts new file mode 100644 index 0000000000000..1a724673608a2 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/snapshot.ts @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { InfraSnapshotNode } from '../../graphql/types'; +import { InfraDatabaseSearchResponse } from '../adapters/framework'; +import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; +import { InfraSources } from '../sources'; + +import { JsonObject } from '../../../common/typed_json'; +import { SNAPSHOT_COMPOSITE_REQUEST_SIZE } from './constants'; +import { + getGroupedNodesSources, + getMetricsAggregations, + getMetricsSources, + getDateHistogramOffset, +} from './query_helpers'; +import { + getNodeMetrics, + getNodeMetricsForLookup, + getNodePath, + InfraSnapshotNodeGroupByBucket, + InfraSnapshotNodeMetricsBucket, +} from './response_helpers'; +import { getAllCompositeData } from '../../utils/get_all_composite_data'; +import { createAfterKeyHandler } from '../../utils/create_afterkey_handler'; +import { findInventoryModel } from '../../../common/inventory_models'; +import { InfraSnapshotRequestOptions } from './types'; +import { createTimeRangeWithInterval } from './create_timerange_with_interval'; + +export class InfraSnapshot { + constructor(private readonly libs: { sources: InfraSources; framework: KibanaFramework }) {} + + public async getNodes( + requestContext: RequestHandlerContext, + options: InfraSnapshotRequestOptions + ): Promise<{ nodes: InfraSnapshotNode[]; interval: string }> { + // Both requestGroupedNodes and requestNodeMetrics may send several requests to elasticsearch + // in order to page through the results of their respective composite aggregations. + // Both chains of requests are supposed to run in parallel, and their results be merged + // when they have both been completed. 
+ const timeRangeWithIntervalApplied = await createTimeRangeWithInterval( + this.libs.framework, + requestContext, + options + ); + const optionsWithTimerange = { ...options, timerange: timeRangeWithIntervalApplied }; + const groupedNodesPromise = requestGroupedNodes( + requestContext, + optionsWithTimerange, + this.libs.framework + ); + const nodeMetricsPromise = requestNodeMetrics( + requestContext, + optionsWithTimerange, + this.libs.framework + ); + + const groupedNodeBuckets = await groupedNodesPromise; + const nodeMetricBuckets = await nodeMetricsPromise; + return { + nodes: mergeNodeBuckets(groupedNodeBuckets, nodeMetricBuckets, options), + interval: timeRangeWithIntervalApplied.interval, + }; + } +} + +const bucketSelector = ( + response: InfraDatabaseSearchResponse<{}, InfraSnapshotAggregationResponse> +) => (response.aggregations && response.aggregations.nodes.buckets) || []; + +const handleAfterKey = createAfterKeyHandler( + 'body.aggregations.nodes.composite.after', + input => input?.aggregations?.nodes?.after_key +); + +const requestGroupedNodes = async ( + requestContext: RequestHandlerContext, + options: InfraSnapshotRequestOptions, + framework: KibanaFramework +): Promise<InfraSnapshotNodeGroupByBucket[]> => { + const inventoryModel = findInventoryModel(options.nodeType); + const query = { + allowNoIndices: true, + index: `${options.sourceConfiguration.logAlias},${options.sourceConfiguration.metricAlias}`, + ignoreUnavailable: true, + body: { + query: { + bool: { + filter: buildFilters(options), + }, + }, + size: 0, + aggregations: { + nodes: { + composite: { + size: SNAPSHOT_COMPOSITE_REQUEST_SIZE, + sources: getGroupedNodesSources(options), + }, + aggs: { + ip: { + top_hits: { + sort: [{ [options.sourceConfiguration.fields.timestamp]: { order: 'desc' } }], + _source: { + includes: inventoryModel.fields.ip ?
 [inventoryModel.fields.ip] : [], + }, + size: 1, + }, + }, + }, + }, + }, + }, + }; + + return await getAllCompositeData< + InfraSnapshotAggregationResponse, + InfraSnapshotNodeGroupByBucket + >(framework, requestContext, query, bucketSelector, handleAfterKey); +}; + +const requestNodeMetrics = async ( + requestContext: RequestHandlerContext, + options: InfraSnapshotRequestOptions, + framework: KibanaFramework +): Promise<InfraSnapshotNodeMetricsBucket[]> => { + const index = + options.metric.type === 'logRate' + ? `${options.sourceConfiguration.logAlias}` + : `${options.sourceConfiguration.metricAlias}`; + + const query = { + allowNoIndices: true, + index, + ignoreUnavailable: true, + body: { + query: { + bool: { + filter: buildFilters(options, false), + }, + }, + size: 0, + aggregations: { + nodes: { + composite: { + size: SNAPSHOT_COMPOSITE_REQUEST_SIZE, + sources: getMetricsSources(options), + }, + aggregations: { + histogram: { + date_histogram: { + field: options.sourceConfiguration.fields.timestamp, + interval: options.timerange.interval || '1m', + offset: getDateHistogramOffset(options), + extended_bounds: { + min: options.timerange.from, + max: options.timerange.to, + }, + }, + aggregations: getMetricsAggregations(options), + }, + }, + }, + }, + }, + }; + return await getAllCompositeData< + InfraSnapshotAggregationResponse, + InfraSnapshotNodeMetricsBucket + >(framework, requestContext, query, bucketSelector, handleAfterKey); +}; + +// buckets can be InfraSnapshotNodeGroupByBucket[] or InfraSnapshotNodeMetricsBucket[] +// but typing this in a way that makes TypeScript happy is unreadable (if possible at all) +interface InfraSnapshotAggregationResponse { + nodes: { + buckets: any[]; + after_key: { [id: string]: string }; + }; +} + +const mergeNodeBuckets = ( + nodeGroupByBuckets: InfraSnapshotNodeGroupByBucket[], + nodeMetricsBuckets: InfraSnapshotNodeMetricsBucket[], + options: InfraSnapshotRequestOptions +): InfraSnapshotNode[] => { + const nodeMetricsForLookup =
getNodeMetricsForLookup(nodeMetricsBuckets); + + return nodeGroupByBuckets.map(node => { + return { + path: getNodePath(node, options), + metric: getNodeMetrics(nodeMetricsForLookup[node.key.id], options), + }; + }); +}; + +const createQueryFilterClauses = (filterQuery: JsonObject | undefined) => + filterQuery ? [filterQuery] : []; + +const buildFilters = (options: InfraSnapshotRequestOptions, withQuery = true) => { + let filters: any = [ + { + range: { + [options.sourceConfiguration.fields.timestamp]: { + gte: options.timerange.from, + lte: options.timerange.to, + format: 'epoch_millis', + }, + }, + }, + ]; + + if (withQuery) { + filters = [...createQueryFilterClauses(options.filterQuery), ...filters]; + } + + if (options.accountId) { + filters.push({ + term: { + 'cloud.account.id': options.accountId, + }, + }); + } + + if (options.region) { + filters.push({ + term: { + 'cloud.region': options.region, + }, + }); + } + + return filters; +}; diff --git a/x-pack/plugins/infra/server/lib/snapshot/types.ts b/x-pack/plugins/infra/server/lib/snapshot/types.ts new file mode 100644 index 0000000000000..31823b2811121 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/snapshot/types.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { JsonObject } from '../../../common/typed_json'; +import { + InfraNodeType, + InfraSourceConfiguration, + InfraTimerangeInput, + InfraSnapshotGroupbyInput, + InfraSnapshotMetricInput, +} from '../../../public/graphql/types'; + +export interface InfraSnapshotRequestOptions { + nodeType: InfraNodeType; + sourceConfiguration: InfraSourceConfiguration; + timerange: InfraTimerangeInput; + groupBy: InfraSnapshotGroupbyInput[]; + metric: InfraSnapshotMetricInput; + filterQuery: JsonObject | undefined; + accountId?: string; + region?: string; +} diff --git a/x-pack/plugins/infra/server/lib/source_status.ts b/x-pack/plugins/infra/server/lib/source_status.ts new file mode 100644 index 0000000000000..1f0845b6b223f --- /dev/null +++ b/x-pack/plugins/infra/server/lib/source_status.ts @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { InfraSources } from './sources'; + +export class InfraSourceStatus { + constructor( + private readonly adapter: InfraSourceStatusAdapter, + private readonly libs: { sources: InfraSources } + ) {} + + public async getLogIndexNames( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const indexNames = await this.adapter.getIndexNames( + requestContext, + sourceConfiguration.configuration.logAlias + ); + return indexNames; + } + public async getMetricIndexNames( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const indexNames = await this.adapter.getIndexNames( + requestContext, + sourceConfiguration.configuration.metricAlias + ); + return indexNames; + } + public async hasLogAlias( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const hasAlias = await this.adapter.hasAlias( + requestContext, + sourceConfiguration.configuration.logAlias + ); + return hasAlias; + } + public async hasMetricAlias( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const hasAlias = await this.adapter.hasAlias( + requestContext, + sourceConfiguration.configuration.metricAlias + ); + return hasAlias; + } + public async hasLogIndices( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const hasIndices = await this.adapter.hasIndices( + 
requestContext, + sourceConfiguration.configuration.logAlias + ); + return hasIndices; + } + public async hasMetricIndices( + requestContext: RequestHandlerContext, + sourceId: string + ): Promise { + const sourceConfiguration = await this.libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const hasIndices = await this.adapter.hasIndices( + requestContext, + sourceConfiguration.configuration.metricAlias + ); + return hasIndices; + } +} + +export interface InfraSourceStatusAdapter { + getIndexNames(requestContext: RequestHandlerContext, aliasName: string): Promise; + hasAlias(requestContext: RequestHandlerContext, aliasName: string): Promise; + hasIndices(requestContext: RequestHandlerContext, indexNames: string): Promise; +} diff --git a/x-pack/plugins/infra/server/lib/sources/defaults.ts b/x-pack/plugins/infra/server/lib/sources/defaults.ts new file mode 100644 index 0000000000000..b9ead0d169ee6 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/defaults.ts @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { InfraSourceConfiguration } from './types'; + +export const defaultSourceConfiguration: InfraSourceConfiguration = { + name: 'Default', + description: '', + metricAlias: 'metricbeat-*', + logAlias: 'filebeat-*,kibana_sample_data_logs*', + fields: { + container: 'container.id', + host: 'host.name', + message: ['message', '@message'], + pod: 'kubernetes.pod.uid', + tiebreaker: '_doc', + timestamp: '@timestamp', + }, + logColumns: [ + { + timestampColumn: { + id: '5e7f964a-be8a-40d8-88d2-fbcfbdca0e2f', + }, + }, + { + fieldColumn: { + id: 'eb9777a8-fcd3-420e-ba7d-172fff6da7a2', + field: 'event.dataset', + }, + }, + { + messageColumn: { + id: 'b645d6da-824b-4723-9a2a-e8cece1645c0', + }, + }, + ], +}; diff --git a/x-pack/plugins/infra/server/lib/sources/errors.ts b/x-pack/plugins/infra/server/lib/sources/errors.ts new file mode 100644 index 0000000000000..9f835f21443c6 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/errors.ts @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export class NotFoundError extends Error { + constructor(message?: string) { + super(message); + Object.setPrototypeOf(this, new.target.prototype); + } +} diff --git a/x-pack/plugins/infra/server/lib/sources/index.ts b/x-pack/plugins/infra/server/lib/sources/index.ts new file mode 100644 index 0000000000000..6837f953ea18a --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/index.ts @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './defaults'; +export * from './saved_object_mappings'; +export * from './sources'; +export * from './types'; diff --git a/x-pack/plugins/infra/server/lib/sources/saved_object_mappings.ts b/x-pack/plugins/infra/server/lib/sources/saved_object_mappings.ts new file mode 100644 index 0000000000000..973a790eeedaf --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/saved_object_mappings.ts @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { ElasticsearchMappingOf } from '../../utils/typed_elasticsearch_mappings'; +import { InfraSavedSourceConfiguration } from './types'; + +export const infraSourceConfigurationSavedObjectType = 'infrastructure-ui-source'; + +export const infraSourceConfigurationSavedObjectMappings: { + [infraSourceConfigurationSavedObjectType]: ElasticsearchMappingOf; +} = { + [infraSourceConfigurationSavedObjectType]: { + properties: { + name: { + type: 'text', + }, + description: { + type: 'text', + }, + metricAlias: { + type: 'keyword', + }, + logAlias: { + type: 'keyword', + }, + fields: { + properties: { + container: { + type: 'keyword', + }, + host: { + type: 'keyword', + }, + pod: { + type: 'keyword', + }, + tiebreaker: { + type: 'keyword', + }, + timestamp: { + type: 'keyword', + }, + }, + }, + logColumns: { + type: 'nested', + properties: { + timestampColumn: { + properties: { + id: { + type: 'keyword', + }, + }, + }, + messageColumn: { + properties: { + id: { + type: 'keyword', + }, + }, + }, + fieldColumn: { + properties: { + id: { + type: 'keyword', + }, + field: { + type: 'keyword', + }, + }, + }, + }, + }, + }, + }, +}; diff --git a/x-pack/plugins/infra/server/lib/sources/sources.test.ts b/x-pack/plugins/infra/server/lib/sources/sources.test.ts new file mode 100644 index 
0000000000000..4a83ca730ff83 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/sources.test.ts @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import { InfraSources } from './sources'; + +describe('the InfraSources lib', () => { + describe('getSourceConfiguration method', () => { + test('returns a source configuration if it exists', async () => { + const sourcesLib = new InfraSources({ + config: createMockStaticConfiguration({}), + }); + + const request: any = createRequestContext({ + id: 'TEST_ID', + version: 'foo', + updated_at: '2000-01-01T00:00:00.000Z', + attributes: { + metricAlias: 'METRIC_ALIAS', + logAlias: 'LOG_ALIAS', + fields: { + container: 'CONTAINER', + host: 'HOST', + pod: 'POD', + tiebreaker: 'TIEBREAKER', + timestamp: 'TIMESTAMP', + }, + }, + }); + + expect(await sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ + id: 'TEST_ID', + version: 'foo', + updatedAt: 946684800000, + configuration: { + metricAlias: 'METRIC_ALIAS', + logAlias: 'LOG_ALIAS', + fields: { + container: 'CONTAINER', + host: 'HOST', + pod: 'POD', + tiebreaker: 'TIEBREAKER', + timestamp: 'TIMESTAMP', + }, + }, + }); + }); + + test('adds missing attributes from the static configuration to a source configuration', async () => { + const sourcesLib = new InfraSources({ + config: createMockStaticConfiguration({ + default: { + metricAlias: 'METRIC_ALIAS', + logAlias: 'LOG_ALIAS', + fields: { + host: 'HOST', + pod: 'POD', + tiebreaker: 'TIEBREAKER', + timestamp: 'TIMESTAMP', + }, + }, + }), + }); + + const request: any = createRequestContext({ + id: 'TEST_ID', + version: 'foo', + updated_at: '2000-01-01T00:00:00.000Z', + attributes: { + fields: { + container: 'CONTAINER', + }, + }, + }); + + expect(await 
sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ + id: 'TEST_ID', + version: 'foo', + updatedAt: 946684800000, + configuration: { + metricAlias: 'METRIC_ALIAS', + logAlias: 'LOG_ALIAS', + fields: { + container: 'CONTAINER', + host: 'HOST', + pod: 'POD', + tiebreaker: 'TIEBREAKER', + timestamp: 'TIMESTAMP', + }, + }, + }); + }); + + test('adds missing attributes from the default configuration to a source configuration', async () => { + const sourcesLib = new InfraSources({ + config: createMockStaticConfiguration({}), + }); + + const request: any = createRequestContext({ + id: 'TEST_ID', + version: 'foo', + updated_at: '2000-01-01T00:00:00.000Z', + attributes: {}, + }); + + expect(await sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ + id: 'TEST_ID', + version: 'foo', + updatedAt: 946684800000, + configuration: { + metricAlias: expect.any(String), + logAlias: expect.any(String), + fields: { + container: expect.any(String), + host: expect.any(String), + pod: expect.any(String), + tiebreaker: expect.any(String), + timestamp: expect.any(String), + }, + }, + }); + }); + }); +}); + +const createMockStaticConfiguration = (sources: any) => ({ + enabled: true, + query: { + partitionSize: 1, + partitionFactor: 1, + }, + sources, +}); + +const createRequestContext = (savedObject?: any) => { + return { + core: { + savedObjects: { + client: { + async get() { + return savedObject; + }, + errors: { + isNotFoundError() { + return typeof savedObject === 'undefined'; + }, + }, + }, + }, + }, + }; +}; diff --git a/x-pack/plugins/infra/server/lib/sources/sources.ts b/x-pack/plugins/infra/server/lib/sources/sources.ts new file mode 100644 index 0000000000000..2b38d81e4a8d5 --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/sources.ts @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import * as runtimeTypes from 'io-ts'; +import { failure } from 'io-ts/lib/PathReporter'; +import { identity, constant } from 'fp-ts/lib/function'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { map, fold } from 'fp-ts/lib/Either'; +import { RequestHandlerContext } from 'src/core/server'; +import { defaultSourceConfiguration } from './defaults'; +import { NotFoundError } from './errors'; +import { infraSourceConfigurationSavedObjectType } from './saved_object_mappings'; +import { + InfraSavedSourceConfiguration, + InfraSourceConfiguration, + InfraStaticSourceConfiguration, + pickSavedSourceConfiguration, + SourceConfigurationSavedObjectRuntimeType, + StaticSourceConfigurationRuntimeType, +} from './types'; +import { InfraConfig } from '../../../../../../plugins/infra/server'; + +interface Libs { + config: InfraConfig; +} + +export class InfraSources { + private internalSourceConfigurations: Map = new Map(); + private readonly libs: Libs; + + constructor(libs: Libs) { + this.libs = libs; + } + + public async getSourceConfiguration(requestContext: RequestHandlerContext, sourceId: string) { + const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); + + const savedSourceConfiguration = await this.getInternalSourceConfiguration(sourceId) + .then(internalSourceConfiguration => ({ + id: sourceId, + version: undefined, + updatedAt: undefined, + origin: 'internal' as 'internal', + configuration: mergeSourceConfiguration( + staticDefaultSourceConfiguration, + internalSourceConfiguration + ), + })) + .catch(err => + err instanceof NotFoundError + ? 
this.getSavedSourceConfiguration(requestContext, sourceId).then(result => ({ + ...result, + configuration: mergeSourceConfiguration( + staticDefaultSourceConfiguration, + result.configuration + ), + })) + : Promise.reject(err) + ) + .catch(err => + requestContext.core.savedObjects.client.errors.isNotFoundError(err) + ? Promise.resolve({ + id: sourceId, + version: undefined, + updatedAt: undefined, + origin: 'fallback' as 'fallback', + configuration: staticDefaultSourceConfiguration, + }) + : Promise.reject(err) + ); + + return savedSourceConfiguration; + } + + public async getAllSourceConfigurations(requestContext: RequestHandlerContext) { + const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); + + const savedSourceConfigurations = await this.getAllSavedSourceConfigurations(requestContext); + + return savedSourceConfigurations.map(savedSourceConfiguration => ({ + ...savedSourceConfiguration, + configuration: mergeSourceConfiguration( + staticDefaultSourceConfiguration, + savedSourceConfiguration.configuration + ), + })); + } + + public async createSourceConfiguration( + requestContext: RequestHandlerContext, + sourceId: string, + source: InfraSavedSourceConfiguration + ) { + const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); + + const newSourceConfiguration = mergeSourceConfiguration( + staticDefaultSourceConfiguration, + source + ); + + const createdSourceConfiguration = convertSavedObjectToSavedSourceConfiguration( + await requestContext.core.savedObjects.client.create( + infraSourceConfigurationSavedObjectType, + pickSavedSourceConfiguration(newSourceConfiguration) as any, + { id: sourceId } + ) + ); + + return { + ...createdSourceConfiguration, + configuration: mergeSourceConfiguration( + staticDefaultSourceConfiguration, + createdSourceConfiguration.configuration + ), + }; + } + + public async deleteSourceConfiguration(requestContext: RequestHandlerContext, sourceId: string) { + await 
requestContext.core.savedObjects.client.delete( + infraSourceConfigurationSavedObjectType, + sourceId + ); + } + + public async updateSourceConfiguration( + requestContext: RequestHandlerContext, + sourceId: string, + sourceProperties: InfraSavedSourceConfiguration + ) { + const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); + + const { configuration, version } = await this.getSourceConfiguration(requestContext, sourceId); + + const updatedSourceConfigurationAttributes = mergeSourceConfiguration( + configuration, + sourceProperties + ); + + const updatedSourceConfiguration = convertSavedObjectToSavedSourceConfiguration( + await requestContext.core.savedObjects.client.update( + infraSourceConfigurationSavedObjectType, + sourceId, + pickSavedSourceConfiguration(updatedSourceConfigurationAttributes) as any, + { + version, + } + ) + ); + + return { + ...updatedSourceConfiguration, + configuration: mergeSourceConfiguration( + staticDefaultSourceConfiguration, + updatedSourceConfiguration.configuration + ), + }; + } + + public async defineInternalSourceConfiguration( + sourceId: string, + sourceProperties: InfraStaticSourceConfiguration + ) { + this.internalSourceConfigurations.set(sourceId, sourceProperties); + } + + public async getInternalSourceConfiguration(sourceId: string) { + const internalSourceConfiguration = this.internalSourceConfigurations.get(sourceId); + + if (!internalSourceConfiguration) { + throw new NotFoundError( + `Failed to load internal source configuration: no configuration "${sourceId}" found.` + ); + } + + return internalSourceConfiguration; + } + + private async getStaticDefaultSourceConfiguration() { + const staticSourceConfiguration = pipe( + runtimeTypes + .type({ + sources: runtimeTypes.type({ + default: StaticSourceConfigurationRuntimeType, + }), + }) + .decode(this.libs.config), + map(({ sources: { default: defaultConfiguration } }) => defaultConfiguration), + fold(constant({}), identity) + ); + + 
return mergeSourceConfiguration(defaultSourceConfiguration, staticSourceConfiguration); + } + + private async getSavedSourceConfiguration( + requestContext: RequestHandlerContext, + sourceId: string + ) { + const savedObject = await requestContext.core.savedObjects.client.get( + infraSourceConfigurationSavedObjectType, + sourceId + ); + + return convertSavedObjectToSavedSourceConfiguration(savedObject); + } + + private async getAllSavedSourceConfigurations(requestContext: RequestHandlerContext) { + const savedObjects = await requestContext.core.savedObjects.client.find({ + type: infraSourceConfigurationSavedObjectType, + }); + + return savedObjects.saved_objects.map(convertSavedObjectToSavedSourceConfiguration); + } +} + +const mergeSourceConfiguration = ( + first: InfraSourceConfiguration, + ...others: InfraStaticSourceConfiguration[] +) => + others.reduce( + (previousSourceConfiguration, currentSourceConfiguration) => ({ + ...previousSourceConfiguration, + ...currentSourceConfiguration, + fields: { + ...previousSourceConfiguration.fields, + ...currentSourceConfiguration.fields, + }, + }), + first + ); + +const convertSavedObjectToSavedSourceConfiguration = (savedObject: unknown) => + pipe( + SourceConfigurationSavedObjectRuntimeType.decode(savedObject), + map(savedSourceConfiguration => ({ + id: savedSourceConfiguration.id, + version: savedSourceConfiguration.version, + updatedAt: savedSourceConfiguration.updated_at, + origin: 'stored' as 'stored', + configuration: savedSourceConfiguration.attributes, + })), + fold(errors => { + throw new Error(failure(errors).join('\n')); + }, identity) + ); diff --git a/x-pack/plugins/infra/server/lib/sources/types.ts b/x-pack/plugins/infra/server/lib/sources/types.ts new file mode 100644 index 0000000000000..1f850635cf35a --- /dev/null +++ b/x-pack/plugins/infra/server/lib/sources/types.ts @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/* eslint-disable @typescript-eslint/no-empty-interface */ + +import * as runtimeTypes from 'io-ts'; +import moment from 'moment'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { chain } from 'fp-ts/lib/Either'; + +export const TimestampFromString = new runtimeTypes.Type( + 'TimestampFromString', + (input): input is number => typeof input === 'number', + (input, context) => + pipe( + runtimeTypes.string.validate(input, context), + chain(stringInput => { + const momentValue = moment(stringInput); + return momentValue.isValid() + ? runtimeTypes.success(momentValue.valueOf()) + : runtimeTypes.failure(stringInput, context); + }) + ), + output => new Date(output).toISOString() +); + +/** + * Stored source configuration as read from and written to saved objects + */ + +const SavedSourceConfigurationFieldsRuntimeType = runtimeTypes.partial({ + container: runtimeTypes.string, + host: runtimeTypes.string, + pod: runtimeTypes.string, + tiebreaker: runtimeTypes.string, + timestamp: runtimeTypes.string, +}); + +export const SavedSourceConfigurationTimestampColumnRuntimeType = runtimeTypes.type({ + timestampColumn: runtimeTypes.type({ + id: runtimeTypes.string, + }), +}); + +export const SavedSourceConfigurationMessageColumnRuntimeType = runtimeTypes.type({ + messageColumn: runtimeTypes.type({ + id: runtimeTypes.string, + }), +}); + +export const SavedSourceConfigurationFieldColumnRuntimeType = runtimeTypes.type({ + fieldColumn: runtimeTypes.type({ + id: runtimeTypes.string, + field: runtimeTypes.string, + }), +}); + +export const SavedSourceConfigurationColumnRuntimeType = runtimeTypes.union([ + SavedSourceConfigurationTimestampColumnRuntimeType, + SavedSourceConfigurationMessageColumnRuntimeType, + SavedSourceConfigurationFieldColumnRuntimeType, +]); + +export const 
SavedSourceConfigurationRuntimeType = runtimeTypes.partial({ + name: runtimeTypes.string, + description: runtimeTypes.string, + metricAlias: runtimeTypes.string, + logAlias: runtimeTypes.string, + fields: SavedSourceConfigurationFieldsRuntimeType, + logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), +}); + +export interface InfraSavedSourceConfiguration + extends runtimeTypes.TypeOf {} + +export const pickSavedSourceConfiguration = ( + value: InfraSourceConfiguration +): InfraSavedSourceConfiguration => { + const { name, description, metricAlias, logAlias, fields, logColumns } = value; + const { container, host, pod, tiebreaker, timestamp } = fields; + + return { + name, + description, + metricAlias, + logAlias, + fields: { container, host, pod, tiebreaker, timestamp }, + logColumns, + }; +}; + +/** + * Static source configuration as read from the configuration file + */ + +const StaticSourceConfigurationFieldsRuntimeType = runtimeTypes.partial({ + ...SavedSourceConfigurationFieldsRuntimeType.props, + message: runtimeTypes.array(runtimeTypes.string), +}); + +export const StaticSourceConfigurationRuntimeType = runtimeTypes.partial({ + name: runtimeTypes.string, + description: runtimeTypes.string, + metricAlias: runtimeTypes.string, + logAlias: runtimeTypes.string, + fields: StaticSourceConfigurationFieldsRuntimeType, + logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), +}); + +export interface InfraStaticSourceConfiguration + extends runtimeTypes.TypeOf {} + +/** + * Full source configuration type after all cleanup has been done at the edges + */ + +const SourceConfigurationFieldsRuntimeType = runtimeTypes.type({ + ...StaticSourceConfigurationFieldsRuntimeType.props, +}); + +export const SourceConfigurationRuntimeType = runtimeTypes.type({ + ...SavedSourceConfigurationRuntimeType.props, + fields: SourceConfigurationFieldsRuntimeType, + logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), +}); + 
+export interface InfraSourceConfiguration + extends runtimeTypes.TypeOf {} + +/** + * Saved object type with metadata + */ + +export const SourceConfigurationSavedObjectRuntimeType = runtimeTypes.intersection([ + runtimeTypes.type({ + id: runtimeTypes.string, + attributes: SavedSourceConfigurationRuntimeType, + }), + runtimeTypes.partial({ + version: runtimeTypes.string, + updated_at: TimestampFromString, + }), +]); + +export interface SourceConfigurationSavedObject + extends runtimeTypes.TypeOf {} diff --git a/x-pack/plugins/infra/server/new_platform_index.ts b/x-pack/plugins/infra/server/new_platform_index.ts new file mode 100644 index 0000000000000..e59897a6b241d --- /dev/null +++ b/x-pack/plugins/infra/server/new_platform_index.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { PluginInitializerContext } from 'src/core/server'; +import { InfraServerPlugin, InfraPluginSetup } from './new_platform_plugin'; +import { config, InfraConfig } from '../../../../plugins/infra/server'; +import { InfraServerPluginDeps } from './lib/adapters/framework'; +export { config, InfraConfig, InfraServerPluginDeps, InfraPluginSetup }; + +export function plugin(context: PluginInitializerContext) { + return new InfraServerPlugin(context); +} diff --git a/x-pack/plugins/infra/server/new_platform_plugin.ts b/x-pack/plugins/infra/server/new_platform_plugin.ts new file mode 100644 index 0000000000000..147729a1d0b3e --- /dev/null +++ b/x-pack/plugins/infra/server/new_platform_plugin.ts @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { CoreSetup, PluginInitializerContext } from 'src/core/server'; +import { i18n } from '@kbn/i18n'; +import { Server } from 'hapi'; +import { InfraConfig } from '../../../../plugins/infra/server'; +import { initInfraServer } from './infra_server'; +import { InfraBackendLibs, InfraDomainLibs } from './lib/infra_types'; +import { FrameworkFieldsAdapter } from './lib/adapters/fields/framework_fields_adapter'; +import { KibanaFramework } from './lib/adapters/framework/kibana_framework_adapter'; +import { InfraKibanaLogEntriesAdapter } from './lib/adapters/log_entries/kibana_log_entries_adapter'; +import { KibanaMetricsAdapter } from './lib/adapters/metrics/kibana_metrics_adapter'; +import { InfraElasticsearchSourceStatusAdapter } from './lib/adapters/source_status'; +import { InfraFieldsDomain } from './lib/domains/fields_domain'; +import { InfraLogEntriesDomain } from './lib/domains/log_entries_domain'; +import { InfraMetricsDomain } from './lib/domains/metrics_domain'; +import { InfraLogAnalysis } from './lib/log_analysis'; +import { InfraSnapshot } from './lib/snapshot'; +import { InfraSourceStatus } from './lib/source_status'; +import { InfraSources } from './lib/sources'; +import { InfraServerPluginDeps } from './lib/adapters/framework'; +import { METRICS_FEATURE, LOGS_FEATURE } from './features'; +import { UsageCollector } from './usage/usage_collector'; +import { APP_ID } from '../index'; +import { InfraStaticSourceConfiguration } from './lib/sources/types'; + +export interface KbnServer extends Server { + usage: any; +} + +const logsSampleDataLinkLabel = i18n.translate('xpack.infra.sampleDataLinkLabel', { + defaultMessage: 'Logs', +}); + +export interface InfraPluginSetup { + defineInternalSourceConfiguration: ( + sourceId: string, + sourceProperties: InfraStaticSourceConfiguration + ) => void; +} + +const DEFAULT_CONFIG: InfraConfig = { + enabled: true, + query: { + partitionSize: 75, + partitionFactor: 1.2, + }, +}; + +export class 
InfraServerPlugin { + public config: InfraConfig = DEFAULT_CONFIG; + public libs: InfraBackendLibs | undefined; + + constructor(context: PluginInitializerContext) { + const config$ = context.config.create(); + config$.subscribe(configValue => { + this.config = { + ...DEFAULT_CONFIG, + enabled: configValue.enabled, + query: { + ...DEFAULT_CONFIG.query, + ...configValue.query, + }, + }; + }); + } + + getLibs() { + if (!this.libs) { + throw new Error('libs not set up yet'); + } + return this.libs; + } + + setup(core: CoreSetup, plugins: InfraServerPluginDeps) { + const framework = new KibanaFramework(core, this.config, plugins); + const sources = new InfraSources({ + config: this.config, + }); + const sourceStatus = new InfraSourceStatus( + new InfraElasticsearchSourceStatusAdapter(framework), + { + sources, + } + ); + const snapshot = new InfraSnapshot({ sources, framework }); + const logAnalysis = new InfraLogAnalysis({ framework }); + + // TODO: separate these out individually and do away with "domains" as a temporary group + const domainLibs: InfraDomainLibs = { + fields: new InfraFieldsDomain(new FrameworkFieldsAdapter(framework), { + sources, + }), + logEntries: new InfraLogEntriesDomain(new InfraKibanaLogEntriesAdapter(framework), { + sources, + }), + metrics: new InfraMetricsDomain(new KibanaMetricsAdapter(framework)), + }; + + this.libs = { + configuration: this.config, + framework, + logAnalysis, + snapshot, + sources, + sourceStatus, + ...domainLibs, + }; + + plugins.features.registerFeature(METRICS_FEATURE); + plugins.features.registerFeature(LOGS_FEATURE); + + plugins.home.sampleData.addAppLinksToSampleDataset('logs', [ + { + path: `/app/${APP_ID}#/logs`, + label: logsSampleDataLinkLabel, + icon: 'logsApp', + }, + ]); + + initInfraServer(this.libs); + + // Telemetry + UsageCollector.registerUsageCollector(plugins.usageCollection); + + return { + defineInternalSourceConfiguration(sourceId, sourceProperties) { + 
sources.defineInternalSourceConfiguration(sourceId, sourceProperties); + }, + } as InfraPluginSetup; + } +} diff --git a/x-pack/plugins/infra/server/routes/inventory_metadata/index.ts b/x-pack/plugins/infra/server/routes/inventory_metadata/index.ts new file mode 100644 index 0000000000000..33328bdfebaf4 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/inventory_metadata/index.ts @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { schema } from '@kbn/config-schema'; +import Boom from 'boom'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { InfraBackendLibs } from '../../lib/infra_types'; +import { throwErrors } from '../../../common/runtime_types'; + +import { + InventoryMetaRequestRT, + InventoryMetaResponseRT, +} from '../../../common/http_api/inventory_meta_api'; +import { getCloudMetadata } from './lib/get_cloud_metadata'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initInventoryMetaRoute = (libs: InfraBackendLibs) => { + const { framework } = libs; + + framework.registerRoute( + { + method: 'post', + path: '/api/infra/inventory/meta', + validate: { + body: escapeHatch, + }, + }, + async (requestContext, request, response) => { + try { + const { sourceId, nodeType } = pipe( + InventoryMetaRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const { configuration } = await libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const awsMetadata = await getCloudMetadata( + framework, + requestContext, + configuration, + nodeType + ); + + return response.ok({ + body: InventoryMetaResponseRT.encode(awsMetadata), + }); + } catch (error) { + return 
response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/inventory_metadata/lib/get_cloud_metadata.ts b/x-pack/plugins/infra/server/routes/inventory_metadata/lib/get_cloud_metadata.ts new file mode 100644 index 0000000000000..dcac23d1a3d9d --- /dev/null +++ b/x-pack/plugins/infra/server/routes/inventory_metadata/lib/get_cloud_metadata.ts @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { RequestHandlerContext } from 'kibana/server'; +import { InventoryCloudAccount } from '../../../../common/http_api/inventory_meta_api'; +import { + InfraMetadataAggregationResponse, + InfraMetadataAggregationBucket, +} from '../../../lib/adapters/framework'; +import { InfraSourceConfiguration } from '../../../lib/sources'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { InventoryItemType } from '../../../../common/inventory_models/types'; +import { findInventoryModel } from '../../../../common/inventory_models'; + +export interface CloudMetaData { + accounts: InventoryCloudAccount[]; + projects: string[]; + regions: string[]; +} + +export const getCloudMetadata = async ( + framework: KibanaFramework, + req: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + nodeType: InventoryItemType +): Promise => { + const model = findInventoryModel(nodeType); + + const metricQuery = { + allowNoIndices: true, + ignoreUnavailable: true, + index: sourceConfiguration.metricAlias, + body: { + query: { + bool: { + must: [{ match: { 'event.module': model.requiredModule } }], + }, + }, + size: 0, + aggs: { + accounts: { + terms: { + field: 'cloud.account.id', + size: 1000, + }, + aggs: { + accountNames: { + terms: { + field: 
'cloud.account.name', + size: 1000, + }, + }, + }, + }, + regions: { + terms: { + field: 'cloud.region', + size: 1000, + }, + }, + }, + }, + }; + + const response = await framework.callWithRequest< + {}, + { + accounts?: { + buckets: Array< + InfraMetadataAggregationBucket & { accountNames: InfraMetadataAggregationResponse } + >; + }; + projects?: InfraMetadataAggregationResponse; + regions?: InfraMetadataAggregationResponse; + } + >(req, 'search', metricQuery); + + const projectBuckets = + response.aggregations && response.aggregations.projects + ? response.aggregations.projects.buckets + : []; + + const regionBuckets = + response.aggregations && response.aggregations.regions + ? response.aggregations.regions.buckets + : []; + + const accounts: InventoryCloudAccount[] = []; + if (response.aggregations && response.aggregations.accounts) { + response.aggregations.accounts.buckets.forEach(b => { + if (b.accountNames.buckets.length) { + accounts.push({ + value: b.key, + // There should only be one account name for each account id. + name: b.accountNames.buckets[0].key, + }); + } + }); + } + return { + accounts, + projects: projectBuckets.map(b => b.key), + regions: regionBuckets.map(b => b.key), + }; +}; diff --git a/x-pack/plugins/infra/server/routes/ip_to_hostname.ts b/x-pack/plugins/infra/server/routes/ip_to_hostname.ts new file mode 100644 index 0000000000000..5ad79b3d17a13 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/ip_to_hostname.ts @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import { first } from 'lodash'; +import { schema } from '@kbn/config-schema'; +import { InfraBackendLibs } from '../lib/infra_types'; + +export interface IpToHostResponse { + host: string; +} + +interface HostDoc { + _source: { + host: { + name: string; + }; + }; +} + +const ipToHostSchema = schema.object({ + ip: schema.string(), + index_pattern: schema.string(), +}); + +export const initIpToHostName = ({ framework }: InfraBackendLibs) => { + const { callWithRequest } = framework; + framework.registerRoute( + { + method: 'post', + path: '/api/infra/ip_to_host', + validate: { + body: ipToHostSchema, + }, + }, + async (requestContext, { body }, response) => { + try { + const params = { + index: body.index_pattern, + body: { + size: 1, + query: { + match: { 'host.ip': body.ip }, + }, + _source: ['host.name'], + }, + }; + const { hits } = await callWithRequest(requestContext, 'search', params); + if (hits.total.value === 0) { + return response.notFound({ + body: { message: 'Host with matching IP address not found.' }, + }); + } + const hostDoc = first(hits.hits); + return response.ok({ body: { host: hostDoc._source.host.name } }); + } catch ({ statusCode = 500, message = 'Unknown error occurred' }) { + return response.customError({ + statusCode, + body: { message }, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/log_analysis/index.ts b/x-pack/plugins/infra/server/routes/log_analysis/index.ts new file mode 100644 index 0000000000000..378e32cb3582c --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_analysis/index.ts @@ -0,0 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +export * from './results'; +export * from './validation'; diff --git a/x-pack/plugins/infra/server/routes/log_analysis/results/index.ts b/x-pack/plugins/infra/server/routes/log_analysis/results/index.ts new file mode 100644 index 0000000000000..1749421277719 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_analysis/results/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './log_entry_rate'; diff --git a/x-pack/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts b/x-pack/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts new file mode 100644 index 0000000000000..9778311bd8e58 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import Boom from 'boom'; + +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; +import { InfraBackendLibs } from '../../../lib/infra_types'; +import { + LOG_ANALYSIS_GET_LOG_ENTRY_RATE_PATH, + getLogEntryRateRequestPayloadRT, + getLogEntryRateSuccessReponsePayloadRT, + GetLogEntryRateSuccessResponsePayload, +} from '../../../../common/http_api/log_analysis'; +import { throwErrors } from '../../../../common/runtime_types'; +import { NoLogRateResultsIndexError } from '../../../lib/log_analysis'; + +const anyObject = schema.object({}, { allowUnknowns: true }); + +export const initGetLogEntryRateRoute = ({ framework, logAnalysis }: InfraBackendLibs) => { + framework.registerRoute( + { + method: 'post', + path: LOG_ANALYSIS_GET_LOG_ENTRY_RATE_PATH, + validate: { + // short-circuit forced @kbn/config-schema validation so we can do io-ts validation + body: anyObject, + }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + getLogEntryRateRequestPayloadRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const logEntryRateBuckets = await logAnalysis.getLogEntryRateBuckets( + requestContext, + payload.data.sourceId, + payload.data.timeRange.startTime, + payload.data.timeRange.endTime, + payload.data.bucketDuration, + request + ); + + return response.ok({ + body: getLogEntryRateSuccessReponsePayloadRT.encode({ + data: { + bucketDuration: payload.data.bucketDuration, + histogramBuckets: logEntryRateBuckets, + totalNumberOfLogEntries: getTotalNumberOfLogEntries(logEntryRateBuckets), + }, + }), + }); + } catch (e) { + const { statusCode = 500, message = 'Unknown error occurred' } = e; + if (e instanceof NoLogRateResultsIndexError) { + return response.notFound({ body: { message } }); + } + return response.customError({ + statusCode, + body: { message }, + }); + } + } + ); +}; + 
+const getTotalNumberOfLogEntries = ( + logEntryRateBuckets: GetLogEntryRateSuccessResponsePayload['data']['histogramBuckets'] +) => { + return logEntryRateBuckets.reduce((sumNumberOfLogEntries, bucket) => { + const sumPartitions = bucket.partitions.reduce((partitionsTotal, partition) => { + return (partitionsTotal += partition.numberOfLogEntries); + }, 0); + return (sumNumberOfLogEntries += sumPartitions); + }, 0); +}; diff --git a/x-pack/plugins/infra/server/routes/log_analysis/validation/index.ts b/x-pack/plugins/infra/server/routes/log_analysis/validation/index.ts new file mode 100644 index 0000000000000..727faca69298e --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_analysis/validation/index.ts @@ -0,0 +1,7 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './indices'; diff --git a/x-pack/plugins/infra/server/routes/log_analysis/validation/indices.ts b/x-pack/plugins/infra/server/routes/log_analysis/validation/indices.ts new file mode 100644 index 0000000000000..fe579124cfe10 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_analysis/validation/indices.ts @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import Boom from 'boom'; + +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; +import { InfraBackendLibs } from '../../../lib/infra_types'; +import { + LOG_ANALYSIS_VALIDATE_INDICES_PATH, + validationIndicesRequestPayloadRT, + validationIndicesResponsePayloadRT, + ValidationIndicesError, +} from '../../../../common/http_api'; + +import { throwErrors } from '../../../../common/runtime_types'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initValidateLogAnalysisIndicesRoute = ({ framework }: InfraBackendLibs) => { + framework.registerRoute( + { + method: 'post', + path: LOG_ANALYSIS_VALIDATE_INDICES_PATH, + validate: { body: escapeHatch }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + validationIndicesRequestPayloadRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const { fields, indices } = payload.data; + const errors: ValidationIndicesError[] = []; + + // Query each pattern individually, to map correctly the errors + await Promise.all( + indices.map(async index => { + const fieldCaps = await framework.callWithRequest(requestContext, 'fieldCaps', { + allow_no_indices: true, + fields: fields.map(field => field.name), + ignore_unavailable: true, + index, + }); + + if (fieldCaps.indices.length === 0) { + errors.push({ + error: 'INDEX_NOT_FOUND', + index, + }); + return; + } + + fields.forEach(({ name: fieldName, validTypes }) => { + const fieldMetadata = fieldCaps.fields[fieldName]; + + if (fieldMetadata === undefined) { + errors.push({ + error: 'FIELD_NOT_FOUND', + index, + field: fieldName, + }); + } else { + const fieldTypes = Object.keys(fieldMetadata); + + if (!fieldTypes.every(fieldType => validTypes.includes(fieldType))) { + errors.push({ + error: `FIELD_NOT_VALID`, + index, + field: fieldName, + }); + } + } + }); + }) + 
); + + return response.ok({ + body: validationIndicesResponsePayloadRT.encode({ data: { errors } }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/log_entries/index.ts b/x-pack/plugins/infra/server/routes/log_entries/index.ts new file mode 100644 index 0000000000000..8fed914c3dc8c --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_entries/index.ts @@ -0,0 +1,9 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export * from './item'; +export * from './summary'; +export * from './summary_highlights'; diff --git a/x-pack/plugins/infra/server/routes/log_entries/item.ts b/x-pack/plugins/infra/server/routes/log_entries/item.ts new file mode 100644 index 0000000000000..22663cb2001f0 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_entries/item.ts @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import Boom from 'boom'; + +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; + +import { throwErrors } from '../../../common/runtime_types'; + +import { InfraBackendLibs } from '../../lib/infra_types'; +import { + LOG_ENTRIES_ITEM_PATH, + logEntriesItemRequestRT, + logEntriesItemResponseRT, +} from '../../../common/http_api'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initLogEntriesItemRoute = ({ framework, sources, logEntries }: InfraBackendLibs) => { + framework.registerRoute( + { + method: 'post', + path: LOG_ENTRIES_ITEM_PATH, + validate: { body: escapeHatch }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + logEntriesItemRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const { id, sourceId } = payload; + const sourceConfiguration = (await sources.getSourceConfiguration(requestContext, sourceId)) + .configuration; + + const logEntry = await logEntries.getLogItem(requestContext, id, sourceConfiguration); + + return response.ok({ + body: logEntriesItemResponseRT.encode({ + data: logEntry, + }), + }); + } catch (error) { + return response.internalError({ body: error.message }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/log_entries/summary.ts b/x-pack/plugins/infra/server/routes/log_entries/summary.ts new file mode 100644 index 0000000000000..05643adbe781f --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_entries/summary.ts @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import Boom from 'boom'; + +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; + +import { throwErrors } from '../../../common/runtime_types'; + +import { InfraBackendLibs } from '../../lib/infra_types'; +import { + LOG_ENTRIES_SUMMARY_PATH, + logEntriesSummaryRequestRT, + logEntriesSummaryResponseRT, +} from '../../../common/http_api/log_entries'; +import { parseFilterQuery } from '../../utils/serialized_query'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initLogEntriesSummaryRoute = ({ framework, logEntries }: InfraBackendLibs) => { + framework.registerRoute( + { + method: 'post', + path: LOG_ENTRIES_SUMMARY_PATH, + validate: { body: escapeHatch }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + logEntriesSummaryRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + const { sourceId, startDate, endDate, bucketSize, query } = payload; + + const buckets = await logEntries.getLogSummaryBucketsBetween( + requestContext, + sourceId, + startDate, + endDate, + bucketSize, + parseFilterQuery(query) + ); + + return response.ok({ + body: logEntriesSummaryResponseRT.encode({ + data: { + start: startDate, + end: endDate, + buckets, + }, + }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/log_entries/summary_highlights.ts b/x-pack/plugins/infra/server/routes/log_entries/summary_highlights.ts new file mode 100644 index 0000000000000..ecccd931bb371 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/log_entries/summary_highlights.ts @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import Boom from 'boom'; + +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; + +import { throwErrors } from '../../../common/runtime_types'; + +import { InfraBackendLibs } from '../../lib/infra_types'; +import { + LOG_ENTRIES_SUMMARY_HIGHLIGHTS_PATH, + logEntriesSummaryHighlightsRequestRT, + logEntriesSummaryHighlightsResponseRT, +} from '../../../common/http_api/log_entries'; +import { parseFilterQuery } from '../../utils/serialized_query'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initLogEntriesSummaryHighlightsRoute = ({ + framework, + logEntries, +}: InfraBackendLibs) => { + framework.registerRoute( + { + method: 'post', + path: LOG_ENTRIES_SUMMARY_HIGHLIGHTS_PATH, + validate: { body: escapeHatch }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + logEntriesSummaryHighlightsRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + const { sourceId, startDate, endDate, bucketSize, query, highlightTerms } = payload; + + const bucketsPerHighlightTerm = await logEntries.getLogSummaryHighlightBucketsBetween( + requestContext, + sourceId, + startDate, + endDate, + bucketSize, + highlightTerms, + parseFilterQuery(query) + ); + + return response.ok({ + body: logEntriesSummaryHighlightsResponseRT.encode({ + data: bucketsPerHighlightTerm.map(buckets => ({ + start: startDate, + end: endDate, + buckets, + })), + }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/metadata/index.ts b/x-pack/plugins/infra/server/routes/metadata/index.ts new file mode 100644 index 0000000000000..a1f6311a103eb --- /dev/null +++ 
b/x-pack/plugins/infra/server/routes/metadata/index.ts @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { schema } from '@kbn/config-schema'; +import Boom from 'boom'; +import { get } from 'lodash'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { + InfraMetadataFeature, + InfraMetadataRequestRT, + InfraMetadataRT, +} from '../../../common/http_api/metadata_api'; +import { InfraBackendLibs } from '../../lib/infra_types'; +import { getMetricMetadata } from './lib/get_metric_metadata'; +import { pickFeatureName } from './lib/pick_feature_name'; +import { hasAPMData } from './lib/has_apm_data'; +import { getCloudMetricsMetadata } from './lib/get_cloud_metric_metadata'; +import { getNodeInfo } from './lib/get_node_info'; +import { throwErrors } from '../../../common/runtime_types'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initMetadataRoute = (libs: InfraBackendLibs) => { + const { framework } = libs; + + framework.registerRoute( + { + method: 'post', + path: '/api/infra/metadata', + validate: { + body: escapeHatch, + }, + }, + async (requestContext, request, response) => { + try { + const { nodeId, nodeType, sourceId } = pipe( + InfraMetadataRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const { configuration } = await libs.sources.getSourceConfiguration( + requestContext, + sourceId + ); + const metricsMetadata = await getMetricMetadata( + framework, + requestContext, + configuration, + nodeId, + nodeType + ); + const metricFeatures = pickFeatureName(metricsMetadata.buckets).map( + nameToFeature('metrics') + ); + + const info = await getNodeInfo(framework, requestContext, 
configuration, nodeId, nodeType); + const cloudInstanceId = get(info, 'cloud.instance.id'); + + const cloudMetricsMetadata = cloudInstanceId + ? await getCloudMetricsMetadata(framework, requestContext, configuration, cloudInstanceId) + : { buckets: [] }; + const cloudMetricsFeatures = pickFeatureName(cloudMetricsMetadata.buckets).map( + nameToFeature('metrics') + ); + const hasAPM = await hasAPMData(framework, requestContext, configuration, nodeId, nodeType); + const apmMetricFeatures = hasAPM ? [{ name: 'apm.transaction', source: 'apm' }] : []; + + const id = metricsMetadata.id; + const name = metricsMetadata.name || id; + return response.ok({ + body: InfraMetadataRT.encode({ + id, + name, + features: [...metricFeatures, ...cloudMetricsFeatures, ...apmMetricFeatures], + info, + }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; + +const nameToFeature = (source: string) => (name: string): InfraMetadataFeature => ({ + name, + source, +}); diff --git a/x-pack/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts b/x-pack/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts new file mode 100644 index 0000000000000..75ca3ae3caee2 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { + InfraMetadataAggregationBucket, + InfraMetadataAggregationResponse, +} from '../../../lib/adapters/framework'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { InfraSourceConfiguration } from '../../../lib/sources'; +import { CLOUD_METRICS_MODULES } from '../../../lib/constants'; + +export interface InfraCloudMetricsAdapterResponse { + buckets: InfraMetadataAggregationBucket[]; +} + +export const getCloudMetricsMetadata = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + instanceId: string +): Promise => { + const metricQuery = { + allowNoIndices: true, + ignoreUnavailable: true, + index: sourceConfiguration.metricAlias, + body: { + query: { + bool: { + filter: [{ match: { 'cloud.instance.id': instanceId } }], + should: CLOUD_METRICS_MODULES.map(module => ({ match: { 'event.module': module } })), + }, + }, + size: 0, + aggs: { + metrics: { + terms: { + field: 'event.dataset', + size: 1000, + }, + }, + }, + }, + }; + + const response = await framework.callWithRequest< + {}, + { + metrics?: InfraMetadataAggregationResponse; + } + >(requestContext, 'search', metricQuery); + + const buckets = + response.aggregations && response.aggregations.metrics + ? response.aggregations.metrics.buckets + : []; + + return { buckets }; +}; diff --git a/x-pack/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts b/x-pack/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts new file mode 100644 index 0000000000000..191339565b813 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { get } from 'lodash'; +import { RequestHandlerContext } from 'src/core/server'; +import { + InfraMetadataAggregationBucket, + InfraMetadataAggregationResponse, +} from '../../../lib/adapters/framework'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { InfraSourceConfiguration } from '../../../lib/sources'; +import { findInventoryFields } from '../../../../common/inventory_models'; +import { InventoryItemType } from '../../../../common/inventory_models/types'; + +export interface InfraMetricsAdapterResponse { + id: string; + name?: string; + buckets: InfraMetadataAggregationBucket[]; +} + +export const getMetricMetadata = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + nodeId: string, + nodeType: InventoryItemType +): Promise => { + const fields = findInventoryFields(nodeType, sourceConfiguration.fields); + const metricQuery = { + allowNoIndices: true, + ignoreUnavailable: true, + index: sourceConfiguration.metricAlias, + body: { + query: { + bool: { + must_not: [{ match: { 'event.dataset': 'aws.ec2' } }], + filter: [ + { + match: { [fields.id]: nodeId }, + }, + ], + }, + }, + size: 0, + aggs: { + nodeName: { + terms: { + field: fields.name, + size: 1, + }, + }, + metrics: { + terms: { + field: 'event.dataset', + size: 1000, + }, + }, + }, + }, + }; + + const response = await framework.callWithRequest< + {}, + { + metrics?: InfraMetadataAggregationResponse; + nodeName?: InfraMetadataAggregationResponse; + } + >(requestContext, 'search', metricQuery); + + const buckets = + response.aggregations && response.aggregations.metrics + ? 
response.aggregations.metrics.buckets + : []; + + return { + id: nodeId, + name: get(response, ['aggregations', 'nodeName', 'buckets', 0, 'key'], nodeId), + buckets, + }; +}; diff --git a/x-pack/plugins/infra/server/routes/metadata/lib/get_node_info.ts b/x-pack/plugins/infra/server/routes/metadata/lib/get_node_info.ts new file mode 100644 index 0000000000000..4ff0df30abedd --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metadata/lib/get_node_info.ts @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { first, set, startsWith } from 'lodash'; +import { RequestHandlerContext } from 'src/core/server'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { InfraSourceConfiguration } from '../../../lib/sources'; +import { InfraNodeType } from '../../../graphql/types'; +import { InfraMetadataInfo } from '../../../../common/http_api/metadata_api'; +import { getPodNodeName } from './get_pod_node_name'; +import { CLOUD_METRICS_MODULES } from '../../../lib/constants'; +import { findInventoryFields } from '../../../../common/inventory_models'; +import { InventoryItemType } from '../../../../common/inventory_models/types'; + +export const getNodeInfo = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + nodeId: string, + nodeType: InventoryItemType +): Promise => { + // If the nodeType is a Kubernetes pod then we need to get the node info + // from a host record instead of a pod. This is due to the fact that any host + // can report pod details and we can't rely on the host/cloud information associated + // with the kubernetes.pod.uid. 
We need to first look up the
+ */ + +import { first, get } from 'lodash'; +import { RequestHandlerContext } from 'src/core/server'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { InfraSourceConfiguration } from '../../../lib/sources'; +import { findInventoryFields } from '../../../../common/inventory_models'; + +export const getPodNodeName = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + sourceConfiguration: InfraSourceConfiguration, + nodeId: string, + nodeType: 'host' | 'pod' | 'container' +): Promise => { + const fields = findInventoryFields(nodeType, sourceConfiguration.fields); + const params = { + allowNoIndices: true, + ignoreUnavailable: true, + terminateAfter: 1, + index: sourceConfiguration.metricAlias, + body: { + size: 1, + _source: ['kubernetes.node.name'], + query: { + bool: { + filter: [ + { match: { [fields.id]: nodeId } }, + { exists: { field: `kubernetes.node.name` } }, + ], + }, + }, + }, + }; + const response = await framework.callWithRequest< + { _source: { kubernetes: { node: { name: string } } } }, + {} + >(requestContext, 'search', params); + const firstHit = first(response.hits.hits); + if (firstHit) { + return get(firstHit, '_source.kubernetes.node.name'); + } +}; diff --git a/x-pack/plugins/infra/server/routes/metadata/lib/has_apm_data.ts b/x-pack/plugins/infra/server/routes/metadata/lib/has_apm_data.ts new file mode 100644 index 0000000000000..9ca0819d74d46 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metadata/lib/has_apm_data.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+import { RequestHandlerContext } from 'src/core/server';
+
+import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter';
+import { InfraSourceConfiguration } from '../../../lib/sources';
+import { findInventoryFields } from '../../../../common/inventory_models';
+import { InventoryItemType } from '../../../../common/inventory_models/types';
+
+export const hasAPMData = async (
+  framework: KibanaFramework,
+  requestContext: RequestHandlerContext,
+  sourceConfiguration: InfraSourceConfiguration,
+  nodeId: string,
+  nodeType: InventoryItemType
+) => {
+  const apmIndices = await framework.plugins.apm.getApmIndices(
+    requestContext.core.savedObjects.client
+  );
+  const apmIndex = apmIndices['apm_oss.transactionIndices'] || 'apm-*';
+  const fields = findInventoryFields(nodeType, sourceConfiguration.fields);
+
+  // There is a bug in APM ECS data where host.name is not set.
+  // This will be fixed with: https://github.com/elastic/apm-server/issues/2502
+  const nodeFieldName = nodeType === 'host' ? 'host.hostname' : fields.id;
+  const params = {
+    allowNoIndices: true,
+    ignoreUnavailable: true,
+    terminateAfter: 1,
+    index: apmIndex,
+    body: {
+      size: 0,
+      query: {
+        bool: {
+          filter: [
+            {
+              match: { [nodeFieldName]: nodeId },
+            },
+            {
+              exists: { field: 'service.name' },
+            },
+            {
+              exists: { field: 'transaction.type' },
+            },
+          ],
+        },
+      },
+    },
+  };
+  const response = await framework.callWithRequest<{}, {}>(requestContext, 'search', params);
+  return response.hits.total.value !== 0;
+};
diff --git a/x-pack/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts b/x-pack/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts
new file mode 100644
index 0000000000000..8b6bb49d9f645
--- /dev/null
+++ b/x-pack/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts
@@ -0,0 +1,16 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { InfraMetadataAggregationBucket } from '../../../lib/adapters/framework'; + +export const pickFeatureName = (buckets: InfraMetadataAggregationBucket[]): string[] => { + if (buckets) { + const metadata = buckets.map(bucket => bucket.key); + return metadata; + } else { + return []; + } +}; diff --git a/x-pack/plugins/infra/server/routes/metrics_explorer/index.ts b/x-pack/plugins/infra/server/routes/metrics_explorer/index.ts new file mode 100644 index 0000000000000..64cdb9318b6e1 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metrics_explorer/index.ts @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import Boom from 'boom'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { schema } from '@kbn/config-schema'; +import { InfraBackendLibs } from '../../lib/infra_types'; +import { getGroupings } from './lib/get_groupings'; +import { populateSeriesWithTSVBData } from './lib/populate_series_with_tsvb_data'; +import { metricsExplorerRequestBodyRT, metricsExplorerResponseRT } from '../../../common/http_api'; +import { throwErrors } from '../../../common/runtime_types'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initMetricExplorerRoute = (libs: InfraBackendLibs) => { + const { framework } = libs; + const { callWithRequest } = framework; + + framework.registerRoute( + { + method: 'post', + path: '/api/infra/metrics_explorer', + validate: { + body: escapeHatch, + }, + }, + async (requestContext, request, response) => { + try { + const payload = pipe( + 
metricsExplorerRequestBodyRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + + const search = (searchOptions: object) => + callWithRequest<{}, Aggregation>(requestContext, 'search', searchOptions); + + // First we get the groupings from a composite aggregation + const groupings = await getGroupings(search, payload); + + // Then we take the results and fill in the data from TSVB with the + // user's custom metrics + const seriesWithMetrics = await Promise.all( + groupings.series.map( + populateSeriesWithTSVBData(request, payload, framework, requestContext) + ) + ); + return response.ok({ + body: metricsExplorerResponseRT.encode({ ...groupings, series: seriesWithMetrics }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts new file mode 100644 index 0000000000000..9e5fe16d482b2 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { InfraMetricModelMetricType } from '../../../lib/adapters/metrics'; +import { MetricsExplorerRequestBody } from '../types'; +import { InfraMetric } from '../../../graphql/types'; +import { TSVBMetricModel } from '../../../../common/inventory_models/types'; +export const createMetricModel = (options: MetricsExplorerRequestBody): TSVBMetricModel => { + return { + id: InfraMetric.custom, + requires: [], + index_pattern: options.indexPattern, + interval: options.timerange.interval, + time_field: options.timerange.field, + type: 'timeseries', + // Create one series per metric requested. The series.id will be used to identify the metric + // when the responses are processed and combined with the grouping request. + series: options.metrics.map((metric, index) => { + // If the metric is a rate then we need to add TSVB metrics for calculating the derivative + if (metric.aggregation === 'rate') { + const aggType = 'max'; + return { + id: `metric_${index}`, + split_mode: 'everything', + metrics: [ + { + id: `metric_${aggType}_${index}`, + field: metric.field, + type: aggType, + }, + { + id: `metric_deriv_${aggType}_${index}`, + field: `metric_${aggType}_${index}`, + type: 'derivative', + unit: '1s', + }, + { + id: `metric_posonly_deriv_${aggType}_${index}`, + type: 'calculation', + variables: [ + { id: 'var-rate', name: 'rate', field: `metric_deriv_${aggType}_${index}` }, + ], + script: 'params.rate > 0.0 ? 
params.rate : 0.0', + }, + ], + }; + } + // Create a basic TSVB series with a single metric + const aggregation = metric.aggregation || 'avg'; + + return { + id: `metric_${index}`, + split_mode: 'everything', + metrics: [ + { + field: metric.field, + id: `metric_${aggregation}_${index}`, + type: InfraMetricModelMetricType[aggregation], + }, + ], + }; + }), + }; +}; diff --git a/x-pack/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts new file mode 100644 index 0000000000000..7111d3e7f8ca4 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { isObject, set } from 'lodash'; +import { InfraDatabaseSearchResponse } from '../../../lib/adapters/framework'; +import { MetricsExplorerRequestBody, MetricsExplorerResponse } from '../types'; + +interface GroupingAggregation { + groupingsCount: { + value: number; + }; + groupings: { + after_key?: { + [name: string]: string; + }; + buckets: Array<{ key: { [id: string]: string }; doc_count: number }>; + }; +} + +const EMPTY_RESPONSE = { + series: [{ id: 'ALL', columns: [], rows: [] }], + pageInfo: { total: 0, afterKey: null }, +}; + +export const getGroupings = async ( + search: (options: object) => Promise>, + options: MetricsExplorerRequestBody +): Promise => { + if (!options.groupBy) { + return EMPTY_RESPONSE; + } + const limit = options.limit || 9; + const params = { + allowNoIndices: true, + ignoreUnavailable: true, + index: options.indexPattern, + body: { + size: 0, + query: { + bool: { + should: [ + ...options.metrics + .filter(m => m.field) + .map(m => ({ + exists: { field: m.field }, + })), + ], + filter: [ + { + range: { 
+ [options.timerange.field]: { + gte: options.timerange.from, + lte: options.timerange.to, + format: 'epoch_millis', + }, + }, + }, + ] as object[], + }, + }, + aggs: { + groupingsCount: { + cardinality: { field: options.groupBy }, + }, + groupings: { + composite: { + size: limit, + sources: [{ groupBy: { terms: { field: options.groupBy, order: 'asc' } } }], + }, + }, + }, + }, + }; + + if (params.body.query.bool.should.length !== 0) { + set(params, 'body.query.bool.minimum_should_match', 1); + } + + if (options.afterKey) { + set(params, 'body.aggs.groupings.composite.after', { groupBy: options.afterKey }); + } + + if (options.filterQuery) { + try { + const filterObject = JSON.parse(options.filterQuery); + if (isObject(filterObject)) { + params.body.query.bool.filter.push(filterObject); + } + } catch (err) { + params.body.query.bool.filter.push({ + query_string: { + query: options.filterQuery, + analyze_wildcard: true, + }, + }); + } + } + + const response = await search(params); + if (response.hits.total.value === 0) { + return { ...EMPTY_RESPONSE, series: [] }; + } + if (!response.aggregations) { + throw new Error('Aggregations should be present.'); + } + const { groupings, groupingsCount } = response.aggregations; + const { after_key: afterKey } = groupings; + return { + series: groupings.buckets.map(bucket => { + return { id: bucket.key.groupBy, rows: [], columns: [] }; + }), + pageInfo: { + total: groupingsCount.value, + afterKey: afterKey && groupings.buckets.length === limit ? afterKey.groupBy : null, + }, + }; +}; diff --git a/x-pack/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts new file mode 100644 index 0000000000000..17fc46b41278a --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { union } from 'lodash'; +import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; +import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; +import { + MetricsExplorerRow, + MetricsExplorerSeries, + MetricsExplorerRequestBody, + MetricsExplorerColumn, +} from '../types'; +import { createMetricModel } from './create_metrics_model'; +import { JsonObject } from '../../../../common/typed_json'; +import { calculateMetricInterval } from '../../../utils/calculate_metric_interval'; + +export const populateSeriesWithTSVBData = ( + request: KibanaRequest, + options: MetricsExplorerRequestBody, + framework: KibanaFramework, + requestContext: RequestHandlerContext +) => async (series: MetricsExplorerSeries) => { + // IF there are no metrics selected then we should return an empty result. + if (options.metrics.length === 0) { + return { + ...series, + columns: [], + rows: [], + }; + } + + // Set the filter for the group by or match everything + const filters: JsonObject[] = options.groupBy + ? 
[{ match: { [options.groupBy]: series.id } }] + : []; + if (options.filterQuery) { + try { + const filterQuery = JSON.parse(options.filterQuery); + filters.push(filterQuery); + } catch (error) { + filters.push({ + query_string: { + query: options.filterQuery, + analyze_wildcard: true, + }, + }); + } + } + const timerange = { min: options.timerange.from, max: options.timerange.to }; + + // Create the TSVB model based on the request options + const model = createMetricModel(options); + const calculatedInterval = await calculateMetricInterval( + framework, + requestContext, + { + indexPattern: options.indexPattern, + timestampField: options.timerange.field, + timerange: options.timerange, + }, + options.metrics + .filter(metric => metric.field) + .map(metric => { + return metric + .field!.split(/\./) + .slice(0, 2) + .join('.'); + }) + ); + + if (calculatedInterval) { + model.interval = `>=${calculatedInterval}s`; + } + + // Get TSVB results using the model, timerange and filters + const tsvbResults = await framework.makeTSVBRequest(requestContext, model, timerange, filters); + + // If there is no data `custom` will not exist. + if (!tsvbResults.custom) { + return { + ...series, + columns: [], + rows: [], + }; + } + + // Setup the dynamic columns and row attributes depending on if the user is doing a group by + // and multiple metrics + const attributeColumns: MetricsExplorerColumn[] = + options.groupBy != null ? [{ name: 'groupBy', type: 'string' }] : []; + const metricColumns: MetricsExplorerColumn[] = options.metrics.map((m, i) => ({ + name: `metric_${i}`, + type: 'number', + })); + const rowAttributes = options.groupBy != null ? { groupBy: series.id } : {}; + + // To support multiple metrics, there are multiple TSVB series which need to be combined + // into one MetricExplorerRow (Canvas row). This is done by collecting all the timestamps + // across each TSVB series. Then for each timestamp we find the values and create a + // MetricsExplorerRow. 
+ const timestamps = tsvbResults.custom.series.reduce( + (currentTimestamps, tsvbSeries) => + union( + currentTimestamps, + tsvbSeries.data.map(row => row[0]) + ).sort(), + [] as number[] + ); + // Combine the TSVB series for multiple metrics. + const rows = timestamps.map(timestamp => { + return tsvbResults.custom.series.reduce( + (currentRow, tsvbSeries) => { + const matches = tsvbSeries.data.find(d => d[0] === timestamp); + if (matches) { + return { ...currentRow, [tsvbSeries.id]: matches[1] }; + } + return currentRow; + }, + { timestamp, ...rowAttributes } as MetricsExplorerRow + ); + }); + return { + ...series, + rows, + columns: [ + { name: 'timestamp', type: 'date' } as MetricsExplorerColumn, + ...metricColumns, + ...attributeColumns, + ], + }; +}; diff --git a/x-pack/plugins/infra/server/routes/metrics_explorer/types.ts b/x-pack/plugins/infra/server/routes/metrics_explorer/types.ts new file mode 100644 index 0000000000000..f4c5e26c5c6d1 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/metrics_explorer/types.ts @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import * as rt from 'io-ts'; +import { + metricsExplorerMetricRT, + metricsExplorerPageInfoRT, + metricsExplorerColumnRT, + metricsExplorerRowRT, + metricsExplorerSeriesRT, + metricsExplorerRequestBodyRT, + metricsExplorerResponseRT, + metricsExplorerAggregationRT, + metricsExplorerColumnTypeRT, +} from '../../../common/http_api'; + +export type MetricsExplorerAggregation = rt.TypeOf; + +export type MetricsExplorerColumnType = rt.TypeOf; + +export type MetricsExplorerMetric = rt.TypeOf; + +export type MetricsExplorerPageInfo = rt.TypeOf; + +export type MetricsExplorerColumn = rt.TypeOf; + +export type MetricsExplorerRow = rt.TypeOf; + +export type MetricsExplorerSeries = rt.TypeOf; + +export type MetricsExplorerRequestBody = rt.TypeOf; + +export type MetricsExplorerResponse = rt.TypeOf; diff --git a/x-pack/plugins/infra/server/routes/node_details/index.ts b/x-pack/plugins/infra/server/routes/node_details/index.ts new file mode 100644 index 0000000000000..a9419cd27e684 --- /dev/null +++ b/x-pack/plugins/infra/server/routes/node_details/index.ts @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +import Boom from 'boom'; +import { schema } from '@kbn/config-schema'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { InfraBackendLibs } from '../../lib/infra_types'; +import { UsageCollector } from '../../usage/usage_collector'; +import { InfraMetricsRequestOptions } from '../../lib/adapters/metrics'; +import { InfraNodeType, InfraMetric } from '../../graphql/types'; +import { + NodeDetailsRequestRT, + NodeDetailsMetricDataResponseRT, +} from '../../../common/http_api/node_details_api'; +import { throwErrors } from '../../../common/runtime_types'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initNodeDetailsRoute = (libs: InfraBackendLibs) => { + const { framework } = libs; + + framework.registerRoute( + { + method: 'post', + path: '/api/metrics/node_details', + validate: { + body: escapeHatch, + }, + }, + async (requestContext, request, response) => { + try { + const { nodeId, cloudId, nodeType, metrics, timerange, sourceId } = pipe( + NodeDetailsRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + const source = await libs.sources.getSourceConfiguration(requestContext, sourceId); + + UsageCollector.countNode(nodeType); + + const options: InfraMetricsRequestOptions = { + nodeIds: { + nodeId, + cloudId, + }, + nodeType: nodeType as InfraNodeType, + sourceConfiguration: source.configuration, + metrics: metrics as InfraMetric[], + timerange, + }; + return response.ok({ + body: NodeDetailsMetricDataResponseRT.encode({ + metrics: await libs.metrics.getMetrics(requestContext, options, request), + }), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/routes/snapshot/index.ts b/x-pack/plugins/infra/server/routes/snapshot/index.ts new file mode 100644 index 0000000000000..ba7f52e9ec1e7 --- /dev/null +++ 
b/x-pack/plugins/infra/server/routes/snapshot/index.ts @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +import Boom from 'boom'; +import { schema } from '@kbn/config-schema'; +import { pipe } from 'fp-ts/lib/pipeable'; +import { fold } from 'fp-ts/lib/Either'; +import { identity } from 'fp-ts/lib/function'; +import { InfraBackendLibs } from '../../lib/infra_types'; +import { UsageCollector } from '../../usage/usage_collector'; +import { parseFilterQuery } from '../../utils/serialized_query'; +import { InfraNodeType, InfraSnapshotMetricInput } from '../../../public/graphql/types'; +import { SnapshotRequestRT, SnapshotNodeResponseRT } from '../../../common/http_api/snapshot_api'; +import { throwErrors } from '../../../common/runtime_types'; +import { InfraSnapshotRequestOptions } from '../../lib/snapshot/types'; + +const escapeHatch = schema.object({}, { allowUnknowns: true }); + +export const initSnapshotRoute = (libs: InfraBackendLibs) => { + const { framework } = libs; + + framework.registerRoute( + { + method: 'post', + path: '/api/metrics/snapshot', + validate: { + body: escapeHatch, + }, + }, + async (requestContext, request, response) => { + try { + const { + filterQuery, + nodeType, + groupBy, + sourceId, + metric, + timerange, + accountId, + region, + } = pipe( + SnapshotRequestRT.decode(request.body), + fold(throwErrors(Boom.badRequest), identity) + ); + const source = await libs.sources.getSourceConfiguration(requestContext, sourceId); + UsageCollector.countNode(nodeType); + const options: InfraSnapshotRequestOptions = { + filterQuery: parseFilterQuery(filterQuery), + accountId, + region, + // TODO: Use common infra metric and replace graphql type + nodeType: nodeType as InfraNodeType, + groupBy, + sourceConfiguration: source.configuration, + 
// TODO: Use common infra metric and replace graphql type + metric: metric as InfraSnapshotMetricInput, + timerange, + }; + const nodesWithInterval = await libs.snapshot.getNodes(requestContext, options); + return response.ok({ + body: SnapshotNodeResponseRT.encode(nodesWithInterval), + }); + } catch (error) { + return response.internalError({ + body: error.message, + }); + } + } + ); +}; diff --git a/x-pack/plugins/infra/server/saved_objects.ts b/x-pack/plugins/infra/server/saved_objects.ts new file mode 100644 index 0000000000000..2e554300b0ecb --- /dev/null +++ b/x-pack/plugins/infra/server/saved_objects.ts @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { infraSourceConfigurationSavedObjectMappings } from './lib/sources'; +import { metricsExplorerViewSavedObjectMappings } from '../common/saved_objects/metrics_explorer_view'; +import { inventoryViewSavedObjectMappings } from '../common/saved_objects/inventory_view'; + +export const savedObjectMappings = { + ...infraSourceConfigurationSavedObjectMappings, + ...metricsExplorerViewSavedObjectMappings, + ...inventoryViewSavedObjectMappings, +}; diff --git a/x-pack/plugins/infra/server/usage/usage_collector.ts b/x-pack/plugins/infra/server/usage/usage_collector.ts new file mode 100644 index 0000000000000..60b9372b135df --- /dev/null +++ b/x-pack/plugins/infra/server/usage/usage_collector.ts @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { UsageCollectionSetup } from 'src/plugins/usage_collection/server'; +import { InfraNodeType } from '../graphql/types'; +import { InventoryItemType } from '../../common/inventory_models/types'; + +const KIBANA_REPORTING_TYPE = 'infraops'; + +interface InfraopsSum { + infraopsHosts: number; + infraopsDocker: number; + infraopsKubernetes: number; + logs: number; +} + +export class UsageCollector { + public static registerUsageCollector(usageCollection: UsageCollectionSetup): void { + const collector = UsageCollector.getUsageCollector(usageCollection); + usageCollection.registerCollector(collector); + } + + public static getUsageCollector(usageCollection: UsageCollectionSetup) { + return usageCollection.makeUsageCollector({ + type: KIBANA_REPORTING_TYPE, + isReady: () => true, + fetch: async () => { + return this.getReport(); + }, + }); + } + + public static countNode(nodeType: InventoryItemType) { + const bucket = this.getBucket(); + this.maybeInitializeBucket(bucket); + + switch (nodeType) { + case InfraNodeType.pod: + this.counters[bucket].infraopsKubernetes += 1; + break; + case InfraNodeType.container: + this.counters[bucket].infraopsDocker += 1; + break; + default: + this.counters[bucket].infraopsHosts += 1; + } + } + + public static countLogs() { + const bucket = this.getBucket(); + this.maybeInitializeBucket(bucket); + this.counters[bucket].logs += 1; + } + + private static counters: any = {}; + private static BUCKET_SIZE = 3600; // seconds in an hour + private static BUCKET_NUMBER = 24; // report the last 24 hours + + private static getBucket() { + const now = Math.floor(Date.now() / 1000); + return now - (now % this.BUCKET_SIZE); + } + + private static maybeInitializeBucket(bucket: any) { + if (!this.counters[bucket]) { + this.counters[bucket] = { + infraopsHosts: 0, + infraopsDocker: 0, + infraopsKubernetes: 0, + logs: 0, + }; + } + } + + private static getReport() { + const keys = Object.keys(this.counters); + + // only keep the newest 
BUCKET_NUMBER buckets + const cutoff = this.getBucket() - this.BUCKET_SIZE * (this.BUCKET_NUMBER - 1); + keys.forEach(key => { + if (parseInt(key, 10) < cutoff) { + delete this.counters[key]; + } + }); + + // all remaining buckets are current + const sums = Object.keys(this.counters).reduce( + (a: InfraopsSum, b: any) => { + const key = parseInt(b, 10); + return { + infraopsHosts: a.infraopsHosts + this.counters[key].infraopsHosts, + infraopsDocker: a.infraopsDocker + this.counters[key].infraopsDocker, + infraopsKubernetes: a.infraopsKubernetes + this.counters[key].infraopsKubernetes, + logs: a.logs + this.counters[key].logs, + }; + }, + { + infraopsHosts: 0, + infraopsDocker: 0, + infraopsKubernetes: 0, + logs: 0, + } + ); + + return { + last_24_hours: { + hits: { + infraops_hosts: sums.infraopsHosts, + infraops_docker: sums.infraopsDocker, + infraops_kubernetes: sums.infraopsKubernetes, + logs: sums.logs, + }, + }, + }; + } +} diff --git a/x-pack/plugins/infra/server/utils/README.md b/x-pack/plugins/infra/server/utils/README.md new file mode 100644 index 0000000000000..8a6a27aa29867 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/README.md @@ -0,0 +1 @@ +Utils should be data processing functions and other tools.... all in all utils is basicly everything that is not an adaptor, or presenter and yet too much to put in a lib. \ No newline at end of file diff --git a/x-pack/plugins/infra/server/utils/calculate_metric_interval.ts b/x-pack/plugins/infra/server/utils/calculate_metric_interval.ts new file mode 100644 index 0000000000000..586193a3c242d --- /dev/null +++ b/x-pack/plugins/infra/server/utils/calculate_metric_interval.ts @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { InfraNodeType } from '../graphql/types'; +import { findInventoryModel } from '../../common/inventory_models'; +import { KibanaFramework } from '../lib/adapters/framework/kibana_framework_adapter'; + +interface Options { + indexPattern: string; + timestampField: string; + timerange: { + from: number; + to: number; + }; +} + +/** + * Look at the data from metricbeat and get the max period for a given timerange. + * This is useful for visualizing metric modules like s3 that only send metrics once per day. + */ +export const calculateMetricInterval = async ( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + options: Options, + modules?: string[], + nodeType?: InfraNodeType // TODO: check that this type still makes sense +) => { + let from = options.timerange.from; + if (nodeType) { + const inventoryModel = findInventoryModel(nodeType); + from = options.timerange.to - inventoryModel.metrics.defaultTimeRangeInSeconds * 1000; + } + const query = { + allowNoIndices: true, + index: options.indexPattern, + ignoreUnavailable: true, + body: { + query: { + bool: { + filter: [ + { + range: { + [options.timestampField]: { + gte: from, + lte: options.timerange.to, + format: 'epoch_millis', + }, + }, + }, + ], + }, + }, + size: 0, + aggs: { + modules: { + terms: { + field: 'event.dataset', + include: modules, + }, + aggs: { + period: { + max: { + field: 'metricset.period', + }, + }, + }, + }, + }, + }, + }; + + const resp = await framework.callWithRequest<{}, PeriodAggregationData>( + requestContext, + 'search', + query + ); + + // if ES doesn't return an aggregations key, something went seriously wrong. 
+ if (!resp.aggregations) { + return; + } + + const intervals = resp.aggregations.modules.buckets.map(a => a.period.value).filter(v => !!v); + if (!intervals.length) { + return; + } + + return Math.max(...intervals) / 1000; +}; + +interface PeriodAggregationData { + modules: { + buckets: Array<{ + key: string; + doc_count: number; + period: { + value: number; + }; + }>; + }; +} diff --git a/x-pack/plugins/infra/server/utils/create_afterkey_handler.ts b/x-pack/plugins/infra/server/utils/create_afterkey_handler.ts new file mode 100644 index 0000000000000..559fba0799987 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/create_afterkey_handler.ts @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { set } from 'lodash'; +import { InfraDatabaseSearchResponse } from '../lib/adapters/framework'; + +export const createAfterKeyHandler = ( + optionsAfterKeyPath: string | string[], + afterKeySelector: (input: InfraDatabaseSearchResponse) => any +) => (options: Options, response: InfraDatabaseSearchResponse): Options => { + if (!response.aggregations) { + return options; + } + const newOptions = { ...options }; + const afterKey = afterKeySelector(response); + set(newOptions, optionsAfterKeyPath, afterKey); + return newOptions; +}; diff --git a/x-pack/plugins/infra/server/utils/get_all_composite_data.ts b/x-pack/plugins/infra/server/utils/get_all_composite_data.ts new file mode 100644 index 0000000000000..c7ff1b077f685 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/get_all_composite_data.ts @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { RequestHandlerContext } from 'src/core/server'; +import { KibanaFramework } from '../lib/adapters/framework/kibana_framework_adapter'; +import { InfraDatabaseSearchResponse } from '../lib/adapters/framework'; + +export const getAllCompositeData = async < + Aggregation = undefined, + Bucket = {}, + Options extends object = {} +>( + framework: KibanaFramework, + requestContext: RequestHandlerContext, + options: Options, + bucketSelector: (response: InfraDatabaseSearchResponse<{}, Aggregation>) => Bucket[], + onAfterKey: (options: Options, response: InfraDatabaseSearchResponse<{}, Aggregation>) => Options, + previousBuckets: Bucket[] = [] +): Promise => { + const response = await framework.callWithRequest<{}, Aggregation>( + requestContext, + 'search', + options + ); + + // Nothing available, return the previous buckets. + if (response.hits.total.value === 0) { + return previousBuckets; + } + + // if ES doesn't return an aggregations key, something went seriously wrong. + if (!response.aggregations) { + throw new Error('Whoops!, `aggregations` key must always be returned.'); + } + + const currentBuckets = bucketSelector(response); + + // if there are no currentBuckets then we are finished paginating through the results + if (currentBuckets.length === 0) { + return previousBuckets; + } + + // There is possibly more data, concat previous and current buckets and call ourselves recursively. + const newOptions = onAfterKey(options, response); + return getAllCompositeData( + framework, + requestContext, + newOptions, + bucketSelector, + onAfterKey, + previousBuckets.concat(currentBuckets) + ); +}; diff --git a/x-pack/plugins/infra/server/utils/get_interval_in_seconds.ts b/x-pack/plugins/infra/server/utils/get_interval_in_seconds.ts new file mode 100644 index 0000000000000..297e5828956af --- /dev/null +++ b/x-pack/plugins/infra/server/utils/get_interval_in_seconds.ts @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +const intervalUnits = ['y', 'M', 'w', 'd', 'h', 'm', 's', 'ms']; +const INTERVAL_STRING_RE = new RegExp('^([0-9\\.]*)\\s*(' + intervalUnits.join('|') + ')$'); + +interface UnitsToSeconds { + [unit: string]: number; +} + +const units: UnitsToSeconds = { + ms: 0.001, + s: 1, + m: 60, + h: 3600, + d: 86400, + w: 86400 * 7, + M: 86400 * 30, + y: 86400 * 356, +}; + +export const getIntervalInSeconds = (interval: string): number => { + const matches = interval.match(INTERVAL_STRING_RE); + if (matches) { + return parseFloat(matches[1]) * units[matches[2]]; + } + throw new Error('Invalid interval string format.'); +}; diff --git a/x-pack/plugins/infra/server/utils/serialized_query.ts b/x-pack/plugins/infra/server/utils/serialized_query.ts new file mode 100644 index 0000000000000..932df847e65d0 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/serialized_query.ts @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +import { UserInputError } from 'apollo-server-errors'; + +import { JsonObject } from '../../common/typed_json'; + +export const parseFilterQuery = ( + filterQuery: string | null | undefined +): JsonObject | undefined => { + try { + if (filterQuery) { + const parsedFilterQuery = JSON.parse(filterQuery); + if ( + !parsedFilterQuery || + ['string', 'number', 'boolean'].includes(typeof parsedFilterQuery) || + Array.isArray(parsedFilterQuery) + ) { + throw new Error('expected value to be an object'); + } + return parsedFilterQuery; + } else { + return undefined; + } + } catch (err) { + throw new UserInputError(`Failed to parse query: ${err}`, { + query: filterQuery, + originalError: err, + }); + } +}; diff --git a/x-pack/plugins/infra/server/utils/typed_elasticsearch_mappings.ts b/x-pack/plugins/infra/server/utils/typed_elasticsearch_mappings.ts new file mode 100644 index 0000000000000..f18b9f3de55c9 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/typed_elasticsearch_mappings.ts @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +export type ElasticsearchMappingOf = Type extends string + ? ElasticsearchStringFieldMapping + : Type extends number + ? ElasticsearchNumberFieldMapping + : Type extends boolean + ? ElasticsearchBooleanFieldMapping + : Type extends object[] + ? ElasticsearchNestedFieldMapping + : Type extends {} + ? 
ElasticsearchObjectFieldMapping + : never; + +export interface ElasticsearchStringFieldMapping { + type: 'keyword' | 'text'; +} + +export interface ElasticsearchBooleanFieldMapping { + type: 'boolean'; +} + +export interface ElasticsearchNumberFieldMapping { + type: + | 'long' + | 'integer' + | 'short' + | 'byte' + | 'double' + | 'float' + | 'half_float' + | 'scaled_float' + | 'date'; +} + +export interface ElasticsearchNestedFieldMapping { + type?: 'nested'; + properties: { [K in keyof Obj[0]]-?: ElasticsearchMappingOf }; +} + +export interface ElasticsearchObjectFieldMapping { + type?: 'object'; + properties: { [K in keyof Obj]-?: ElasticsearchMappingOf }; +} diff --git a/x-pack/plugins/infra/server/utils/typed_resolvers.ts b/x-pack/plugins/infra/server/utils/typed_resolvers.ts new file mode 100644 index 0000000000000..d5f2d00abd504 --- /dev/null +++ b/x-pack/plugins/infra/server/utils/typed_resolvers.ts @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +import { Resolver } from '../graphql/types'; + +type ResolverResult = R | Promise; + +type InfraResolverResult = + | Promise + | Promise<{ [P in keyof R]: () => Promise }> + | { [P in keyof R]: () => Promise } + | { [P in keyof R]: () => R[P] } + | R; + +export type ResultOf = Resolver_ extends Resolver> + ? Result + : never; + +export type SubsetResolverWithFields = R extends Resolver< + Array, + infer ParentInArray, + infer ContextInArray, + infer ArgsInArray +> + ? Resolver< + Array>>, + ParentInArray, + ContextInArray, + ArgsInArray + > + : R extends Resolver + ? Resolver>, Parent, Context, Args> + : never; + +export type SubsetResolverWithoutFields = R extends Resolver< + Array, + infer ParentInArray, + infer ContextInArray, + infer ArgsInArray +> + ? 
Resolver< + Array>>, + ParentInArray, + ContextInArray, + ArgsInArray + > + : R extends Resolver + ? Resolver>, Parent, Context, Args> + : never; + +export type ResolverWithParent = Resolver_ extends Resolver< + infer Result, + any, + infer Context, + infer Args +> + ? Resolver + : never; + +export type InfraResolver = Resolver< + InfraResolverResult, + Parent, + Context, + Args +>; + +export type InfraResolverOf = Resolver_ extends Resolver< + ResolverResult, + never, + infer ContextWithNeverParent, + infer ArgsWithNeverParent +> + ? InfraResolver + : Resolver_ extends Resolver< + ResolverResult, + infer Parent, + infer Context, + infer Args + > + ? InfraResolver + : never; + +export type InfraResolverWithFields = InfraResolverOf< + SubsetResolverWithFields +>; + +export type InfraResolverWithoutFields = InfraResolverOf< + SubsetResolverWithoutFields +>; + +export type ChildResolverOf = ResolverWithParent< + Resolver_, + ResultOf +>; From 39fe4fda06fdc521d27b886eb73bfebd16e204b2 Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Mon, 23 Dec 2019 13:15:57 -0500 Subject: [PATCH 2/9] Unshims NP server plugin --- x-pack/plugins/infra/server/index.ts | 20 +-- .../infra/server/new_platform_index.ts | 15 -- x-pack/plugins/infra/server/plugin.ts | 140 +++++++++++++++--- 3 files changed, 127 insertions(+), 48 deletions(-) delete mode 100644 x-pack/plugins/infra/server/new_platform_index.ts diff --git a/x-pack/plugins/infra/server/index.ts b/x-pack/plugins/infra/server/index.ts index b12f92c8c5a9d..1853509dfa8e7 100644 --- a/x-pack/plugins/infra/server/index.ts +++ b/x-pack/plugins/infra/server/index.ts @@ -4,21 +4,11 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -import { schema, TypeOf } from '@kbn/config-schema'; import { PluginInitializerContext } from 'src/core/server'; -import { InfraPlugin } from './plugin'; +import { config, InfraConfig, InfraServerPlugin } from './plugin'; -export const config = { - schema: schema.object({ - enabled: schema.maybe(schema.boolean()), - query: schema.object({ - partitionSize: schema.maybe(schema.number()), - partitionFactor: schema.maybe(schema.number()), - }), - }), -}; +export { config, InfraConfig }; -export const plugin = (initContext: PluginInitializerContext) => new InfraPlugin(initContext); - -export type InfraConfig = TypeOf; -export { InfraSetup } from './plugin'; +export function plugin(context: PluginInitializerContext) { + return new InfraServerPlugin(context); +} diff --git a/x-pack/plugins/infra/server/new_platform_index.ts b/x-pack/plugins/infra/server/new_platform_index.ts deleted file mode 100644 index e59897a6b241d..0000000000000 --- a/x-pack/plugins/infra/server/new_platform_index.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { PluginInitializerContext } from 'src/core/server'; -import { InfraServerPlugin, InfraPluginSetup } from './new_platform_plugin'; -import { config, InfraConfig } from '../../../../plugins/infra/server'; -import { InfraServerPluginDeps } from './lib/adapters/framework'; -export { config, InfraConfig, InfraServerPluginDeps, InfraPluginSetup }; - -export function plugin(context: PluginInitializerContext) { - return new InfraServerPlugin(context); -} diff --git a/x-pack/plugins/infra/server/plugin.ts b/x-pack/plugins/infra/server/plugin.ts index 0c763313fb973..b06ff20031ad7 100644 --- a/x-pack/plugins/infra/server/plugin.ts +++ b/x-pack/plugins/infra/server/plugin.ts @@ -4,30 +4,134 @@ * you may not use this file except in compliance with the Elastic License. */ -import { Plugin, PluginInitializerContext } from 'src/core/server'; +import { CoreSetup, PluginInitializerContext } from 'src/core/server'; +import { Server } from 'hapi'; +import { schema, TypeOf } from '@kbn/config-schema'; +import { initInfraServer } from './infra_server'; +import { InfraBackendLibs, InfraDomainLibs } from './lib/infra_types'; +import { FrameworkFieldsAdapter } from './lib/adapters/fields/framework_fields_adapter'; +import { KibanaFramework } from './lib/adapters/framework/kibana_framework_adapter'; +import { InfraKibanaLogEntriesAdapter } from './lib/adapters/log_entries/kibana_log_entries_adapter'; +import { KibanaMetricsAdapter } from './lib/adapters/metrics/kibana_metrics_adapter'; +import { InfraElasticsearchSourceStatusAdapter } from './lib/adapters/source_status'; +import { InfraFieldsDomain } from './lib/domains/fields_domain'; +import { InfraLogEntriesDomain } from './lib/domains/log_entries_domain'; +import { InfraMetricsDomain } from './lib/domains/metrics_domain'; +import { InfraLogAnalysis } from './lib/log_analysis'; +import { InfraSnapshot } from './lib/snapshot'; +import { InfraSourceStatus } from './lib/source_status'; +import { InfraSources } from 
'./lib/sources'; +import { InfraServerPluginDeps } from './lib/adapters/framework'; +import { METRICS_FEATURE, LOGS_FEATURE } from './features'; +import { UsageCollector } from './usage/usage_collector'; +import { InfraStaticSourceConfiguration } from './lib/sources/types'; -export class InfraPlugin implements Plugin { - private readonly initContext: PluginInitializerContext; +export const config = { + schema: schema.object({ + enabled: schema.maybe(schema.boolean()), + query: schema.object({ + partitionSize: schema.maybe(schema.number()), + partitionFactor: schema.maybe(schema.number()), + }), + }), +}; - constructor(initContext: PluginInitializerContext) { - this.initContext = initContext; +export type InfraConfig = TypeOf; + +export interface KbnServer extends Server { + usage: any; +} + +export interface InfraPluginSetup { + defineInternalSourceConfiguration: ( + sourceId: string, + sourceProperties: InfraStaticSourceConfiguration + ) => void; +} + +const DEFAULT_CONFIG: InfraConfig = { + enabled: true, + query: { + partitionSize: 75, + partitionFactor: 1.2, + }, +}; + +export class InfraServerPlugin { + public config: InfraConfig = DEFAULT_CONFIG; + public libs: InfraBackendLibs | undefined; + + constructor(context: PluginInitializerContext) { + const config$ = context.config.create(); + config$.subscribe(configValue => { + this.config = { + ...DEFAULT_CONFIG, + enabled: configValue.enabled, + query: { + ...DEFAULT_CONFIG.query, + ...configValue.query, + }, + }; + }); } - public setup() { + getLibs() { + if (!this.libs) { + throw new Error('libs not set up yet'); + } + return this.libs; + } + + setup(core: CoreSetup, plugins: InfraServerPluginDeps) { + const framework = new KibanaFramework(core, this.config, plugins); + const sources = new InfraSources({ + config: this.config, + }); + const sourceStatus = new InfraSourceStatus( + new InfraElasticsearchSourceStatusAdapter(framework), + { + sources, + } + ); + const snapshot = new InfraSnapshot({ sources, 
framework }); + const logAnalysis = new InfraLogAnalysis({ framework }); + + // TODO: separate these out individually and do away with "domains" as a temporary group + const domainLibs: InfraDomainLibs = { + fields: new InfraFieldsDomain(new FrameworkFieldsAdapter(framework), { + sources, + }), + logEntries: new InfraLogEntriesDomain(new InfraKibanaLogEntriesAdapter(framework), { + sources, + }), + metrics: new InfraMetricsDomain(new KibanaMetricsAdapter(framework)), + }; + + this.libs = { + configuration: this.config, + framework, + logAnalysis, + snapshot, + sources, + sourceStatus, + ...domainLibs, + }; + + plugins.features.registerFeature(METRICS_FEATURE); + plugins.features.registerFeature(LOGS_FEATURE); + + initInfraServer(this.libs); + + // Telemetry + UsageCollector.registerUsageCollector(plugins.usageCollection); + return { - __legacy: { - config: this.initContext.config, + defineInternalSourceConfiguration(sourceId, sourceProperties) { + sources.defineInternalSourceConfiguration(sourceId, sourceProperties); }, - }; + } as InfraPluginSetup; } - public start() {} - public stop() {} -} - -export interface InfraSetup { - /** @deprecated */ - __legacy: { - config: PluginInitializerContext['config']; - }; + start() {} + stop() {} } From 4e57e2a439c482d6750df1b5135c233189034c7c Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Mon, 23 Dec 2019 13:16:32 -0500 Subject: [PATCH 3/9] Fixes index patterns dependency for NP (based on APM) --- .../framework/kibana_framework_adapter.ts | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts index 4409667d8390a..f91533be305f9 100644 --- a/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts +++ b/x-pack/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts @@ -6,7 +6,6 @@ /* eslint-disable 
@typescript-eslint/array-type */ -import { GenericParams } from 'elasticsearch'; import { GraphQLSchema } from 'graphql'; import { Legacy } from 'kibana'; import { runHttpQuery } from 'apollo-server-core'; @@ -30,9 +29,11 @@ import { RequestHandlerContext, KibanaResponseFactory, RouteMethod, -} from '../../../../../../../../src/core/server'; -import { RequestHandler } from '../../../../../../../../src/core/server'; -import { InfraConfig } from '../../../../../../../plugins/infra/server'; + APICaller, +} from '../../../../../../../src/core/server'; +import { RequestHandler } from '../../../../../../../src/core/server'; +import { InfraConfig } from '../../../plugin'; +import { IndexPatternsFetcher } from '../../../../../../../src/plugins/data/server'; export class KibanaFramework { public router: IRouter; @@ -214,14 +215,10 @@ export class KibanaFramework { public getIndexPatternsService( requestContext: RequestHandlerContext ): Legacy.IndexPatternsService { - return this.plugins.indexPatterns.indexPatternsServiceFactory({ - callCluster: async (method: string, args: [GenericParams], ...rest: any[]) => { - const fieldCaps = await this.callWithRequest(requestContext, method, { - ...args, - allowNoIndices: true, - } as GenericParams); - return fieldCaps; - }, + return new IndexPatternsFetcher((...rest: Parameters) => { + rest[1] = rest[1] || {}; + rest[1].allowNoIndices = true; + return requestContext.core.elasticsearch.adminClient.callAsCurrentUser(...rest); }); } From d8509400c960b158c7797e8c9ed5c9556381b688 Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Mon, 23 Dec 2019 13:16:58 -0500 Subject: [PATCH 4/9] Removes lodash/fp dependency to avoid need for our own lodash --- .../log_entries/kibana_log_entries_adapter.ts | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts 
index ec45171baa7b0..f26a6ab22e0a9 100644 --- a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts @@ -8,10 +8,7 @@ import { timeMilliseconds } from 'd3-time'; import * as runtimeTypes from 'io-ts'; -import first from 'lodash/fp/first'; -import get from 'lodash/fp/get'; -import has from 'lodash/fp/has'; -import zip from 'lodash/fp/zip'; +import _ from 'lodash'; import { pipe } from 'fp-ts/lib/pipeable'; import { map, fold } from 'fp-ts/lib/Either'; import { identity, constant } from 'fp-ts/lib/function'; @@ -205,7 +202,7 @@ export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { }; const response = await search(params); - const document = first(response.hits.hits); + const document = _.first(response.hits.hits); if (!document) { throw new Error('Document not found'); } @@ -313,7 +310,7 @@ export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { function getLookupIntervals(start: number, direction: 'asc' | 'desc'): Array<[number, number]> { const offsetSign = direction === 'asc' ? 1 : -1; const translatedOffsets = LOOKUP_OFFSETS.map(offset => start + offset * offsetSign); - const intervals = zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< + const intervals = _.zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< [number, number] >; return intervals; @@ -325,13 +322,13 @@ const convertHitToLogEntryDocument = (fields: string[]) => ( gid: hit._id, fields: fields.reduce( (flattenedFields, fieldName) => - has(fieldName, hit._source) + _.has(hit._source, fieldName) ? 
{ ...flattenedFields, - [fieldName]: get(fieldName, hit._source), + [fieldName]: _.get(hit._source, fieldName), } : flattenedFields, - {} as { [fieldName: string]: string | number | boolean | null } + {} as { [fieldName: string]: string | number | object | boolean | null } ), highlights: hit.highlight || {}, key: { From ea99934b3ac955f8046a3094a2a02551039244c9 Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Fri, 3 Jan 2020 10:40:49 -0500 Subject: [PATCH 5/9] NP cleanup and removal of legacy server directory --- x-pack/legacy/plugins/infra/index.ts | 58 +- .../legacy/plugins/infra/server/features.ts | 65 - .../plugins/infra/server/graphql/index.ts | 19 - .../infra/server/graphql/log_entries/index.ts | 7 - .../server/graphql/log_entries/resolvers.ts | 175 -- .../server/graphql/log_entries/schema.gql.ts | 136 -- .../server/graphql/source_status/index.ts | 7 - .../server/graphql/source_status/resolvers.ts | 90 - .../graphql/source_status/schema.gql.ts | 40 - .../infra/server/graphql/sources/index.ts | 8 - .../infra/server/graphql/sources/resolvers.ts | 197 --- .../server/graphql/sources/schema.gql.ts | 201 --- .../plugins/infra/server/graphql/types.ts | 1513 ----------------- .../plugins/infra/server/infra_server.ts | 52 - .../plugins/infra/server/kibana.index.ts | 46 - .../lib/adapters/fields/adapter_types.ts | 23 - .../fields/framework_fields_adapter.ts | 126 -- .../infra/server/lib/adapters/fields/index.ts | 7 - .../lib/adapters/framework/adapter_types.ts | 172 -- .../server/lib/adapters/framework/index.ts | 7 - .../framework/kibana_framework_adapter.ts | 259 --- .../lib/adapters/log_entries/adapter_types.ts | 5 - .../server/lib/adapters/log_entries/index.ts | 5 - .../log_entries/kibana_log_entries_adapter.ts | 390 ----- .../lib/adapters/metrics/adapter_types.ts | 125 -- .../server/lib/adapters/metrics/index.ts | 7 - .../metrics/kibana_metrics_adapter.ts | 154 -- .../adapters/metrics/lib/check_valid_node.ts | 32 - .../server/lib/adapters/metrics/lib/errors.ts | 15 
- .../elasticsearch_source_status_adapter.ts | 71 - .../lib/adapters/source_status/index.ts | 7 - .../infra/server/lib/compose/kibana.ts | 56 - .../plugins/infra/server/lib/constants.ts | 7 - .../infra/server/lib/domains/fields_domain.ts | 40 - .../builtin_rules/filebeat_apache2.test.ts | 263 --- .../builtin_rules/filebeat_apache2.ts | 100 -- .../builtin_rules/filebeat_auditd.test.ts | 359 ---- .../builtin_rules/filebeat_auditd.ts | 119 -- .../builtin_rules/filebeat_haproxy.test.ts | 791 --------- .../builtin_rules/filebeat_haproxy.ts | 329 ---- .../builtin_rules/filebeat_icinga.test.ts | 147 -- .../builtin_rules/filebeat_icinga.ts | 86 - .../builtin_rules/filebeat_iis.test.ts | 562 ------ .../builtin_rules/filebeat_iis.ts | 142 -- .../builtin_rules/filebeat_kafka.test.ts | 60 - .../builtin_rules/filebeat_logstash.test.ts | 206 --- .../builtin_rules/filebeat_logstash.ts | 80 - .../builtin_rules/filebeat_mongodb.test.ts | 52 - .../builtin_rules/filebeat_mongodb.ts | 28 - .../builtin_rules/filebeat_mysql.test.ts | 219 --- .../builtin_rules/filebeat_mysql.ts | 104 -- .../builtin_rules/filebeat_nginx.test.ts | 264 --- .../builtin_rules/filebeat_nginx.ts | 106 -- .../builtin_rules/filebeat_osquery.test.ts | 77 - .../builtin_rules/filebeat_osquery.ts | 34 - .../builtin_rules/filebeat_redis.ts | 30 - .../builtin_rules/filebeat_system.ts | 90 - .../builtin_rules/filebeat_traefik.test.ts | 124 -- .../builtin_rules/filebeat_traefik.ts | 64 - .../builtin_rules/generic.test.ts | 168 -- .../builtin_rules/generic.ts | 101 -- .../builtin_rules/generic_webserver.ts | 116 -- .../builtin_rules/helpers.ts | 12 - .../log_entries_domain/builtin_rules/index.ts | 63 - ...document_source_to_log_item_fields.test.ts | 70 - ...vert_document_source_to_log_item_fields.ts | 38 - .../lib/domains/log_entries_domain/index.ts | 7 - .../log_entries_domain/log_entries_domain.ts | 436 ----- .../lib/domains/log_entries_domain/message.ts | 190 --- .../domains/log_entries_domain/rule_types.ts | 36 - 
.../server/lib/domains/metrics_domain.ts | 25 - .../plugins/infra/server/lib/infra_types.ts | 49 - .../infra/server/lib/log_analysis/errors.ts | 12 - .../infra/server/lib/log_analysis/index.ts | 8 - .../server/lib/log_analysis/log_analysis.ts | 143 -- .../server/lib/log_analysis/queries/index.ts | 7 - .../log_analysis/queries/log_entry_rate.ts | 182 -- .../infra/server/lib/snapshot/constants.ts | 9 - .../create_timerange_with_interval.ts | 59 - .../infra/server/lib/snapshot/index.ts | 7 - .../server/lib/snapshot/query_helpers.ts | 73 - .../lib/snapshot/response_helpers.test.ts | 78 - .../server/lib/snapshot/response_helpers.ts | 173 -- .../infra/server/lib/snapshot/snapshot.ts | 237 --- .../infra/server/lib/snapshot/types.ts | 25 - .../plugins/infra/server/lib/source_status.ts | 106 -- .../infra/server/lib/sources/defaults.ts | 40 - .../infra/server/lib/sources/errors.ts | 12 - .../plugins/infra/server/lib/sources/index.ts | 10 - .../lib/sources/saved_object_mappings.ts | 79 - .../infra/server/lib/sources/sources.test.ts | 153 -- .../infra/server/lib/sources/sources.ts | 247 --- .../plugins/infra/server/lib/sources/types.ts | 149 -- .../infra/server/new_platform_index.ts | 15 - .../infra/server/new_platform_plugin.ts | 135 -- .../server/routes/inventory_metadata/index.ts | 62 - .../infra/server/routes/ip_to_hostname.ts | 65 - .../infra/server/routes/log_analysis/index.ts | 8 - .../routes/log_analysis/results/index.ts | 7 - .../log_analysis/results/log_entry_rate.ts | 83 - .../routes/log_analysis/validation/index.ts | 7 - .../routes/log_analysis/validation/indices.ts | 93 - .../infra/server/routes/log_entries/index.ts | 9 - .../server/routes/log_entries/summary.ts | 66 - .../routes/log_entries/summary_highlights.ts | 70 - .../infra/server/routes/metadata/index.ts | 95 -- .../metadata/lib/get_cloud_metric_metadata.ts | 62 - .../metadata/lib/get_metric_metadata.ts | 83 - .../routes/metadata/lib/get_node_info.ts | 82 - .../routes/metadata/lib/get_pod_node_name.ts | 47 
- .../routes/metadata/lib/has_apm_data.ts | 56 - .../routes/metadata/lib/pick_feature_name.ts | 16 - .../server/routes/metrics_explorer/index.ts | 62 - .../lib/create_metrics_model.ts | 67 - .../metrics_explorer/lib/get_groupings.ts | 120 -- .../lib/populate_series_with_tsvb_data.ts | 134 -- .../server/routes/metrics_explorer/types.ts | 36 - .../infra/server/routes/node_details/index.ts | 66 - .../infra/server/routes/snapshot/index.ts | 72 - .../plugins/infra/server/saved_objects.ts | 15 - .../infra/server/usage/usage_collector.ts | 119 -- .../plugins/infra/server/utils/README.md | 1 - .../server/utils/calculate_metric_interval.ts | 105 -- .../server/utils/create_afterkey_handler.ts | 21 - .../server/utils/get_all_composite_data.ts | 56 - .../server/utils/get_interval_in_seconds.ts | 31 - .../infra/server/utils/serialized_query.ts | 34 - .../utils/typed_elasticsearch_mappings.ts | 48 - .../infra/server/utils/typed_resolvers.ts | 97 -- x-pack/legacy/plugins/monitoring/index.js | 2 +- x-pack/plugins/infra/common/time/time_key.ts | 4 +- x-pack/plugins/infra/kibana.json | 4 +- .../plugins/infra/server/lib/infra_types.ts | 4 +- .../server/lib/log_analysis/log_analysis.ts | 2 +- 134 files changed, 22 insertions(+), 13817 deletions(-) delete mode 100644 x-pack/legacy/plugins/infra/server/features.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/log_entries/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/log_entries/resolvers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/log_entries/schema.gql.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/source_status/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/source_status/resolvers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/source_status/schema.gql.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/sources/index.ts delete mode 
100644 x-pack/legacy/plugins/infra/server/graphql/sources/resolvers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/sources/schema.gql.ts delete mode 100644 x-pack/legacy/plugins/infra/server/graphql/types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/infra_server.ts delete mode 100644 x-pack/legacy/plugins/infra/server/kibana.index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/fields/adapter_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/fields/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/framework/adapter_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/framework/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/metrics/adapter_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/metrics/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/errors.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/adapters/source_status/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/compose/kibana.ts delete mode 100644 
x-pack/legacy/plugins/infra/server/lib/constants.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/fields_domain.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts delete mode 100644 
x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts delete mode 100644 
x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/message.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/domains/metrics_domain.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/infra_types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/log_analysis/errors.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/log_analysis/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/log_analysis/log_analysis.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/constants.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/query_helpers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/snapshot.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/snapshot/types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/source_status.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/defaults.ts delete mode 100644 
x-pack/legacy/plugins/infra/server/lib/sources/errors.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/saved_object_mappings.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/sources.test.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/sources.ts delete mode 100644 x-pack/legacy/plugins/infra/server/lib/sources/types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/new_platform_index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/new_platform_plugin.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/inventory_metadata/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/ip_to_hostname.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_analysis/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_analysis/results/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/indices.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_entries/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_entries/summary.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/log_entries/summary_highlights.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_node_info.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_pod_node_name.ts delete mode 100644 
x-pack/legacy/plugins/infra/server/routes/metadata/lib/has_apm_data.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metrics_explorer/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/metrics_explorer/types.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/node_details/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/routes/snapshot/index.ts delete mode 100644 x-pack/legacy/plugins/infra/server/saved_objects.ts delete mode 100644 x-pack/legacy/plugins/infra/server/usage/usage_collector.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/README.md delete mode 100644 x-pack/legacy/plugins/infra/server/utils/calculate_metric_interval.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/create_afterkey_handler.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/get_all_composite_data.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/get_interval_in_seconds.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/serialized_query.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/typed_elasticsearch_mappings.ts delete mode 100644 x-pack/legacy/plugins/infra/server/utils/typed_resolvers.ts diff --git a/x-pack/legacy/plugins/infra/index.ts b/x-pack/legacy/plugins/infra/index.ts index 196950b51be3a..ced94d30bf747 100644 --- a/x-pack/legacy/plugins/infra/index.ts +++ b/x-pack/legacy/plugins/infra/index.ts @@ -7,17 +7,6 @@ import { i18n } from '@kbn/i18n'; import JoiNamespace from 'joi'; import { resolve } from 'path'; -import { 
PluginInitializerContext } from 'src/core/server'; -import { UsageCollectionSetup } from 'src/plugins/usage_collection/server'; -import KbnServer from 'src/legacy/server/kbn_server'; -import { getConfigSchema } from './server/kibana.index'; -import { savedObjectMappings } from './server/saved_objects'; -import { plugin, InfraServerPluginDeps } from './server/new_platform_index'; -import { InfraSetup } from '../../../plugins/infra/server'; -import { PluginSetupContract as FeaturesPluginSetup } from '../../../plugins/features/server'; -import { SpacesPluginSetup } from '../../../plugins/spaces/server'; -import { VisTypeTimeseriesSetup } from '../../../../src/plugins/vis_type_timeseries/server'; -import { APMPluginContract } from '../../../plugins/apm/server'; export const APP_ID = 'infra'; @@ -70,45 +59,24 @@ export function infra(kibana: any) { url: `/app/${APP_ID}#/logs`, }, ], - mappings: savedObjectMappings, + // mappings: savedObjectMappings, }, config(Joi: typeof JoiNamespace) { - return getConfigSchema(Joi); + return Joi.object({ + enabled: Joi.boolean().default(true), + }) + .unknown() + .default(); }, init(legacyServer: any) { - const { newPlatform } = legacyServer as KbnServer; - const { core, plugins } = newPlatform.setup; - - const infraSetup = (plugins.infra as unknown) as InfraSetup; // chef's kiss - - const initContext = ({ - config: infraSetup.__legacy.config, - } as unknown) as PluginInitializerContext; - // NP_TODO: Use real types from the other plugins as they are migrated - const pluginDeps: InfraServerPluginDeps = { - home: legacyServer.newPlatform.setup.plugins.home, - usageCollection: plugins.usageCollection as UsageCollectionSetup, - indexPatterns: { - indexPatternsServiceFactory: legacyServer.indexPatternsServiceFactory, + // NP_TODO: How do we move this to new platform? 
+ legacyServer.addAppLinksToSampleDataset('logs', [ + { + path: `/app/${APP_ID}#/logs`, + label: logsSampleDataLinkLabel, + icon: 'logsApp', }, - metrics: plugins.metrics as VisTypeTimeseriesSetup, - spaces: plugins.spaces as SpacesPluginSetup, - features: plugins.features as FeaturesPluginSetup, - apm: plugins.apm as APMPluginContract, - }; - - const infraPluginInstance = plugin(initContext); - infraPluginInstance.setup(core, pluginDeps); - - // NP_TODO: EVERYTHING BELOW HERE IS LEGACY - - const libs = infraPluginInstance.getLibs(); - - // NP_NOTE: Left here for now for legacy plugins to consume - legacyServer.expose( - 'defineInternalSourceConfiguration', - libs.sources.defineInternalSourceConfiguration.bind(libs.sources) - ); + ]); }, }); } diff --git a/x-pack/legacy/plugins/infra/server/features.ts b/x-pack/legacy/plugins/infra/server/features.ts deleted file mode 100644 index fc20813c777b6..0000000000000 --- a/x-pack/legacy/plugins/infra/server/features.ts +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { i18n } from '@kbn/i18n'; - -export const METRICS_FEATURE = { - id: 'infrastructure', - name: i18n.translate('xpack.infra.featureRegistry.linkInfrastructureTitle', { - defaultMessage: 'Infrastructure', - }), - icon: 'infraApp', - navLinkId: 'infra:home', - app: ['infra', 'kibana'], - catalogue: ['infraops'], - privileges: { - all: { - api: ['infra'], - savedObject: { - all: ['infrastructure-ui-source'], - read: ['index-pattern'], - }, - ui: ['show', 'configureSource', 'save'], - }, - read: { - api: ['infra'], - savedObject: { - all: [], - read: ['infrastructure-ui-source', 'index-pattern'], - }, - ui: ['show'], - }, - }, -}; - -export const LOGS_FEATURE = { - id: 'logs', - name: i18n.translate('xpack.infra.featureRegistry.linkLogsTitle', { - defaultMessage: 'Logs', - }), - icon: 'loggingApp', - navLinkId: 'infra:logs', - app: ['infra', 'kibana'], - catalogue: ['infralogging'], - privileges: { - all: { - api: ['infra'], - savedObject: { - all: ['infrastructure-ui-source'], - read: [], - }, - ui: ['show', 'configureSource', 'save'], - }, - read: { - api: ['infra'], - savedObject: { - all: [], - read: ['infrastructure-ui-source'], - }, - ui: ['show'], - }, - }, -}; diff --git a/x-pack/legacy/plugins/infra/server/graphql/index.ts b/x-pack/legacy/plugins/infra/server/graphql/index.ts deleted file mode 100644 index 82fef41db1a73..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { rootSchema } from '../../common/graphql/root/schema.gql'; -import { sharedSchema } from '../../common/graphql/shared/schema.gql'; -import { logEntriesSchema } from './log_entries/schema.gql'; -import { sourceStatusSchema } from './source_status/schema.gql'; -import { sourcesSchema } from './sources/schema.gql'; - -export const schemas = [ - rootSchema, - sharedSchema, - logEntriesSchema, - sourcesSchema, - sourceStatusSchema, -]; diff --git a/x-pack/legacy/plugins/infra/server/graphql/log_entries/index.ts b/x-pack/legacy/plugins/infra/server/graphql/log_entries/index.ts deleted file mode 100644 index 21134862663ec..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/log_entries/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export { createLogEntriesResolvers } from './resolvers'; diff --git a/x-pack/legacy/plugins/infra/server/graphql/log_entries/resolvers.ts b/x-pack/legacy/plugins/infra/server/graphql/log_entries/resolvers.ts deleted file mode 100644 index edbb736b2c4fd..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/log_entries/resolvers.ts +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { - InfraLogEntryColumn, - InfraLogEntryFieldColumn, - InfraLogEntryMessageColumn, - InfraLogEntryTimestampColumn, - InfraLogMessageConstantSegment, - InfraLogMessageFieldSegment, - InfraLogMessageSegment, - InfraSourceResolvers, -} from '../../graphql/types'; -import { InfraLogEntriesDomain } from '../../lib/domains/log_entries_domain'; -import { parseFilterQuery } from '../../utils/serialized_query'; -import { ChildResolverOf, InfraResolverOf } from '../../utils/typed_resolvers'; -import { QuerySourceResolver } from '../sources/resolvers'; - -export type InfraSourceLogEntriesAroundResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceLogEntriesBetweenResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceLogEntryHighlightsResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export const createLogEntriesResolvers = (libs: { - logEntries: InfraLogEntriesDomain; -}): { - InfraSource: { - logEntriesAround: InfraSourceLogEntriesAroundResolver; - logEntriesBetween: InfraSourceLogEntriesBetweenResolver; - logEntryHighlights: InfraSourceLogEntryHighlightsResolver; - }; - InfraLogEntryColumn: { - __resolveType( - logEntryColumn: InfraLogEntryColumn - ): - | 'InfraLogEntryTimestampColumn' - | 'InfraLogEntryMessageColumn' - | 'InfraLogEntryFieldColumn' - | null; - }; - InfraLogMessageSegment: { - __resolveType( - messageSegment: InfraLogMessageSegment - ): 'InfraLogMessageFieldSegment' | 'InfraLogMessageConstantSegment' | null; - }; -} => ({ - InfraSource: { - async logEntriesAround(source, args, { req }) { - const countBefore = args.countBefore || 0; - const countAfter = args.countAfter || 0; - - const { entriesBefore, entriesAfter } = await libs.logEntries.getLogEntriesAround( - req, - source.id, - args.key, - countBefore + 1, - countAfter + 1, - parseFilterQuery(args.filterQuery) - ); - - const hasMoreBefore = entriesBefore.length > countBefore; 
- const hasMoreAfter = entriesAfter.length > countAfter; - - const entries = [ - ...(hasMoreBefore ? entriesBefore.slice(1) : entriesBefore), - ...(hasMoreAfter ? entriesAfter.slice(0, -1) : entriesAfter), - ]; - - return { - start: entries.length > 0 ? entries[0].key : null, - end: entries.length > 0 ? entries[entries.length - 1].key : null, - hasMoreBefore, - hasMoreAfter, - filterQuery: args.filterQuery, - entries, - }; - }, - async logEntriesBetween(source, args, { req }) { - const entries = await libs.logEntries.getLogEntriesBetween( - req, - source.id, - args.startKey, - args.endKey, - parseFilterQuery(args.filterQuery) - ); - - return { - start: entries.length > 0 ? entries[0].key : null, - end: entries.length > 0 ? entries[entries.length - 1].key : null, - hasMoreBefore: true, - hasMoreAfter: true, - filterQuery: args.filterQuery, - entries, - }; - }, - async logEntryHighlights(source, args, { req }) { - const highlightedLogEntrySets = await libs.logEntries.getLogEntryHighlights( - req, - source.id, - args.startKey, - args.endKey, - args.highlights.filter(highlightInput => !!highlightInput.query), - parseFilterQuery(args.filterQuery) - ); - - return highlightedLogEntrySets.map(entries => ({ - start: entries.length > 0 ? entries[0].key : null, - end: entries.length > 0 ? 
entries[entries.length - 1].key : null, - hasMoreBefore: true, - hasMoreAfter: true, - filterQuery: args.filterQuery, - entries, - })); - }, - }, - InfraLogEntryColumn: { - __resolveType(logEntryColumn) { - if (isTimestampColumn(logEntryColumn)) { - return 'InfraLogEntryTimestampColumn'; - } - - if (isMessageColumn(logEntryColumn)) { - return 'InfraLogEntryMessageColumn'; - } - - if (isFieldColumn(logEntryColumn)) { - return 'InfraLogEntryFieldColumn'; - } - - return null; - }, - }, - InfraLogMessageSegment: { - __resolveType(messageSegment) { - if (isConstantSegment(messageSegment)) { - return 'InfraLogMessageConstantSegment'; - } - - if (isFieldSegment(messageSegment)) { - return 'InfraLogMessageFieldSegment'; - } - - return null; - }, - }, -}); - -const isTimestampColumn = (column: InfraLogEntryColumn): column is InfraLogEntryTimestampColumn => - 'timestamp' in column; - -const isMessageColumn = (column: InfraLogEntryColumn): column is InfraLogEntryMessageColumn => - 'message' in column; - -const isFieldColumn = (column: InfraLogEntryColumn): column is InfraLogEntryFieldColumn => - 'field' in column && 'value' in column; - -const isConstantSegment = ( - segment: InfraLogMessageSegment -): segment is InfraLogMessageConstantSegment => 'constant' in segment; - -const isFieldSegment = (segment: InfraLogMessageSegment): segment is InfraLogMessageFieldSegment => - 'field' in segment && 'value' in segment && 'highlights' in segment; diff --git a/x-pack/legacy/plugins/infra/server/graphql/log_entries/schema.gql.ts b/x-pack/legacy/plugins/infra/server/graphql/log_entries/schema.gql.ts deleted file mode 100644 index 945f2f85435e5..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/log_entries/schema.gql.ts +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import gql from 'graphql-tag'; - -export const logEntriesSchema = gql` - "A segment of the log entry message that was derived from a field" - type InfraLogMessageFieldSegment { - "The field the segment was derived from" - field: String! - "The segment's message" - value: String! - "A list of highlighted substrings of the value" - highlights: [String!]! - } - - "A segment of the log entry message that was derived from a string literal" - type InfraLogMessageConstantSegment { - "The segment's message" - constant: String! - } - - "A segment of the log entry message" - union InfraLogMessageSegment = InfraLogMessageFieldSegment | InfraLogMessageConstantSegment - - "A special built-in column that contains the log entry's timestamp" - type InfraLogEntryTimestampColumn { - "The id of the corresponding column configuration" - columnId: ID! - "The timestamp" - timestamp: Float! - } - - "A special built-in column that contains the log entry's constructed message" - type InfraLogEntryMessageColumn { - "The id of the corresponding column configuration" - columnId: ID! - "A list of the formatted log entry segments" - message: [InfraLogMessageSegment!]! - } - - "A column that contains the value of a field of the log entry" - type InfraLogEntryFieldColumn { - "The id of the corresponding column configuration" - columnId: ID! - "The field name of the column" - field: String! - "The value of the field in the log entry" - value: String! - "A list of highlighted substrings of the value" - highlights: [String!]! - } - - "A column of a log entry" - union InfraLogEntryColumn = - InfraLogEntryTimestampColumn - | InfraLogEntryMessageColumn - | InfraLogEntryFieldColumn - - "A log entry" - type InfraLogEntry { - "A unique representation of the log entry's position in the event stream" - key: InfraTimeKey! - "The log entry's id" - gid: String! 
- "The source id" - source: String! - "The columns used for rendering the log entry" - columns: [InfraLogEntryColumn!]! - } - - "A highlighting definition" - input InfraLogEntryHighlightInput { - "The query to highlight by" - query: String! - "The number of highlighted documents to include beyond the beginning of the interval" - countBefore: Int! - "The number of highlighted documents to include beyond the end of the interval" - countAfter: Int! - } - - "A consecutive sequence of log entries" - type InfraLogEntryInterval { - "The key corresponding to the start of the interval covered by the entries" - start: InfraTimeKey - "The key corresponding to the end of the interval covered by the entries" - end: InfraTimeKey - "Whether there are more log entries available before the start" - hasMoreBefore: Boolean! - "Whether there are more log entries available after the end" - hasMoreAfter: Boolean! - "The query the log entries were filtered by" - filterQuery: String - "The query the log entries were highlighted with" - highlightQuery: String - "A list of the log entries" - entries: [InfraLogEntry!]! - } - - extend type InfraSource { - "A consecutive span of log entries surrounding a point in time" - logEntriesAround( - "The sort key that corresponds to the point in time" - key: InfraTimeKeyInput! - "The maximum number of preceding to return" - countBefore: Int = 0 - "The maximum number of following to return" - countAfter: Int = 0 - "The query to filter the log entries by" - filterQuery: String - ): InfraLogEntryInterval! - "A consecutive span of log entries within an interval" - logEntriesBetween( - "The sort key that corresponds to the start of the interval" - startKey: InfraTimeKeyInput! - "The sort key that corresponds to the end of the interval" - endKey: InfraTimeKeyInput! - "The query to filter the log entries by" - filterQuery: String - ): InfraLogEntryInterval! 
- "Sequences of log entries matching sets of highlighting queries within an interval" - logEntryHighlights( - "The sort key that corresponds to the start of the interval" - startKey: InfraTimeKeyInput! - "The sort key that corresponds to the end of the interval" - endKey: InfraTimeKeyInput! - "The query to filter the log entries by" - filterQuery: String - "The highlighting to apply to the log entries" - highlights: [InfraLogEntryHighlightInput!]! - ): [InfraLogEntryInterval!]! - } -`; diff --git a/x-pack/legacy/plugins/infra/server/graphql/source_status/index.ts b/x-pack/legacy/plugins/infra/server/graphql/source_status/index.ts deleted file mode 100644 index abc91fa3815c8..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/source_status/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export { createSourceStatusResolvers } from './resolvers'; diff --git a/x-pack/legacy/plugins/infra/server/graphql/source_status/resolvers.ts b/x-pack/legacy/plugins/infra/server/graphql/source_status/resolvers.ts deleted file mode 100644 index 848d66058e64c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/source_status/resolvers.ts +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { InfraIndexType, InfraSourceStatusResolvers } from '../../graphql/types'; -import { InfraFieldsDomain } from '../../lib/domains/fields_domain'; -import { InfraSourceStatus } from '../../lib/source_status'; -import { ChildResolverOf, InfraResolverOf } from '../../utils/typed_resolvers'; -import { QuerySourceResolver } from '../sources/resolvers'; - -export type InfraSourceStatusMetricAliasExistsResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusMetricIndicesExistResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusMetricIndicesResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusLogAliasExistsResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusLogIndicesExistResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusLogIndicesResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export type InfraSourceStatusIndexFieldsResolver = ChildResolverOf< - InfraResolverOf, - QuerySourceResolver ->; - -export const createSourceStatusResolvers = (libs: { - sourceStatus: InfraSourceStatus; - fields: InfraFieldsDomain; -}): { - InfraSourceStatus: { - metricAliasExists: InfraSourceStatusMetricAliasExistsResolver; - metricIndicesExist: InfraSourceStatusMetricIndicesExistResolver; - metricIndices: InfraSourceStatusMetricIndicesResolver; - logAliasExists: InfraSourceStatusLogAliasExistsResolver; - logIndicesExist: InfraSourceStatusLogIndicesExistResolver; - logIndices: InfraSourceStatusLogIndicesResolver; - indexFields: InfraSourceStatusIndexFieldsResolver; - }; -} => ({ - InfraSourceStatus: { - async metricAliasExists(source, args, { req }) { - return await libs.sourceStatus.hasMetricAlias(req, source.id); - }, - async metricIndicesExist(source, args, { req }) { - return await 
libs.sourceStatus.hasMetricIndices(req, source.id); - }, - async metricIndices(source, args, { req }) { - return await libs.sourceStatus.getMetricIndexNames(req, source.id); - }, - async logAliasExists(source, args, { req }) { - return await libs.sourceStatus.hasLogAlias(req, source.id); - }, - async logIndicesExist(source, args, { req }) { - return await libs.sourceStatus.hasLogIndices(req, source.id); - }, - async logIndices(source, args, { req }) { - return await libs.sourceStatus.getLogIndexNames(req, source.id); - }, - async indexFields(source, args, { req }) { - const fields = await libs.fields.getFields( - req, - source.id, - args.indexType || InfraIndexType.ANY - ); - return fields; - }, - }, -}); diff --git a/x-pack/legacy/plugins/infra/server/graphql/source_status/schema.gql.ts b/x-pack/legacy/plugins/infra/server/graphql/source_status/schema.gql.ts deleted file mode 100644 index e0482382c6d6a..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/source_status/schema.gql.ts +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import gql from 'graphql-tag'; - -export const sourceStatusSchema = gql` - "A descriptor of a field in an index" - type InfraIndexField { - "The name of the field" - name: String! - "The type of the field's values as recognized by Kibana" - type: String! - "Whether the field's values can be efficiently searched for" - searchable: Boolean! - "Whether the field's values can be aggregated" - aggregatable: Boolean! - "Whether the field should be displayed based on event.module and a ECS allowed list" - displayable: Boolean! - } - - extend type InfraSourceStatus { - "Whether the configured metric alias exists" - metricAliasExists: Boolean! 
- "Whether the configured log alias exists" - logAliasExists: Boolean! - "Whether the configured alias or wildcard pattern resolve to any metric indices" - metricIndicesExist: Boolean! - "Whether the configured alias or wildcard pattern resolve to any log indices" - logIndicesExist: Boolean! - "The list of indices in the metric alias" - metricIndices: [String!]! - "The list of indices in the log alias" - logIndices: [String!]! - "The list of fields defined in the index mappings" - indexFields(indexType: InfraIndexType = ANY): [InfraIndexField!]! - } -`; diff --git a/x-pack/legacy/plugins/infra/server/graphql/sources/index.ts b/x-pack/legacy/plugins/infra/server/graphql/sources/index.ts deleted file mode 100644 index ee187d8c31bec..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/sources/index.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export { createSourcesResolvers } from './resolvers'; -export { sourcesSchema } from './schema.gql'; diff --git a/x-pack/legacy/plugins/infra/server/graphql/sources/resolvers.ts b/x-pack/legacy/plugins/infra/server/graphql/sources/resolvers.ts deleted file mode 100644 index 1fe1431392a38..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/sources/resolvers.ts +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { UserInputError } from 'apollo-server-errors'; -import { failure } from 'io-ts/lib/PathReporter'; - -import { identity } from 'fp-ts/lib/function'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { - InfraSourceLogColumn, - InfraSourceResolvers, - MutationResolvers, - QueryResolvers, - UpdateSourceLogColumnInput, -} from '../../graphql/types'; -import { InfraSourceStatus } from '../../lib/source_status'; -import { - InfraSources, - SavedSourceConfigurationFieldColumnRuntimeType, - SavedSourceConfigurationMessageColumnRuntimeType, - SavedSourceConfigurationTimestampColumnRuntimeType, - SavedSourceConfigurationColumnRuntimeType, -} from '../../lib/sources'; -import { - ChildResolverOf, - InfraResolverOf, - InfraResolverWithFields, - ResultOf, -} from '../../utils/typed_resolvers'; - -export type QuerySourceResolver = InfraResolverWithFields< - QueryResolvers.SourceResolver, - 'id' | 'version' | 'updatedAt' | 'configuration' ->; - -export type QueryAllSourcesResolver = InfraResolverWithFields< - QueryResolvers.AllSourcesResolver, - 'id' | 'version' | 'updatedAt' | 'configuration' ->; - -export type InfraSourceStatusResolver = ChildResolverOf< - InfraResolverOf>>, - QuerySourceResolver ->; - -export type MutationCreateSourceResolver = InfraResolverOf< - MutationResolvers.CreateSourceResolver<{ - source: ResultOf; - }> ->; - -export type MutationDeleteSourceResolver = InfraResolverOf; - -export type MutationUpdateSourceResolver = InfraResolverOf< - MutationResolvers.UpdateSourceResolver<{ - source: ResultOf; - }> ->; - -interface SourcesResolversDeps { - sources: InfraSources; - sourceStatus: InfraSourceStatus; -} - -export const createSourcesResolvers = ( - libs: SourcesResolversDeps -): { - Query: { - source: QuerySourceResolver; - allSources: QueryAllSourcesResolver; - }; - InfraSource: { - status: InfraSourceStatusResolver; - }; - InfraSourceLogColumn: { - __resolveType( - logColumn: InfraSourceLogColumn 
- ): - | 'InfraSourceTimestampLogColumn' - | 'InfraSourceMessageLogColumn' - | 'InfraSourceFieldLogColumn' - | null; - }; - Mutation: { - createSource: MutationCreateSourceResolver; - deleteSource: MutationDeleteSourceResolver; - updateSource: MutationUpdateSourceResolver; - }; -} => ({ - Query: { - async source(root, args, { req }) { - const requestedSourceConfiguration = await libs.sources.getSourceConfiguration(req, args.id); - - return requestedSourceConfiguration; - }, - async allSources(root, args, { req }) { - const sourceConfigurations = await libs.sources.getAllSourceConfigurations(req); - - return sourceConfigurations; - }, - }, - InfraSource: { - async status(source) { - return source; - }, - }, - InfraSourceLogColumn: { - __resolveType(logColumn) { - if (SavedSourceConfigurationTimestampColumnRuntimeType.is(logColumn)) { - return 'InfraSourceTimestampLogColumn'; - } - - if (SavedSourceConfigurationMessageColumnRuntimeType.is(logColumn)) { - return 'InfraSourceMessageLogColumn'; - } - - if (SavedSourceConfigurationFieldColumnRuntimeType.is(logColumn)) { - return 'InfraSourceFieldLogColumn'; - } - - return null; - }, - }, - Mutation: { - async createSource(root, args, { req }) { - const sourceConfiguration = await libs.sources.createSourceConfiguration( - req, - args.id, - compactObject({ - ...args.sourceProperties, - fields: args.sourceProperties.fields - ? compactObject(args.sourceProperties.fields) - : undefined, - logColumns: decodeLogColumns(args.sourceProperties.logColumns), - }) - ); - - return { - source: sourceConfiguration, - }; - }, - async deleteSource(root, args, { req }) { - await libs.sources.deleteSourceConfiguration(req, args.id); - - return { - id: args.id, - }; - }, - async updateSource(root, args, { req }) { - const updatedSourceConfiguration = await libs.sources.updateSourceConfiguration( - req, - args.id, - compactObject({ - ...args.sourceProperties, - fields: args.sourceProperties.fields - ? 
compactObject(args.sourceProperties.fields) - : undefined, - logColumns: decodeLogColumns(args.sourceProperties.logColumns), - }) - ); - - return { - source: updatedSourceConfiguration, - }; - }, - }, -}); - -type CompactObject = { [K in keyof T]: NonNullable }; - -const compactObject = (obj: T): CompactObject => - Object.entries(obj).reduce>( - (accumulatedObj, [key, value]) => - typeof value === 'undefined' || value === null - ? accumulatedObj - : { - ...(accumulatedObj as any), - [key]: value, - }, - {} as CompactObject - ); - -const decodeLogColumns = (logColumns?: UpdateSourceLogColumnInput[] | null) => - logColumns - ? logColumns.map(logColumn => - pipe( - SavedSourceConfigurationColumnRuntimeType.decode(logColumn), - fold(errors => { - throw new UserInputError(failure(errors).join('\n')); - }, identity) - ) - ) - : undefined; diff --git a/x-pack/legacy/plugins/infra/server/graphql/sources/schema.gql.ts b/x-pack/legacy/plugins/infra/server/graphql/sources/schema.gql.ts deleted file mode 100644 index a39399cec7c32..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/sources/schema.gql.ts +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import gql from 'graphql-tag'; - -export const sourcesSchema = gql` - "A source of infrastructure data" - type InfraSource { - "The id of the source" - id: ID! - "The version number the source configuration was last persisted with" - version: String - "The timestamp the source configuration was last persisted at" - updatedAt: Float - "The origin of the source (one of 'fallback', 'internal', 'stored')" - origin: String! - "The raw configuration of the source" - configuration: InfraSourceConfiguration! - "The status of the source" - status: InfraSourceStatus! 
- } - - "The status of an infrastructure data source" - type InfraSourceStatus - - "A set of configuration options for an infrastructure data source" - type InfraSourceConfiguration { - "The name of the data source" - name: String! - "A description of the data source" - description: String! - "The alias to read metric data from" - metricAlias: String! - "The alias to read log data from" - logAlias: String! - "The field mapping to use for this source" - fields: InfraSourceFields! - "The columns to use for log display" - logColumns: [InfraSourceLogColumn!]! - } - - "A mapping of semantic fields to their document counterparts" - type InfraSourceFields { - "The field to identify a container by" - container: String! - "The fields to identify a host by" - host: String! - "The fields to use as the log message" - message: [String!]! - "The field to identify a pod by" - pod: String! - "The field to use as a tiebreaker for log events that have identical timestamps" - tiebreaker: String! - "The field to use as a timestamp for metrics and logs" - timestamp: String! - } - - "The built-in timestamp log column" - type InfraSourceTimestampLogColumn { - timestampColumn: InfraSourceTimestampLogColumnAttributes! - } - - type InfraSourceTimestampLogColumnAttributes { - "A unique id for the column" - id: ID! - } - - "The built-in message log column" - type InfraSourceMessageLogColumn { - messageColumn: InfraSourceMessageLogColumnAttributes! - } - - type InfraSourceMessageLogColumnAttributes { - "A unique id for the column" - id: ID! - } - - "A log column containing a field value" - type InfraSourceFieldLogColumn { - fieldColumn: InfraSourceFieldLogColumnAttributes! - } - - type InfraSourceFieldLogColumnAttributes { - "A unique id for the column" - id: ID! - "The field name this column refers to" - field: String! 
- } - - "All known log column types" - union InfraSourceLogColumn = - InfraSourceTimestampLogColumn - | InfraSourceMessageLogColumn - | InfraSourceFieldLogColumn - - extend type Query { - """ - Get an infrastructure data source by id. - - The resolution order for the source configuration attributes is as follows - with the first defined value winning: - - 1. The attributes of the saved object with the given 'id'. - 2. The attributes defined in the static Kibana configuration key - 'xpack.infra.sources.default'. - 3. The hard-coded default values. - - As a consequence, querying a source that doesn't exist doesn't error out, - but returns the configured or hardcoded defaults. - """ - source("The id of the source" id: ID!): InfraSource! - "Get a list of all infrastructure data sources" - allSources: [InfraSource!]! - } - - "The properties to update the source with" - input UpdateSourceInput { - "The name of the data source" - name: String - "A description of the data source" - description: String - "The alias to read metric data from" - metricAlias: String - "The alias to read log data from" - logAlias: String - "The field mapping to use for this source" - fields: UpdateSourceFieldsInput - "The log columns to display for this source" - logColumns: [UpdateSourceLogColumnInput!] 
- } - - "The mapping of semantic fields of the source to be created" - input UpdateSourceFieldsInput { - "The field to identify a container by" - container: String - "The fields to identify a host by" - host: String - "The field to identify a pod by" - pod: String - "The field to use as a tiebreaker for log events that have identical timestamps" - tiebreaker: String - "The field to use as a timestamp for metrics and logs" - timestamp: String - } - - "One of the log column types to display for this source" - input UpdateSourceLogColumnInput { - "A custom field log column" - fieldColumn: UpdateSourceFieldLogColumnInput - "A built-in message log column" - messageColumn: UpdateSourceMessageLogColumnInput - "A built-in timestamp log column" - timestampColumn: UpdateSourceTimestampLogColumnInput - } - - input UpdateSourceFieldLogColumnInput { - id: ID! - field: String! - } - - input UpdateSourceMessageLogColumnInput { - id: ID! - } - - input UpdateSourceTimestampLogColumnInput { - id: ID! - } - - "The result of a successful source update" - type UpdateSourceResult { - "The source that was updated" - source: InfraSource! - } - - "The result of a source deletion operations" - type DeleteSourceResult { - "The id of the source that was deleted" - id: ID! - } - - extend type Mutation { - "Create a new source of infrastructure data" - createSource( - "The id of the source" - id: ID! - sourceProperties: UpdateSourceInput! - ): UpdateSourceResult! - "Modify an existing source" - updateSource( - "The id of the source" - id: ID! - "The properties to update the source with" - sourceProperties: UpdateSourceInput! - ): UpdateSourceResult! - "Delete a source of infrastructure data" - deleteSource("The id of the source" id: ID!): DeleteSourceResult! 
- } -`; diff --git a/x-pack/legacy/plugins/infra/server/graphql/types.ts b/x-pack/legacy/plugins/infra/server/graphql/types.ts deleted file mode 100644 index 1d6b03ac7bffb..0000000000000 --- a/x-pack/legacy/plugins/infra/server/graphql/types.ts +++ /dev/null @@ -1,1513 +0,0 @@ -/* tslint:disable */ -import { InfraContext } from '../lib/infra_types'; -import { GraphQLResolveInfo } from 'graphql'; - -export type Resolver = ( - parent: Parent, - args: Args, - context: Context, - info: GraphQLResolveInfo -) => Promise | Result; - -export interface ISubscriptionResolverObject { - subscribe( - parent: P, - args: Args, - context: Context, - info: GraphQLResolveInfo - ): AsyncIterator; - resolve?( - parent: P, - args: Args, - context: Context, - info: GraphQLResolveInfo - ): R | Result | Promise; -} - -export type SubscriptionResolver = - | ((...args: any[]) => ISubscriptionResolverObject) - | ISubscriptionResolverObject; - -// ==================================================== -// START: Typescript template -// ==================================================== - -// ==================================================== -// Types -// ==================================================== - -export interface Query { - /** Get an infrastructure data source by id.The resolution order for the source configuration attributes is as followswith the first defined value winning:1. The attributes of the saved object with the given 'id'.2. The attributes defined in the static Kibana configuration key'xpack.infra.sources.default'.3. The hard-coded default values.As a consequence, querying a source that doesn't exist doesn't error out,but returns the configured or hardcoded defaults. 
*/ - source: InfraSource; - /** Get a list of all infrastructure data sources */ - allSources: InfraSource[]; -} -/** A source of infrastructure data */ -export interface InfraSource { - /** The id of the source */ - id: string; - /** The version number the source configuration was last persisted with */ - version?: string | null; - /** The timestamp the source configuration was last persisted at */ - updatedAt?: number | null; - /** The origin of the source (one of 'fallback', 'internal', 'stored') */ - origin: string; - /** The raw configuration of the source */ - configuration: InfraSourceConfiguration; - /** The status of the source */ - status: InfraSourceStatus; - /** A consecutive span of log entries surrounding a point in time */ - logEntriesAround: InfraLogEntryInterval; - /** A consecutive span of log entries within an interval */ - logEntriesBetween: InfraLogEntryInterval; - /** Sequences of log entries matching sets of highlighting queries within an interval */ - logEntryHighlights: InfraLogEntryInterval[]; - - /** A snapshot of nodes */ - snapshot?: InfraSnapshotResponse | null; - - metrics: InfraMetricData[]; -} -/** A set of configuration options for an infrastructure data source */ -export interface InfraSourceConfiguration { - /** The name of the data source */ - name: string; - /** A description of the data source */ - description: string; - /** The alias to read metric data from */ - metricAlias: string; - /** The alias to read log data from */ - logAlias: string; - /** The field mapping to use for this source */ - fields: InfraSourceFields; - /** The columns to use for log display */ - logColumns: InfraSourceLogColumn[]; -} -/** A mapping of semantic fields to their document counterparts */ -export interface InfraSourceFields { - /** The field to identify a container by */ - container: string; - /** The fields to identify a host by */ - host: string; - /** The fields to use as the log message */ - message: string[]; - /** The field to identify a 
pod by */ - pod: string; - /** The field to use as a tiebreaker for log events that have identical timestamps */ - tiebreaker: string; - /** The field to use as a timestamp for metrics and logs */ - timestamp: string; -} -/** The built-in timestamp log column */ -export interface InfraSourceTimestampLogColumn { - timestampColumn: InfraSourceTimestampLogColumnAttributes; -} - -export interface InfraSourceTimestampLogColumnAttributes { - /** A unique id for the column */ - id: string; -} -/** The built-in message log column */ -export interface InfraSourceMessageLogColumn { - messageColumn: InfraSourceMessageLogColumnAttributes; -} - -export interface InfraSourceMessageLogColumnAttributes { - /** A unique id for the column */ - id: string; -} -/** A log column containing a field value */ -export interface InfraSourceFieldLogColumn { - fieldColumn: InfraSourceFieldLogColumnAttributes; -} - -export interface InfraSourceFieldLogColumnAttributes { - /** A unique id for the column */ - id: string; - /** The field name this column refers to */ - field: string; -} -/** The status of an infrastructure data source */ -export interface InfraSourceStatus { - /** Whether the configured metric alias exists */ - metricAliasExists: boolean; - /** Whether the configured log alias exists */ - logAliasExists: boolean; - /** Whether the configured alias or wildcard pattern resolve to any metric indices */ - metricIndicesExist: boolean; - /** Whether the configured alias or wildcard pattern resolve to any log indices */ - logIndicesExist: boolean; - /** The list of indices in the metric alias */ - metricIndices: string[]; - /** The list of indices in the log alias */ - logIndices: string[]; - /** The list of fields defined in the index mappings */ - indexFields: InfraIndexField[]; -} -/** A descriptor of a field in an index */ -export interface InfraIndexField { - /** The name of the field */ - name: string; - /** The type of the field's values as recognized by Kibana */ - type: string; 
- /** Whether the field's values can be efficiently searched for */ - searchable: boolean; - /** Whether the field's values can be aggregated */ - aggregatable: boolean; - /** Whether the field should be displayed based on event.module and a ECS allowed list */ - displayable: boolean; -} -/** A consecutive sequence of log entries */ -export interface InfraLogEntryInterval { - /** The key corresponding to the start of the interval covered by the entries */ - start?: InfraTimeKey | null; - /** The key corresponding to the end of the interval covered by the entries */ - end?: InfraTimeKey | null; - /** Whether there are more log entries available before the start */ - hasMoreBefore: boolean; - /** Whether there are more log entries available after the end */ - hasMoreAfter: boolean; - /** The query the log entries were filtered by */ - filterQuery?: string | null; - /** The query the log entries were highlighted with */ - highlightQuery?: string | null; - /** A list of the log entries */ - entries: InfraLogEntry[]; -} -/** A representation of the log entry's position in the event stream */ -export interface InfraTimeKey { - /** The timestamp of the event that the log entry corresponds to */ - time: number; - /** The tiebreaker that disambiguates events with the same timestamp */ - tiebreaker: number; -} -/** A log entry */ -export interface InfraLogEntry { - /** A unique representation of the log entry's position in the event stream */ - key: InfraTimeKey; - /** The log entry's id */ - gid: string; - /** The source id */ - source: string; - /** The columns used for rendering the log entry */ - columns: InfraLogEntryColumn[]; -} -/** A special built-in column that contains the log entry's timestamp */ -export interface InfraLogEntryTimestampColumn { - /** The id of the corresponding column configuration */ - columnId: string; - /** The timestamp */ - timestamp: number; -} -/** A special built-in column that contains the log entry's constructed message */ -export 
interface InfraLogEntryMessageColumn { - /** The id of the corresponding column configuration */ - columnId: string; - /** A list of the formatted log entry segments */ - message: InfraLogMessageSegment[]; -} -/** A segment of the log entry message that was derived from a field */ -export interface InfraLogMessageFieldSegment { - /** The field the segment was derived from */ - field: string; - /** The segment's message */ - value: string; - /** A list of highlighted substrings of the value */ - highlights: string[]; -} -/** A segment of the log entry message that was derived from a string literal */ -export interface InfraLogMessageConstantSegment { - /** The segment's message */ - constant: string; -} -/** A column that contains the value of a field of the log entry */ -export interface InfraLogEntryFieldColumn { - /** The id of the corresponding column configuration */ - columnId: string; - /** The field name of the column */ - field: string; - /** The value of the field in the log entry */ - value: string; - /** A list of highlighted substrings of the value */ - highlights: string[]; -} - -export interface InfraSnapshotResponse { - /** Nodes of type host, container or pod grouped by 0, 1 or 2 terms */ - nodes: InfraSnapshotNode[]; -} - -export interface InfraSnapshotNode { - path: InfraSnapshotNodePath[]; - - metric: InfraSnapshotNodeMetric; -} - -export interface InfraSnapshotNodePath { - value: string; - - label: string; - - ip?: string | null; -} - -export interface InfraSnapshotNodeMetric { - name: InfraSnapshotMetricType; - - value?: number | null; - - avg?: number | null; - - max?: number | null; -} - -export interface InfraMetricData { - id?: InfraMetric | null; - - series: InfraDataSeries[]; -} - -export interface InfraDataSeries { - id: string; - - label: string; - - data: InfraDataPoint[]; -} - -export interface InfraDataPoint { - timestamp: number; - - value?: number | null; -} - -export interface Mutation { - /** Create a new source of infrastructure 
data */ - createSource: UpdateSourceResult; - /** Modify an existing source */ - updateSource: UpdateSourceResult; - /** Delete a source of infrastructure data */ - deleteSource: DeleteSourceResult; -} -/** The result of a successful source update */ -export interface UpdateSourceResult { - /** The source that was updated */ - source: InfraSource; -} -/** The result of a source deletion operations */ -export interface DeleteSourceResult { - /** The id of the source that was deleted */ - id: string; -} - -// ==================================================== -// InputTypes -// ==================================================== - -export interface InfraTimeKeyInput { - time: number; - - tiebreaker: number; -} -/** A highlighting definition */ -export interface InfraLogEntryHighlightInput { - /** The query to highlight by */ - query: string; - /** The number of highlighted documents to include beyond the beginning of the interval */ - countBefore: number; - /** The number of highlighted documents to include beyond the end of the interval */ - countAfter: number; -} - -export interface InfraTimerangeInput { - /** The interval string to use for last bucket. The format is '{value}{unit}'. For example '5m' would return the metrics for the last 5 minutes of the timespan. 
*/ - interval: string; - /** The end of the timerange */ - to: number; - /** The beginning of the timerange */ - from: number; -} - -export interface InfraSnapshotGroupbyInput { - /** The label to use in the results for the group by for the terms group by */ - label?: string | null; - /** The field to group by from a terms aggregation, this is ignored by the filter type */ - field?: string | null; -} - -export interface InfraSnapshotMetricInput { - /** The type of metric */ - type: InfraSnapshotMetricType; -} - -export interface InfraNodeIdsInput { - nodeId: string; - - cloudId?: string | null; -} -/** The properties to update the source with */ -export interface UpdateSourceInput { - /** The name of the data source */ - name?: string | null; - /** A description of the data source */ - description?: string | null; - /** The alias to read metric data from */ - metricAlias?: string | null; - /** The alias to read log data from */ - logAlias?: string | null; - /** The field mapping to use for this source */ - fields?: UpdateSourceFieldsInput | null; - /** The log columns to display for this source */ - logColumns?: UpdateSourceLogColumnInput[] | null; -} -/** The mapping of semantic fields of the source to be created */ -export interface UpdateSourceFieldsInput { - /** The field to identify a container by */ - container?: string | null; - /** The fields to identify a host by */ - host?: string | null; - /** The field to identify a pod by */ - pod?: string | null; - /** The field to use as a tiebreaker for log events that have identical timestamps */ - tiebreaker?: string | null; - /** The field to use as a timestamp for metrics and logs */ - timestamp?: string | null; -} -/** One of the log column types to display for this source */ -export interface UpdateSourceLogColumnInput { - /** A custom field log column */ - fieldColumn?: UpdateSourceFieldLogColumnInput | null; - /** A built-in message log column */ - messageColumn?: UpdateSourceMessageLogColumnInput | null; - 
/** A built-in timestamp log column */ - timestampColumn?: UpdateSourceTimestampLogColumnInput | null; -} - -export interface UpdateSourceFieldLogColumnInput { - id: string; - - field: string; -} - -export interface UpdateSourceMessageLogColumnInput { - id: string; -} - -export interface UpdateSourceTimestampLogColumnInput { - id: string; -} - -// ==================================================== -// Arguments -// ==================================================== - -export interface SourceQueryArgs { - /** The id of the source */ - id: string; -} -export interface LogEntriesAroundInfraSourceArgs { - /** The sort key that corresponds to the point in time */ - key: InfraTimeKeyInput; - /** The maximum number of preceding to return */ - countBefore?: number | null; - /** The maximum number of following to return */ - countAfter?: number | null; - /** The query to filter the log entries by */ - filterQuery?: string | null; -} -export interface LogEntriesBetweenInfraSourceArgs { - /** The sort key that corresponds to the start of the interval */ - startKey: InfraTimeKeyInput; - /** The sort key that corresponds to the end of the interval */ - endKey: InfraTimeKeyInput; - /** The query to filter the log entries by */ - filterQuery?: string | null; -} -export interface LogEntryHighlightsInfraSourceArgs { - /** The sort key that corresponds to the start of the interval */ - startKey: InfraTimeKeyInput; - /** The sort key that corresponds to the end of the interval */ - endKey: InfraTimeKeyInput; - /** The query to filter the log entries by */ - filterQuery?: string | null; - /** The highlighting to apply to the log entries */ - highlights: InfraLogEntryHighlightInput[]; -} -export interface SnapshotInfraSourceArgs { - timerange: InfraTimerangeInput; - - filterQuery?: string | null; -} -export interface MetricsInfraSourceArgs { - nodeIds: InfraNodeIdsInput; - - nodeType: InfraNodeType; - - timerange: InfraTimerangeInput; - - metrics: InfraMetric[]; -} -export 
interface IndexFieldsInfraSourceStatusArgs { - indexType?: InfraIndexType | null; -} -export interface NodesInfraSnapshotResponseArgs { - type: InfraNodeType; - - groupBy: InfraSnapshotGroupbyInput[]; - - metric: InfraSnapshotMetricInput; -} -export interface CreateSourceMutationArgs { - /** The id of the source */ - id: string; - - sourceProperties: UpdateSourceInput; -} -export interface UpdateSourceMutationArgs { - /** The id of the source */ - id: string; - /** The properties to update the source with */ - sourceProperties: UpdateSourceInput; -} -export interface DeleteSourceMutationArgs { - /** The id of the source */ - id: string; -} - -// ==================================================== -// Enums -// ==================================================== - -export enum InfraIndexType { - ANY = 'ANY', - LOGS = 'LOGS', - METRICS = 'METRICS', -} - -export enum InfraNodeType { - pod = 'pod', - container = 'container', - host = 'host', - awsEC2 = 'awsEC2', - awsS3 = 'awsS3', - awsRDS = 'awsRDS', - awsSQS = 'awsSQS', -} - -export enum InfraSnapshotMetricType { - count = 'count', - cpu = 'cpu', - load = 'load', - memory = 'memory', - tx = 'tx', - rx = 'rx', - logRate = 'logRate', - diskIOReadBytes = 'diskIOReadBytes', - diskIOWriteBytes = 'diskIOWriteBytes', - s3TotalRequests = 's3TotalRequests', - s3NumberOfObjects = 's3NumberOfObjects', - s3BucketSize = 's3BucketSize', - s3DownloadBytes = 's3DownloadBytes', - s3UploadBytes = 's3UploadBytes', - rdsConnections = 'rdsConnections', - rdsQueriesExecuted = 'rdsQueriesExecuted', - rdsActiveTransactions = 'rdsActiveTransactions', - rdsLatency = 'rdsLatency', - sqsMessagesVisible = 'sqsMessagesVisible', - sqsMessagesDelayed = 'sqsMessagesDelayed', - sqsMessagesSent = 'sqsMessagesSent', - sqsMessagesEmpty = 'sqsMessagesEmpty', - sqsOldestMessage = 'sqsOldestMessage', -} - -export enum InfraMetric { - hostSystemOverview = 'hostSystemOverview', - hostCpuUsage = 'hostCpuUsage', - hostFilesystem = 'hostFilesystem', - 
hostK8sOverview = 'hostK8sOverview', - hostK8sCpuCap = 'hostK8sCpuCap', - hostK8sDiskCap = 'hostK8sDiskCap', - hostK8sMemoryCap = 'hostK8sMemoryCap', - hostK8sPodCap = 'hostK8sPodCap', - hostLoad = 'hostLoad', - hostMemoryUsage = 'hostMemoryUsage', - hostNetworkTraffic = 'hostNetworkTraffic', - hostDockerOverview = 'hostDockerOverview', - hostDockerInfo = 'hostDockerInfo', - hostDockerTop5ByCpu = 'hostDockerTop5ByCpu', - hostDockerTop5ByMemory = 'hostDockerTop5ByMemory', - podOverview = 'podOverview', - podCpuUsage = 'podCpuUsage', - podMemoryUsage = 'podMemoryUsage', - podLogUsage = 'podLogUsage', - podNetworkTraffic = 'podNetworkTraffic', - containerOverview = 'containerOverview', - containerCpuKernel = 'containerCpuKernel', - containerCpuUsage = 'containerCpuUsage', - containerDiskIOOps = 'containerDiskIOOps', - containerDiskIOBytes = 'containerDiskIOBytes', - containerMemory = 'containerMemory', - containerNetworkTraffic = 'containerNetworkTraffic', - nginxHits = 'nginxHits', - nginxRequestRate = 'nginxRequestRate', - nginxActiveConnections = 'nginxActiveConnections', - nginxRequestsPerConnection = 'nginxRequestsPerConnection', - awsOverview = 'awsOverview', - awsCpuUtilization = 'awsCpuUtilization', - awsNetworkBytes = 'awsNetworkBytes', - awsNetworkPackets = 'awsNetworkPackets', - awsDiskioBytes = 'awsDiskioBytes', - awsDiskioOps = 'awsDiskioOps', - awsEC2CpuUtilization = 'awsEC2CpuUtilization', - awsEC2DiskIOBytes = 'awsEC2DiskIOBytes', - awsEC2NetworkTraffic = 'awsEC2NetworkTraffic', - awsS3TotalRequests = 'awsS3TotalRequests', - awsS3NumberOfObjects = 'awsS3NumberOfObjects', - awsS3BucketSize = 'awsS3BucketSize', - awsS3DownloadBytes = 'awsS3DownloadBytes', - awsS3UploadBytes = 'awsS3UploadBytes', - awsRDSCpuTotal = 'awsRDSCpuTotal', - awsRDSConnections = 'awsRDSConnections', - awsRDSQueriesExecuted = 'awsRDSQueriesExecuted', - awsRDSActiveTransactions = 'awsRDSActiveTransactions', - awsRDSLatency = 'awsRDSLatency', - awsSQSMessagesVisible = 
'awsSQSMessagesVisible', - awsSQSMessagesDelayed = 'awsSQSMessagesDelayed', - awsSQSMessagesSent = 'awsSQSMessagesSent', - awsSQSMessagesEmpty = 'awsSQSMessagesEmpty', - awsSQSOldestMessage = 'awsSQSOldestMessage', - custom = 'custom', -} - -// ==================================================== -// Unions -// ==================================================== - -/** All known log column types */ -export type InfraSourceLogColumn = - | InfraSourceTimestampLogColumn - | InfraSourceMessageLogColumn - | InfraSourceFieldLogColumn; - -/** A column of a log entry */ -export type InfraLogEntryColumn = - | InfraLogEntryTimestampColumn - | InfraLogEntryMessageColumn - | InfraLogEntryFieldColumn; - -/** A segment of the log entry message */ -export type InfraLogMessageSegment = InfraLogMessageFieldSegment | InfraLogMessageConstantSegment; - -// ==================================================== -// END: Typescript template -// ==================================================== - -// ==================================================== -// Resolvers -// ==================================================== - -export namespace QueryResolvers { - export interface Resolvers { - /** Get an infrastructure data source by id.The resolution order for the source configuration attributes is as followswith the first defined value winning:1. The attributes of the saved object with the given 'id'.2. The attributes defined in the static Kibana configuration key'xpack.infra.sources.default'.3. The hard-coded default values.As a consequence, querying a source that doesn't exist doesn't error out,but returns the configured or hardcoded defaults. 
*/ - source?: SourceResolver; - /** Get a list of all infrastructure data sources */ - allSources?: AllSourcesResolver; - } - - export type SourceResolver = Resolver< - R, - Parent, - Context, - SourceArgs - >; - export interface SourceArgs { - /** The id of the source */ - id: string; - } - - export type AllSourcesResolver< - R = InfraSource[], - Parent = never, - Context = InfraContext - > = Resolver; -} -/** A source of infrastructure data */ -export namespace InfraSourceResolvers { - export interface Resolvers { - /** The id of the source */ - id?: IdResolver; - /** The version number the source configuration was last persisted with */ - version?: VersionResolver; - /** The timestamp the source configuration was last persisted at */ - updatedAt?: UpdatedAtResolver; - /** The origin of the source (one of 'fallback', 'internal', 'stored') */ - origin?: OriginResolver; - /** The raw configuration of the source */ - configuration?: ConfigurationResolver; - /** The status of the source */ - status?: StatusResolver; - /** A consecutive span of log entries surrounding a point in time */ - logEntriesAround?: LogEntriesAroundResolver; - /** A consecutive span of log entries within an interval */ - logEntriesBetween?: LogEntriesBetweenResolver; - /** Sequences of log entries matching sets of highlighting queries within an interval */ - logEntryHighlights?: LogEntryHighlightsResolver; - - /** A snapshot of nodes */ - snapshot?: SnapshotResolver; - - metrics?: MetricsResolver; - } - - export type IdResolver = Resolver< - R, - Parent, - Context - >; - export type VersionResolver< - R = string | null, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export type UpdatedAtResolver< - R = number | null, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export type OriginResolver = Resolver< - R, - Parent, - Context - >; - export type ConfigurationResolver< - R = InfraSourceConfiguration, - Parent = InfraSource, - Context = InfraContext - > = 
Resolver; - export type StatusResolver< - R = InfraSourceStatus, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export type LogEntriesAroundResolver< - R = InfraLogEntryInterval, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export interface LogEntriesAroundArgs { - /** The sort key that corresponds to the point in time */ - key: InfraTimeKeyInput; - /** The maximum number of preceding to return */ - countBefore?: number | null; - /** The maximum number of following to return */ - countAfter?: number | null; - /** The query to filter the log entries by */ - filterQuery?: string | null; - } - - export type LogEntriesBetweenResolver< - R = InfraLogEntryInterval, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export interface LogEntriesBetweenArgs { - /** The sort key that corresponds to the start of the interval */ - startKey: InfraTimeKeyInput; - /** The sort key that corresponds to the end of the interval */ - endKey: InfraTimeKeyInput; - /** The query to filter the log entries by */ - filterQuery?: string | null; - } - - export type LogEntryHighlightsResolver< - R = InfraLogEntryInterval[], - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export interface LogEntryHighlightsArgs { - /** The sort key that corresponds to the start of the interval */ - startKey: InfraTimeKeyInput; - /** The sort key that corresponds to the end of the interval */ - endKey: InfraTimeKeyInput; - /** The query to filter the log entries by */ - filterQuery?: string | null; - /** The highlighting to apply to the log entries */ - highlights: InfraLogEntryHighlightInput[]; - } - - export type SnapshotResolver< - R = InfraSnapshotResponse | null, - Parent = InfraSource, - Context = InfraContext - > = Resolver; - export interface SnapshotArgs { - timerange: InfraTimerangeInput; - - filterQuery?: string | null; - } - - export type MetricsResolver< - R = InfraMetricData[], - Parent = InfraSource, - Context = 
InfraContext - > = Resolver; - export interface MetricsArgs { - nodeIds: InfraNodeIdsInput; - - nodeType: InfraNodeType; - - timerange: InfraTimerangeInput; - - metrics: InfraMetric[]; - } -} -/** A set of configuration options for an infrastructure data source */ -export namespace InfraSourceConfigurationResolvers { - export interface Resolvers { - /** The name of the data source */ - name?: NameResolver; - /** A description of the data source */ - description?: DescriptionResolver; - /** The alias to read metric data from */ - metricAlias?: MetricAliasResolver; - /** The alias to read log data from */ - logAlias?: LogAliasResolver; - /** The field mapping to use for this source */ - fields?: FieldsResolver; - /** The columns to use for log display */ - logColumns?: LogColumnsResolver; - } - - export type NameResolver< - R = string, - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; - export type DescriptionResolver< - R = string, - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; - export type MetricAliasResolver< - R = string, - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; - export type LogAliasResolver< - R = string, - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; - export type FieldsResolver< - R = InfraSourceFields, - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; - export type LogColumnsResolver< - R = InfraSourceLogColumn[], - Parent = InfraSourceConfiguration, - Context = InfraContext - > = Resolver; -} -/** A mapping of semantic fields to their document counterparts */ -export namespace InfraSourceFieldsResolvers { - export interface Resolvers { - /** The field to identify a container by */ - container?: ContainerResolver; - /** The fields to identify a host by */ - host?: HostResolver; - /** The fields to use as the log message */ - message?: MessageResolver; - /** The field to identify a pod by */ - pod?: 
PodResolver; - /** The field to use as a tiebreaker for log events that have identical timestamps */ - tiebreaker?: TiebreakerResolver; - /** The field to use as a timestamp for metrics and logs */ - timestamp?: TimestampResolver; - } - - export type ContainerResolver< - R = string, - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; - export type HostResolver< - R = string, - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; - export type MessageResolver< - R = string[], - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; - export type PodResolver< - R = string, - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; - export type TiebreakerResolver< - R = string, - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; - export type TimestampResolver< - R = string, - Parent = InfraSourceFields, - Context = InfraContext - > = Resolver; -} -/** The built-in timestamp log column */ -export namespace InfraSourceTimestampLogColumnResolvers { - export interface Resolvers { - timestampColumn?: TimestampColumnResolver< - InfraSourceTimestampLogColumnAttributes, - TypeParent, - Context - >; - } - - export type TimestampColumnResolver< - R = InfraSourceTimestampLogColumnAttributes, - Parent = InfraSourceTimestampLogColumn, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSourceTimestampLogColumnAttributesResolvers { - export interface Resolvers< - Context = InfraContext, - TypeParent = InfraSourceTimestampLogColumnAttributes - > { - /** A unique id for the column */ - id?: IdResolver; - } - - export type IdResolver< - R = string, - Parent = InfraSourceTimestampLogColumnAttributes, - Context = InfraContext - > = Resolver; -} -/** The built-in message log column */ -export namespace InfraSourceMessageLogColumnResolvers { - export interface Resolvers { - messageColumn?: MessageColumnResolver< - InfraSourceMessageLogColumnAttributes, - TypeParent, - Context - >; - } - - 
export type MessageColumnResolver< - R = InfraSourceMessageLogColumnAttributes, - Parent = InfraSourceMessageLogColumn, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSourceMessageLogColumnAttributesResolvers { - export interface Resolvers< - Context = InfraContext, - TypeParent = InfraSourceMessageLogColumnAttributes - > { - /** A unique id for the column */ - id?: IdResolver; - } - - export type IdResolver< - R = string, - Parent = InfraSourceMessageLogColumnAttributes, - Context = InfraContext - > = Resolver; -} -/** A log column containing a field value */ -export namespace InfraSourceFieldLogColumnResolvers { - export interface Resolvers { - fieldColumn?: FieldColumnResolver; - } - - export type FieldColumnResolver< - R = InfraSourceFieldLogColumnAttributes, - Parent = InfraSourceFieldLogColumn, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSourceFieldLogColumnAttributesResolvers { - export interface Resolvers< - Context = InfraContext, - TypeParent = InfraSourceFieldLogColumnAttributes - > { - /** A unique id for the column */ - id?: IdResolver; - /** The field name this column refers to */ - field?: FieldResolver; - } - - export type IdResolver< - R = string, - Parent = InfraSourceFieldLogColumnAttributes, - Context = InfraContext - > = Resolver; - export type FieldResolver< - R = string, - Parent = InfraSourceFieldLogColumnAttributes, - Context = InfraContext - > = Resolver; -} -/** The status of an infrastructure data source */ -export namespace InfraSourceStatusResolvers { - export interface Resolvers { - /** Whether the configured metric alias exists */ - metricAliasExists?: MetricAliasExistsResolver; - /** Whether the configured log alias exists */ - logAliasExists?: LogAliasExistsResolver; - /** Whether the configured alias or wildcard pattern resolve to any metric indices */ - metricIndicesExist?: MetricIndicesExistResolver; - /** Whether the configured alias or wildcard pattern resolve to any log indices 
*/ - logIndicesExist?: LogIndicesExistResolver; - /** The list of indices in the metric alias */ - metricIndices?: MetricIndicesResolver; - /** The list of indices in the log alias */ - logIndices?: LogIndicesResolver; - /** The list of fields defined in the index mappings */ - indexFields?: IndexFieldsResolver; - } - - export type MetricAliasExistsResolver< - R = boolean, - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type LogAliasExistsResolver< - R = boolean, - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type MetricIndicesExistResolver< - R = boolean, - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type LogIndicesExistResolver< - R = boolean, - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type MetricIndicesResolver< - R = string[], - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type LogIndicesResolver< - R = string[], - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export type IndexFieldsResolver< - R = InfraIndexField[], - Parent = InfraSourceStatus, - Context = InfraContext - > = Resolver; - export interface IndexFieldsArgs { - indexType?: InfraIndexType | null; - } -} -/** A descriptor of a field in an index */ -export namespace InfraIndexFieldResolvers { - export interface Resolvers { - /** The name of the field */ - name?: NameResolver; - /** The type of the field's values as recognized by Kibana */ - type?: TypeResolver; - /** Whether the field's values can be efficiently searched for */ - searchable?: SearchableResolver; - /** Whether the field's values can be aggregated */ - aggregatable?: AggregatableResolver; - /** Whether the field should be displayed based on event.module and a ECS allowed list */ - displayable?: DisplayableResolver; - } - - export type NameResolver = Resolver< - R, - Parent, - Context - >; - export type TypeResolver = Resolver< - R, - Parent, - 
Context - >; - export type SearchableResolver< - R = boolean, - Parent = InfraIndexField, - Context = InfraContext - > = Resolver; - export type AggregatableResolver< - R = boolean, - Parent = InfraIndexField, - Context = InfraContext - > = Resolver; - export type DisplayableResolver< - R = boolean, - Parent = InfraIndexField, - Context = InfraContext - > = Resolver; -} -/** A consecutive sequence of log entries */ -export namespace InfraLogEntryIntervalResolvers { - export interface Resolvers { - /** The key corresponding to the start of the interval covered by the entries */ - start?: StartResolver; - /** The key corresponding to the end of the interval covered by the entries */ - end?: EndResolver; - /** Whether there are more log entries available before the start */ - hasMoreBefore?: HasMoreBeforeResolver; - /** Whether there are more log entries available after the end */ - hasMoreAfter?: HasMoreAfterResolver; - /** The query the log entries were filtered by */ - filterQuery?: FilterQueryResolver; - /** The query the log entries were highlighted with */ - highlightQuery?: HighlightQueryResolver; - /** A list of the log entries */ - entries?: EntriesResolver; - } - - export type StartResolver< - R = InfraTimeKey | null, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type EndResolver< - R = InfraTimeKey | null, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type HasMoreBeforeResolver< - R = boolean, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type HasMoreAfterResolver< - R = boolean, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type FilterQueryResolver< - R = string | null, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type HighlightQueryResolver< - R = string | null, - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; - export type EntriesResolver< - 
R = InfraLogEntry[], - Parent = InfraLogEntryInterval, - Context = InfraContext - > = Resolver; -} -/** A representation of the log entry's position in the event stream */ -export namespace InfraTimeKeyResolvers { - export interface Resolvers { - /** The timestamp of the event that the log entry corresponds to */ - time?: TimeResolver; - /** The tiebreaker that disambiguates events with the same timestamp */ - tiebreaker?: TiebreakerResolver; - } - - export type TimeResolver = Resolver< - R, - Parent, - Context - >; - export type TiebreakerResolver< - R = number, - Parent = InfraTimeKey, - Context = InfraContext - > = Resolver; -} -/** A log entry */ -export namespace InfraLogEntryResolvers { - export interface Resolvers { - /** A unique representation of the log entry's position in the event stream */ - key?: KeyResolver; - /** The log entry's id */ - gid?: GidResolver; - /** The source id */ - source?: SourceResolver; - /** The columns used for rendering the log entry */ - columns?: ColumnsResolver; - } - - export type KeyResolver< - R = InfraTimeKey, - Parent = InfraLogEntry, - Context = InfraContext - > = Resolver; - export type GidResolver = Resolver< - R, - Parent, - Context - >; - export type SourceResolver = Resolver< - R, - Parent, - Context - >; - export type ColumnsResolver< - R = InfraLogEntryColumn[], - Parent = InfraLogEntry, - Context = InfraContext - > = Resolver; -} -/** A special built-in column that contains the log entry's timestamp */ -export namespace InfraLogEntryTimestampColumnResolvers { - export interface Resolvers { - /** The id of the corresponding column configuration */ - columnId?: ColumnIdResolver; - /** The timestamp */ - timestamp?: TimestampResolver; - } - - export type ColumnIdResolver< - R = string, - Parent = InfraLogEntryTimestampColumn, - Context = InfraContext - > = Resolver; - export type TimestampResolver< - R = number, - Parent = InfraLogEntryTimestampColumn, - Context = InfraContext - > = Resolver; -} -/** A special 
built-in column that contains the log entry's constructed message */ -export namespace InfraLogEntryMessageColumnResolvers { - export interface Resolvers { - /** The id of the corresponding column configuration */ - columnId?: ColumnIdResolver; - /** A list of the formatted log entry segments */ - message?: MessageResolver; - } - - export type ColumnIdResolver< - R = string, - Parent = InfraLogEntryMessageColumn, - Context = InfraContext - > = Resolver; - export type MessageResolver< - R = InfraLogMessageSegment[], - Parent = InfraLogEntryMessageColumn, - Context = InfraContext - > = Resolver; -} -/** A segment of the log entry message that was derived from a field */ -export namespace InfraLogMessageFieldSegmentResolvers { - export interface Resolvers { - /** The field the segment was derived from */ - field?: FieldResolver; - /** The segment's message */ - value?: ValueResolver; - /** A list of highlighted substrings of the value */ - highlights?: HighlightsResolver; - } - - export type FieldResolver< - R = string, - Parent = InfraLogMessageFieldSegment, - Context = InfraContext - > = Resolver; - export type ValueResolver< - R = string, - Parent = InfraLogMessageFieldSegment, - Context = InfraContext - > = Resolver; - export type HighlightsResolver< - R = string[], - Parent = InfraLogMessageFieldSegment, - Context = InfraContext - > = Resolver; -} -/** A segment of the log entry message that was derived from a string literal */ -export namespace InfraLogMessageConstantSegmentResolvers { - export interface Resolvers { - /** The segment's message */ - constant?: ConstantResolver; - } - - export type ConstantResolver< - R = string, - Parent = InfraLogMessageConstantSegment, - Context = InfraContext - > = Resolver; -} -/** A column that contains the value of a field of the log entry */ -export namespace InfraLogEntryFieldColumnResolvers { - export interface Resolvers { - /** The id of the corresponding column configuration */ - columnId?: ColumnIdResolver; - /** The 
field name of the column */ - field?: FieldResolver; - /** The value of the field in the log entry */ - value?: ValueResolver; - /** A list of highlighted substrings of the value */ - highlights?: HighlightsResolver; - } - - export type ColumnIdResolver< - R = string, - Parent = InfraLogEntryFieldColumn, - Context = InfraContext - > = Resolver; - export type FieldResolver< - R = string, - Parent = InfraLogEntryFieldColumn, - Context = InfraContext - > = Resolver; - export type ValueResolver< - R = string, - Parent = InfraLogEntryFieldColumn, - Context = InfraContext - > = Resolver; - export type HighlightsResolver< - R = string[], - Parent = InfraLogEntryFieldColumn, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSnapshotResponseResolvers { - export interface Resolvers { - /** Nodes of type host, container or pod grouped by 0, 1 or 2 terms */ - nodes?: NodesResolver; - } - - export type NodesResolver< - R = InfraSnapshotNode[], - Parent = InfraSnapshotResponse, - Context = InfraContext - > = Resolver; - export interface NodesArgs { - type: InfraNodeType; - - groupBy: InfraSnapshotGroupbyInput[]; - - metric: InfraSnapshotMetricInput; - } -} - -export namespace InfraSnapshotNodeResolvers { - export interface Resolvers { - path?: PathResolver; - - metric?: MetricResolver; - } - - export type PathResolver< - R = InfraSnapshotNodePath[], - Parent = InfraSnapshotNode, - Context = InfraContext - > = Resolver; - export type MetricResolver< - R = InfraSnapshotNodeMetric, - Parent = InfraSnapshotNode, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSnapshotNodePathResolvers { - export interface Resolvers { - value?: ValueResolver; - - label?: LabelResolver; - - ip?: IpResolver; - } - - export type ValueResolver< - R = string, - Parent = InfraSnapshotNodePath, - Context = InfraContext - > = Resolver; - export type LabelResolver< - R = string, - Parent = InfraSnapshotNodePath, - Context = InfraContext - > = Resolver; - export type 
IpResolver< - R = string | null, - Parent = InfraSnapshotNodePath, - Context = InfraContext - > = Resolver; -} - -export namespace InfraSnapshotNodeMetricResolvers { - export interface Resolvers { - name?: NameResolver; - - value?: ValueResolver; - - avg?: AvgResolver; - - max?: MaxResolver; - } - - export type NameResolver< - R = InfraSnapshotMetricType, - Parent = InfraSnapshotNodeMetric, - Context = InfraContext - > = Resolver; - export type ValueResolver< - R = number | null, - Parent = InfraSnapshotNodeMetric, - Context = InfraContext - > = Resolver; - export type AvgResolver< - R = number | null, - Parent = InfraSnapshotNodeMetric, - Context = InfraContext - > = Resolver; - export type MaxResolver< - R = number | null, - Parent = InfraSnapshotNodeMetric, - Context = InfraContext - > = Resolver; -} - -export namespace InfraMetricDataResolvers { - export interface Resolvers { - id?: IdResolver; - - series?: SeriesResolver; - } - - export type IdResolver< - R = InfraMetric | null, - Parent = InfraMetricData, - Context = InfraContext - > = Resolver; - export type SeriesResolver< - R = InfraDataSeries[], - Parent = InfraMetricData, - Context = InfraContext - > = Resolver; -} - -export namespace InfraDataSeriesResolvers { - export interface Resolvers { - id?: IdResolver; - - label?: LabelResolver; - - data?: DataResolver; - } - - export type IdResolver = Resolver< - R, - Parent, - Context - >; - export type LabelResolver< - R = string, - Parent = InfraDataSeries, - Context = InfraContext - > = Resolver; - export type DataResolver< - R = InfraDataPoint[], - Parent = InfraDataSeries, - Context = InfraContext - > = Resolver; -} - -export namespace InfraDataPointResolvers { - export interface Resolvers { - timestamp?: TimestampResolver; - - value?: ValueResolver; - } - - export type TimestampResolver< - R = number, - Parent = InfraDataPoint, - Context = InfraContext - > = Resolver; - export type ValueResolver< - R = number | null, - Parent = InfraDataPoint, - Context = 
InfraContext - > = Resolver; -} - -export namespace MutationResolvers { - export interface Resolvers { - /** Create a new source of infrastructure data */ - createSource?: CreateSourceResolver; - /** Modify an existing source */ - updateSource?: UpdateSourceResolver; - /** Delete a source of infrastructure data */ - deleteSource?: DeleteSourceResolver; - } - - export type CreateSourceResolver< - R = UpdateSourceResult, - Parent = never, - Context = InfraContext - > = Resolver; - export interface CreateSourceArgs { - /** The id of the source */ - id: string; - - sourceProperties: UpdateSourceInput; - } - - export type UpdateSourceResolver< - R = UpdateSourceResult, - Parent = never, - Context = InfraContext - > = Resolver; - export interface UpdateSourceArgs { - /** The id of the source */ - id: string; - /** The properties to update the source with */ - sourceProperties: UpdateSourceInput; - } - - export type DeleteSourceResolver< - R = DeleteSourceResult, - Parent = never, - Context = InfraContext - > = Resolver; - export interface DeleteSourceArgs { - /** The id of the source */ - id: string; - } -} -/** The result of a successful source update */ -export namespace UpdateSourceResultResolvers { - export interface Resolvers { - /** The source that was updated */ - source?: SourceResolver; - } - - export type SourceResolver< - R = InfraSource, - Parent = UpdateSourceResult, - Context = InfraContext - > = Resolver; -} -/** The result of a source deletion operations */ -export namespace DeleteSourceResultResolvers { - export interface Resolvers { - /** The id of the source that was deleted */ - id?: IdResolver; - } - - export type IdResolver< - R = string, - Parent = DeleteSourceResult, - Context = InfraContext - > = Resolver; -} diff --git a/x-pack/legacy/plugins/infra/server/infra_server.ts b/x-pack/legacy/plugins/infra/server/infra_server.ts deleted file mode 100644 index 108e1b1e3f392..0000000000000 --- a/x-pack/legacy/plugins/infra/server/infra_server.ts +++ 
/dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { IResolvers, makeExecutableSchema } from 'graphql-tools'; -import { initIpToHostName } from './routes/ip_to_hostname'; -import { schemas } from './graphql'; -import { createLogEntriesResolvers } from './graphql/log_entries'; -import { createSourceStatusResolvers } from './graphql/source_status'; -import { createSourcesResolvers } from './graphql/sources'; -import { InfraBackendLibs } from './lib/infra_types'; -import { - initGetLogEntryRateRoute, - initValidateLogAnalysisIndicesRoute, -} from './routes/log_analysis'; -import { initMetricExplorerRoute } from './routes/metrics_explorer'; -import { initMetadataRoute } from './routes/metadata'; -import { initSnapshotRoute } from './routes/snapshot'; -import { initNodeDetailsRoute } from './routes/node_details'; -import { - initLogEntriesSummaryRoute, - initLogEntriesSummaryHighlightsRoute, - initLogEntriesItemRoute, -} from './routes/log_entries'; -import { initInventoryMetaRoute } from './routes/inventory_metadata'; - -export const initInfraServer = (libs: InfraBackendLibs) => { - const schema = makeExecutableSchema({ - resolvers: [ - createLogEntriesResolvers(libs) as IResolvers, - createSourcesResolvers(libs) as IResolvers, - createSourceStatusResolvers(libs) as IResolvers, - ], - typeDefs: schemas, - }); - - libs.framework.registerGraphQLEndpoint('/graphql', schema); - - initIpToHostName(libs); - initGetLogEntryRateRoute(libs); - initSnapshotRoute(libs); - initNodeDetailsRoute(libs); - initValidateLogAnalysisIndicesRoute(libs); - initLogEntriesSummaryRoute(libs); - initLogEntriesSummaryHighlightsRoute(libs); - initLogEntriesItemRoute(libs); - initMetricExplorerRoute(libs); - initMetadataRoute(libs); - initInventoryMetaRoute(libs); -}; 
diff --git a/x-pack/legacy/plugins/infra/server/kibana.index.ts b/x-pack/legacy/plugins/infra/server/kibana.index.ts deleted file mode 100644 index b4301b3edf367..0000000000000 --- a/x-pack/legacy/plugins/infra/server/kibana.index.ts +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { Server } from 'hapi'; -import JoiNamespace from 'joi'; - -export interface KbnServer extends Server { - usage: any; -} - -// NP_TODO: this is only used in the root index file AFAICT, can remove after migrating to NP -export const getConfigSchema = (Joi: typeof JoiNamespace) => { - const InfraDefaultSourceConfigSchema = Joi.object({ - metricAlias: Joi.string(), - logAlias: Joi.string(), - fields: Joi.object({ - container: Joi.string(), - host: Joi.string(), - message: Joi.array() - .items(Joi.string()) - .single(), - pod: Joi.string(), - tiebreaker: Joi.string(), - timestamp: Joi.string(), - }), - }); - - // NP_TODO: make sure this is all represented in the NP config schema - const InfraRootConfigSchema = Joi.object({ - enabled: Joi.boolean().default(true), - query: Joi.object({ - partitionSize: Joi.number(), - partitionFactor: Joi.number(), - }).default(), - sources: Joi.object() - .keys({ - default: InfraDefaultSourceConfigSchema, - }) - .default(), - }).default(); - - return InfraRootConfigSchema; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/adapter_types.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/fields/adapter_types.ts deleted file mode 100644 index 3aaa23b378096..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/adapter_types.ts +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { RequestHandlerContext } from 'src/core/server'; - -export interface FieldsAdapter { - getIndexFields( - requestContext: RequestHandlerContext, - indices: string, - timefield: string - ): Promise; -} - -export interface IndexFieldDescriptor { - name: string; - type: string; - searchable: boolean; - aggregatable: boolean; - displayable: boolean; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts deleted file mode 100644 index 834c991d5c6a4..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/framework_fields_adapter.ts +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { startsWith, uniq, first } from 'lodash'; -import { RequestHandlerContext } from 'src/core/server'; -import { InfraDatabaseSearchResponse } from '../framework'; -import { KibanaFramework } from '../framework/kibana_framework_adapter'; -import { FieldsAdapter, IndexFieldDescriptor } from './adapter_types'; -import { getAllowedListForPrefix } from '../../../../common/ecs_allowed_list'; -import { getAllCompositeData } from '../../../utils/get_all_composite_data'; -import { createAfterKeyHandler } from '../../../utils/create_afterkey_handler'; - -interface Bucket { - key: { dataset: string }; - doc_count: number; -} - -interface DataSetResponse { - datasets: { - buckets: Bucket[]; - after_key: { - dataset: string; - }; - }; -} - -export class FrameworkFieldsAdapter implements FieldsAdapter { - private framework: KibanaFramework; - - constructor(framework: KibanaFramework) { - this.framework = framework; - } - - public async getIndexFields( - requestContext: RequestHandlerContext, - indices: string, - timefield: string - ): Promise { - const indexPatternsService = this.framework.getIndexPatternsService(requestContext); - const response = await indexPatternsService.getFieldsForWildcard({ - pattern: indices, - }); - const { dataSets, modules } = await this.getDataSetsAndModules( - requestContext, - indices, - timefield - ); - const allowedList = modules.reduce( - (acc, name) => uniq([...acc, ...getAllowedListForPrefix(name)]), - [] as string[] - ); - const dataSetsWithAllowedList = [...allowedList, ...dataSets]; - return response.map(field => ({ - ...field, - displayable: dataSetsWithAllowedList.some(name => startsWith(field.name, name)), - })); - } - - private async getDataSetsAndModules( - requestContext: RequestHandlerContext, - indices: string, - timefield: string - ): Promise<{ dataSets: string[]; modules: string[] }> { - const params = { - index: indices, - allowNoIndices: true, - ignoreUnavailable: true, - body: { - size: 0, - query: { - bool: { - 
filter: [ - { - range: { - [timefield]: { - gte: 'now-24h', - lte: 'now', - }, - }, - }, - ], - }, - }, - aggs: { - datasets: { - composite: { - sources: [ - { - dataset: { - terms: { - field: 'event.dataset', - }, - }, - }, - ], - }, - }, - }, - }, - }; - - const bucketSelector = (response: InfraDatabaseSearchResponse<{}, DataSetResponse>) => - (response.aggregations && response.aggregations.datasets.buckets) || []; - const handleAfterKey = createAfterKeyHandler( - 'body.aggs.datasets.composite.after', - input => input?.aggregations?.datasets?.after_key - ); - - const buckets = await getAllCompositeData( - this.framework, - requestContext, - params, - bucketSelector, - handleAfterKey - ); - const dataSets = buckets.map(bucket => bucket.key.dataset); - const modules = dataSets.reduce((acc, dataset) => { - const module = first(dataset.split(/\./)); - return module ? uniq([...acc, module]) : acc; - }, [] as string[]); - return { modules, dataSets }; - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/index.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/fields/index.ts deleted file mode 100644 index 4e09b5d0e9e2d..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/fields/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './adapter_types'; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/adapter_types.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/framework/adapter_types.ts deleted file mode 100644 index b14536275cec3..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/adapter_types.ts +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { SearchResponse, GenericParams } from 'elasticsearch'; -import { Lifecycle } from 'hapi'; -import { UsageCollectionSetup } from 'src/plugins/usage_collection/server'; -import { RouteMethod, RouteConfig } from '../../../../../../../../src/core/server'; -import { PluginSetupContract as FeaturesPluginSetup } from '../../../../../../../plugins/features/server'; -import { SpacesPluginSetup } from '../../../../../../../plugins/spaces/server'; -import { VisTypeTimeseriesSetup } from '../../../../../../../../src/plugins/vis_type_timeseries/server'; -import { APMPluginContract } from '../../../../../../../plugins/apm/server'; -import { HomeServerPluginSetup } from '../../../../../../../../src/plugins/home/server'; - -// NP_TODO: Compose real types from plugins we depend on, no "any" -export interface InfraServerPluginDeps { - home: HomeServerPluginSetup; - spaces: SpacesPluginSetup; - usageCollection: UsageCollectionSetup; - metrics: VisTypeTimeseriesSetup; - indexPatterns: { - indexPatternsServiceFactory: any; - }; - features: FeaturesPluginSetup; - apm: APMPluginContract; -} - -export interface CallWithRequestParams extends GenericParams { - max_concurrent_shard_requests?: number; - name?: string; - index?: string | string[]; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - size?: number; - terminate_after?: number; - fields?: string | string[]; -} - -export type InfraResponse = Lifecycle.ReturnValue; - -export interface InfraFrameworkPluginOptions { - register: any; - options: any; -} - -export interface InfraDatabaseResponse { - took: number; - timeout: boolean; -} - -export interface InfraDatabaseSearchResponse - extends InfraDatabaseResponse { - _shards: { - total: number; - successful: number; - skipped: number; - failed: number; - }; - aggregations?: Aggregations; - hits: 
{ - total: { - value: number; - relation: string; - }; - hits: Hit[]; - }; -} - -export interface InfraDatabaseMultiResponse extends InfraDatabaseResponse { - responses: Array>; -} - -export interface InfraDatabaseFieldCapsResponse extends InfraDatabaseResponse { - indices: string[]; - fields: InfraFieldsResponse; -} - -export interface InfraDatabaseGetIndicesAliasResponse { - [indexName: string]: { - aliases: { - [aliasName: string]: any; - }; - }; -} - -export interface InfraDatabaseGetIndicesResponse { - [indexName: string]: { - aliases: { - [aliasName: string]: any; - }; - mappings: { - _meta: object; - dynamic_templates: any[]; - date_detection: boolean; - properties: { - [fieldName: string]: any; - }; - }; - settings: { index: object }; - }; -} - -export type SearchHit = SearchResponse['hits']['hits'][0]; - -export interface SortedSearchHit extends SearchHit { - sort: any[]; - _source: { - [field: string]: any; - }; -} - -export type InfraDateRangeAggregationBucket = { - from?: number; - to?: number; - doc_count: number; - key: string; -} & NestedAggregation; - -export interface InfraDateRangeAggregationResponse { - buckets: Array>; -} - -export interface InfraTopHitsAggregationResponse { - hits: { - hits: []; - }; -} - -export interface InfraMetadataAggregationBucket { - key: string; -} - -export interface InfraMetadataAggregationResponse { - buckets: InfraMetadataAggregationBucket[]; -} - -export interface InfraFieldsResponse { - [name: string]: InfraFieldDef; -} - -export interface InfraFieldDetails { - searchable: boolean; - aggregatable: boolean; - type: string; -} - -export interface InfraFieldDef { - [type: string]: InfraFieldDetails; -} - -export interface InfraTSVBResponse { - [key: string]: InfraTSVBPanel; -} - -export interface InfraTSVBPanel { - id: string; - series: InfraTSVBSeries[]; -} - -export interface InfraTSVBSeries { - id: string; - label: string; - data: InfraTSVBDataPoint[]; -} - -export type InfraTSVBDataPoint = [number, number]; - 
-export type InfraRouteConfig = { - method: RouteMethod; -} & RouteConfig; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/index.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/framework/index.ts deleted file mode 100644 index 4e09b5d0e9e2d..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './adapter_types'; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts deleted file mode 100644 index 4409667d8390a..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/framework/kibana_framework_adapter.ts +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -/* eslint-disable @typescript-eslint/array-type */ - -import { GenericParams } from 'elasticsearch'; -import { GraphQLSchema } from 'graphql'; -import { Legacy } from 'kibana'; -import { runHttpQuery } from 'apollo-server-core'; -import { schema, TypeOf } from '@kbn/config-schema'; -import { - InfraRouteConfig, - InfraTSVBResponse, - InfraServerPluginDeps, - CallWithRequestParams, - InfraDatabaseSearchResponse, - InfraDatabaseMultiResponse, - InfraDatabaseFieldCapsResponse, - InfraDatabaseGetIndicesResponse, - InfraDatabaseGetIndicesAliasResponse, -} from './adapter_types'; -import { TSVBMetricModel } from '../../../../common/inventory_models/types'; -import { - CoreSetup, - IRouter, - KibanaRequest, - RequestHandlerContext, - KibanaResponseFactory, - RouteMethod, -} from '../../../../../../../../src/core/server'; -import { RequestHandler } from '../../../../../../../../src/core/server'; -import { InfraConfig } from '../../../../../../../plugins/infra/server'; - -export class KibanaFramework { - public router: IRouter; - public plugins: InfraServerPluginDeps; - - constructor(core: CoreSetup, config: InfraConfig, plugins: InfraServerPluginDeps) { - this.router = core.http.createRouter(); - this.plugins = plugins; - } - - public registerRoute( - config: InfraRouteConfig, - handler: RequestHandler - ) { - const defaultOptions = { - tags: ['access:infra'], - }; - const routeConfig = { - path: config.path, - validate: config.validate, - // Currently we have no use of custom options beyond tags, this can be extended - // beyond defaultOptions if it's needed. 
- options: defaultOptions, - }; - switch (config.method) { - case 'get': - this.router.get(routeConfig, handler); - break; - case 'post': - this.router.post(routeConfig, handler); - break; - case 'delete': - this.router.delete(routeConfig, handler); - break; - case 'put': - this.router.put(routeConfig, handler); - break; - } - } - - public registerGraphQLEndpoint(routePath: string, gqlSchema: GraphQLSchema) { - // These endpoints are validated by GraphQL at runtime and with GraphQL generated types - const body = schema.object({}, { allowUnknowns: true }); - type Body = TypeOf; - - const routeOptions = { - path: `/api/infra${routePath}`, - validate: { - body, - }, - options: { - tags: ['access:infra'], - }, - }; - async function handler( - context: RequestHandlerContext, - request: KibanaRequest, - response: KibanaResponseFactory - ) { - try { - const query = - request.route.method === 'post' - ? (request.body as Record) - : (request.query as Record); - - const gqlResponse = await runHttpQuery([context, request], { - method: request.route.method.toUpperCase(), - options: (req: RequestHandlerContext, rawReq: KibanaRequest) => ({ - context: { req, rawReq }, - schema: gqlSchema, - }), - query, - }); - - return response.ok({ - body: gqlResponse, - headers: { - 'content-type': 'application/json', - }, - }); - } catch (error) { - const errorBody = { - message: error.message, - }; - - if ('HttpQueryError' !== error.name) { - return response.internalError({ - body: errorBody, - }); - } - - if (error.isGraphQLError === true) { - return response.customError({ - statusCode: error.statusCode, - body: errorBody, - headers: { - 'Content-Type': 'application/json', - }, - }); - } - - const { headers = [], statusCode = 500 } = error; - return response.customError({ - statusCode, - headers, - body: errorBody, - }); - } - } - this.router.post(routeOptions, handler); - this.router.get(routeOptions, handler); - } - - callWithRequest( - requestContext: RequestHandlerContext, - endpoint: 
'search', - options?: CallWithRequestParams - ): Promise>; - callWithRequest( - requestContext: RequestHandlerContext, - endpoint: 'msearch', - options?: CallWithRequestParams - ): Promise>; - callWithRequest( - requestContext: RequestHandlerContext, - endpoint: 'fieldCaps', - options?: CallWithRequestParams - ): Promise; - callWithRequest( - requestContext: RequestHandlerContext, - endpoint: 'indices.existsAlias', - options?: CallWithRequestParams - ): Promise; - callWithRequest( - requestContext: RequestHandlerContext, - method: 'indices.getAlias', - options?: object - ): Promise; - callWithRequest( - requestContext: RequestHandlerContext, - method: 'indices.get' | 'ml.getBuckets', - options?: object - ): Promise; - callWithRequest( - requestContext: RequestHandlerContext, - endpoint: string, - options?: CallWithRequestParams - ): Promise; - - public async callWithRequest( - requestContext: RequestHandlerContext, - endpoint: string, - params: CallWithRequestParams - ) { - const { elasticsearch, uiSettings } = requestContext.core; - - const includeFrozen = await uiSettings.client.get('search:includeFrozen'); - if (endpoint === 'msearch') { - const maxConcurrentShardRequests = await uiSettings.client.get( - 'courier:maxConcurrentShardRequests' - ); - if (maxConcurrentShardRequests > 0) { - params = { ...params, max_concurrent_shard_requests: maxConcurrentShardRequests }; - } - } - - const frozenIndicesParams = ['search', 'msearch'].includes(endpoint) - ? 
{ - ignore_throttled: !includeFrozen, - } - : {}; - - return elasticsearch.dataClient.callAsCurrentUser(endpoint, { - ...params, - ...frozenIndicesParams, - }); - } - - public getIndexPatternsService( - requestContext: RequestHandlerContext - ): Legacy.IndexPatternsService { - return this.plugins.indexPatterns.indexPatternsServiceFactory({ - callCluster: async (method: string, args: [GenericParams], ...rest: any[]) => { - const fieldCaps = await this.callWithRequest(requestContext, method, { - ...args, - allowNoIndices: true, - } as GenericParams); - return fieldCaps; - }, - }); - } - - public getSpaceId(request: KibanaRequest): string { - const spacesPlugin = this.plugins.spaces; - - if ( - spacesPlugin && - spacesPlugin.spacesService && - typeof spacesPlugin.spacesService.getSpaceId === 'function' - ) { - return spacesPlugin.spacesService.getSpaceId(request); - } else { - return 'default'; - } - } - - public async makeTSVBRequest( - requestContext: RequestHandlerContext, - model: TSVBMetricModel, - timerange: { min: number; max: number }, - filters: any[] - ): Promise { - const { getVisData } = this.plugins.metrics; - if (typeof getVisData !== 'function') { - throw new Error('TSVB is not available'); - } - const options = { - timerange, - panels: [model], - filters, - }; - return getVisData(requestContext, options); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts deleted file mode 100644 index 41bc2aa258807..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/adapter_types.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/index.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/index.ts deleted file mode 100644 index 41bc2aa258807..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts deleted file mode 100644 index ec45171baa7b0..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -/* eslint-disable @typescript-eslint/no-empty-interface */ - -import { timeMilliseconds } from 'd3-time'; -import * as runtimeTypes from 'io-ts'; -import first from 'lodash/fp/first'; -import get from 'lodash/fp/get'; -import has from 'lodash/fp/has'; -import zip from 'lodash/fp/zip'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { map, fold } from 'fp-ts/lib/Either'; -import { identity, constant } from 'fp-ts/lib/function'; -import { RequestHandlerContext } from 'src/core/server'; -import { compareTimeKeys, isTimeKey, TimeKey } from '../../../../common/time'; -import { JsonObject } from '../../../../common/typed_json'; -import { - LogEntriesAdapter, - LogEntryDocument, - LogEntryQuery, - LogSummaryBucket, -} from '../../domains/log_entries_domain'; -import { InfraSourceConfiguration } from '../../sources'; -import { SortedSearchHit } from '../framework'; -import { KibanaFramework } from '../framework/kibana_framework_adapter'; - -const DAY_MILLIS = 24 * 60 * 60 * 1000; -const LOOKUP_OFFSETS = [0, 1, 7, 30, 365, 10000, Infinity].map(days => days * DAY_MILLIS); -const TIMESTAMP_FORMAT = 'epoch_millis'; - -interface LogItemHit { - _index: string; - _id: string; - _source: JsonObject; - sort: [number, number]; -} - -export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { - constructor(private readonly framework: KibanaFramework) {} - - public async getAdjacentLogEntryDocuments( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - fields: string[], - start: TimeKey, - direction: 'asc' | 'desc', - maxCount: number, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise { - if (maxCount <= 0) { - return []; - } - - const intervals = getLookupIntervals(start.time, direction); - - let documents: LogEntryDocument[] = []; - for (const [intervalStart, intervalEnd] of intervals) { - if (documents.length >= maxCount) { - break; - } - - const documentsInInterval = await 
this.getLogEntryDocumentsBetween( - requestContext, - sourceConfiguration, - fields, - intervalStart, - intervalEnd, - documents.length > 0 ? documents[documents.length - 1].key : start, - maxCount - documents.length, - filterQuery, - highlightQuery - ); - - documents = [...documents, ...documentsInInterval]; - } - - return direction === 'asc' ? documents : documents.reverse(); - } - - public async getContainedLogEntryDocuments( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - fields: string[], - start: TimeKey, - end: TimeKey, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise { - const documents = await this.getLogEntryDocumentsBetween( - requestContext, - sourceConfiguration, - fields, - start.time, - end.time, - start, - 10000, - filterQuery, - highlightQuery - ); - - return documents.filter(document => compareTimeKeys(document.key, end) < 0); - } - - public async getContainedLogSummaryBuckets( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - start: number, - end: number, - bucketSize: number, - filterQuery?: LogEntryQuery - ): Promise { - const bucketIntervalStarts = timeMilliseconds(new Date(start), new Date(end), bucketSize); - - const query = { - allowNoIndices: true, - index: sourceConfiguration.logAlias, - ignoreUnavailable: true, - body: { - aggregations: { - count_by_date: { - date_range: { - field: sourceConfiguration.fields.timestamp, - format: TIMESTAMP_FORMAT, - ranges: bucketIntervalStarts.map(bucketIntervalStart => ({ - from: bucketIntervalStart.getTime(), - to: bucketIntervalStart.getTime() + bucketSize, - })), - }, - aggregations: { - top_hits_by_key: { - top_hits: { - size: 1, - sort: [ - { [sourceConfiguration.fields.timestamp]: 'asc' }, - { [sourceConfiguration.fields.tiebreaker]: 'asc' }, - ], - _source: false, - }, - }, - }, - }, - }, - query: { - bool: { - filter: [ - ...createQueryFilterClauses(filterQuery), - { - range: { - 
[sourceConfiguration.fields.timestamp]: { - gte: start, - lte: end, - format: TIMESTAMP_FORMAT, - }, - }, - }, - ], - }, - }, - size: 0, - track_total_hits: false, - }, - }; - - const response = await this.framework.callWithRequest(requestContext, 'search', query); - - return pipe( - LogSummaryResponseRuntimeType.decode(response), - map(logSummaryResponse => - logSummaryResponse.aggregations.count_by_date.buckets.map( - convertDateRangeBucketToSummaryBucket - ) - ), - fold(constant([]), identity) - ); - } - - public async getLogItem( - requestContext: RequestHandlerContext, - id: string, - sourceConfiguration: InfraSourceConfiguration - ) { - const search = (searchOptions: object) => - this.framework.callWithRequest(requestContext, 'search', searchOptions); - - const params = { - index: sourceConfiguration.logAlias, - terminate_after: 1, - body: { - size: 1, - sort: [ - { [sourceConfiguration.fields.timestamp]: 'desc' }, - { [sourceConfiguration.fields.tiebreaker]: 'desc' }, - ], - query: { - ids: { - values: [id], - }, - }, - }, - }; - - const response = await search(params); - const document = first(response.hits.hits); - if (!document) { - throw new Error('Document not found'); - } - return document; - } - - private async getLogEntryDocumentsBetween( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - fields: string[], - start: number, - end: number, - after: TimeKey | null, - maxCount: number, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise { - if (maxCount <= 0) { - return []; - } - - const sortDirection: 'asc' | 'desc' = start <= end ? 'asc' : 'desc'; - - const startRange = { - [sortDirection === 'asc' ? 'gte' : 'lte']: start, - }; - const endRange = - end === Infinity - ? {} - : { - [sortDirection === 'asc' ? 'lte' : 'gte']: end, - }; - - const highlightClause = highlightQuery - ? 
{ - highlight: { - boundary_scanner: 'word', - fields: fields.reduce( - (highlightFieldConfigs, fieldName) => ({ - ...highlightFieldConfigs, - [fieldName]: {}, - }), - {} - ), - fragment_size: 1, - number_of_fragments: 100, - post_tags: [''], - pre_tags: [''], - highlight_query: highlightQuery, - }, - } - : {}; - - const searchAfterClause = isTimeKey(after) - ? { - search_after: [after.time, after.tiebreaker], - } - : {}; - - const query = { - allowNoIndices: true, - index: sourceConfiguration.logAlias, - ignoreUnavailable: true, - body: { - query: { - bool: { - filter: [ - ...createQueryFilterClauses(filterQuery), - { - range: { - [sourceConfiguration.fields.timestamp]: { - ...startRange, - ...endRange, - format: TIMESTAMP_FORMAT, - }, - }, - }, - ], - }, - }, - ...highlightClause, - ...searchAfterClause, - _source: fields, - size: maxCount, - sort: [ - { [sourceConfiguration.fields.timestamp]: sortDirection }, - { [sourceConfiguration.fields.tiebreaker]: sortDirection }, - ], - track_total_hits: false, - }, - }; - - const response = await this.framework.callWithRequest( - requestContext, - 'search', - query - ); - const hits = response.hits.hits; - const documents = hits.map(convertHitToLogEntryDocument(fields)); - - return documents; - } -} - -function getLookupIntervals(start: number, direction: 'asc' | 'desc'): Array<[number, number]> { - const offsetSign = direction === 'asc' ? 1 : -1; - const translatedOffsets = LOOKUP_OFFSETS.map(offset => start + offset * offsetSign); - const intervals = zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< - [number, number] - >; - return intervals; -} - -const convertHitToLogEntryDocument = (fields: string[]) => ( - hit: SortedSearchHit -): LogEntryDocument => ({ - gid: hit._id, - fields: fields.reduce( - (flattenedFields, fieldName) => - has(fieldName, hit._source) - ? 
{ - ...flattenedFields, - [fieldName]: get(fieldName, hit._source), - } - : flattenedFields, - {} as { [fieldName: string]: string | number | boolean | null } - ), - highlights: hit.highlight || {}, - key: { - time: hit.sort[0], - tiebreaker: hit.sort[1], - }, -}); - -const convertDateRangeBucketToSummaryBucket = ( - bucket: LogSummaryDateRangeBucket -): LogSummaryBucket => ({ - entriesCount: bucket.doc_count, - start: bucket.from || 0, - end: bucket.to || 0, - topEntryKeys: bucket.top_hits_by_key.hits.hits.map(hit => ({ - tiebreaker: hit.sort[1], - time: hit.sort[0], - })), -}); - -const createQueryFilterClauses = (filterQuery: LogEntryQuery | undefined) => - filterQuery ? [filterQuery] : []; - -const LogSummaryDateRangeBucketRuntimeType = runtimeTypes.intersection([ - runtimeTypes.type({ - doc_count: runtimeTypes.number, - key: runtimeTypes.string, - top_hits_by_key: runtimeTypes.type({ - hits: runtimeTypes.type({ - hits: runtimeTypes.array( - runtimeTypes.type({ - sort: runtimeTypes.tuple([runtimeTypes.number, runtimeTypes.number]), - }) - ), - }), - }), - }), - runtimeTypes.partial({ - from: runtimeTypes.number, - to: runtimeTypes.number, - }), -]); - -export interface LogSummaryDateRangeBucket - extends runtimeTypes.TypeOf {} - -const LogSummaryResponseRuntimeType = runtimeTypes.type({ - aggregations: runtimeTypes.type({ - count_by_date: runtimeTypes.type({ - buckets: runtimeTypes.array(LogSummaryDateRangeBucketRuntimeType), - }), - }), -}); - -export interface LogSummaryResponse - extends runtimeTypes.TypeOf {} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/adapter_types.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/adapter_types.ts deleted file mode 100644 index 844eaf7604927..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/adapter_types.ts +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { RequestHandlerContext, KibanaRequest } from 'src/core/server'; -import { - InfraMetric, - InfraMetricData, - InfraNodeType, - InfraTimerangeInput, -} from '../../../graphql/types'; -import { InfraSourceConfiguration } from '../../sources'; - -export interface InfraMetricsRequestOptions { - nodeIds: { - nodeId: string; - cloudId?: string | null; - }; - nodeType: InfraNodeType; - sourceConfiguration: InfraSourceConfiguration; - timerange: InfraTimerangeInput; - metrics: InfraMetric[]; -} - -export interface InfraMetricsAdapter { - getMetrics( - requestContext: RequestHandlerContext, - options: InfraMetricsRequestOptions, - request: KibanaRequest - ): Promise; -} - -export enum InfraMetricModelQueryType { - lucene = 'lucene', - kuery = 'kuery', -} - -export enum InfraMetricModelMetricType { - avg = 'avg', - max = 'max', - min = 'min', - calculation = 'calculation', - cardinality = 'cardinality', - series_agg = 'series_agg', // eslint-disable-line @typescript-eslint/camelcase - positive_only = 'positive_only', // eslint-disable-line @typescript-eslint/camelcase - derivative = 'derivative', - count = 'count', - sum = 'sum', - cumulative_sum = 'cumulative_sum', // eslint-disable-line @typescript-eslint/camelcase -} - -export interface InfraMetricModel { - id: InfraMetric; - requires: string[]; - index_pattern: string | string[]; - interval: string; - time_field: string; - type: string; - series: InfraMetricModelSeries[]; - filter?: string; - map_field_to?: string; - id_type?: 'cloud' | 'node'; -} - -export interface InfraMetricModelSeries { - id: string; - metrics: InfraMetricModelMetric[]; - split_mode: string; - terms_field?: string; - terms_size?: number; - terms_order_by?: string; - filter?: { query: string; language: InfraMetricModelQueryType }; -} - -export interface 
InfraMetricModelBasicMetric { - id: string; - field?: string | null; - type: InfraMetricModelMetricType; -} - -export interface InfraMetricModelSeriesAgg { - id: string; - function: string; - type: InfraMetricModelMetricType.series_agg; // eslint-disable-line @typescript-eslint/camelcase -} - -export interface InfraMetricModelDerivative { - id: string; - field: string; - unit: string; - type: InfraMetricModelMetricType; -} - -export interface InfraMetricModelBucketScriptVariable { - field: string; - id: string; - name: string; -} - -export interface InfraMetricModelCount { - id: string; - type: InfraMetricModelMetricType.count; -} - -export interface InfraMetricModelBucketScript { - id: string; - script: string; - type: InfraMetricModelMetricType.calculation; - variables: InfraMetricModelBucketScriptVariable[]; -} - -export type InfraMetricModelMetric = - | InfraMetricModelCount - | InfraMetricModelBasicMetric - | InfraMetricModelBucketScript - | InfraMetricModelDerivative - | InfraMetricModelSeriesAgg; - -export type InfraMetricModelCreator = ( - timeField: string, - indexPattern: string | string[], - interval: string -) => InfraMetricModel; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/index.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/index.ts deleted file mode 100644 index 4e09b5d0e9e2d..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export * from './adapter_types'; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts deleted file mode 100644 index 6acb8afbfb249..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/kibana_metrics_adapter.ts +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { i18n } from '@kbn/i18n'; -import { flatten, get } from 'lodash'; -import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; -import { InfraMetric, InfraMetricData } from '../../../graphql/types'; -import { KibanaFramework } from '../framework/kibana_framework_adapter'; -import { InfraMetricsAdapter, InfraMetricsRequestOptions } from './adapter_types'; -import { checkValidNode } from './lib/check_valid_node'; -import { metrics, findInventoryFields } from '../../../../common/inventory_models'; -import { TSVBMetricModelCreator } from '../../../../common/inventory_models/types'; -import { calculateMetricInterval } from '../../../utils/calculate_metric_interval'; - -export class KibanaMetricsAdapter implements InfraMetricsAdapter { - private framework: KibanaFramework; - - constructor(framework: KibanaFramework) { - this.framework = framework; - } - - public async getMetrics( - requestContext: RequestHandlerContext, - options: InfraMetricsRequestOptions, - rawRequest: KibanaRequest - ): Promise { - const indexPattern = `${options.sourceConfiguration.metricAlias},${options.sourceConfiguration.logAlias}`; - const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); - const nodeField = fields.id; - - const search = (searchOptions: object) => - this.framework.callWithRequest<{}, 
Aggregation>(requestContext, 'search', searchOptions); - - const validNode = await checkValidNode(search, indexPattern, nodeField, options.nodeIds.nodeId); - if (!validNode) { - throw new Error( - i18n.translate('xpack.infra.kibanaMetrics.nodeDoesNotExistErrorMessage', { - defaultMessage: '{nodeId} does not exist.', - values: { - nodeId: options.nodeIds.nodeId, - }, - }) - ); - } - - const requests = options.metrics.map(metricId => - this.makeTSVBRequest(metricId, options, nodeField, requestContext) - ); - - return Promise.all(requests) - .then(results => { - return results.map(result => { - const metricIds = Object.keys(result).filter( - k => !['type', 'uiRestrictions'].includes(k) - ); - - return metricIds.map((id: string) => { - const infraMetricId: InfraMetric = (InfraMetric as any)[id]; - if (!infraMetricId) { - throw new Error( - i18n.translate('xpack.infra.kibanaMetrics.invalidInfraMetricErrorMessage', { - defaultMessage: '{id} is not a valid InfraMetric', - values: { - id, - }, - }) - ); - } - const panel = result[infraMetricId]; - return { - id: infraMetricId, - series: panel.series.map(series => { - return { - id: series.id, - label: series.label, - data: series.data.map(point => ({ timestamp: point[0], value: point[1] })), - }; - }), - }; - }); - }); - }) - .then(result => flatten(result)); - } - - async makeTSVBRequest( - metricId: InfraMetric, - options: InfraMetricsRequestOptions, - nodeField: string, - requestContext: RequestHandlerContext - ) { - const createTSVBModel = get(metrics, ['tsvb', metricId]) as TSVBMetricModelCreator | undefined; - if (!createTSVBModel) { - throw new Error( - i18n.translate('xpack.infra.metrics.missingTSVBModelError', { - defaultMessage: 'The TSVB model for {metricId} does not exist for {nodeType}', - values: { - metricId, - nodeType: options.nodeType, - }, - }) - ); - } - - const indexPattern = `${options.sourceConfiguration.metricAlias},${options.sourceConfiguration.logAlias}`; - const timerange = { - min: 
options.timerange.from, - max: options.timerange.to, - }; - - const model = createTSVBModel( - options.sourceConfiguration.fields.timestamp, - indexPattern, - options.timerange.interval - ); - const calculatedInterval = await calculateMetricInterval( - this.framework, - requestContext, - { - indexPattern: `${options.sourceConfiguration.logAlias},${options.sourceConfiguration.metricAlias}`, - timestampField: options.sourceConfiguration.fields.timestamp, - timerange: options.timerange, - }, - model.requires - ); - - if (calculatedInterval) { - model.interval = `>=${calculatedInterval}s`; - } - - if (model.id_type === 'cloud' && !options.nodeIds.cloudId) { - throw new Error( - i18n.translate('xpack.infra.kibanaMetrics.cloudIdMissingErrorMessage', { - defaultMessage: - 'Model for {metricId} requires a cloudId, but none was given for {nodeId}.', - values: { - metricId, - nodeId: options.nodeIds.nodeId, - }, - }) - ); - } - const id = - model.id_type === 'cloud' ? (options.nodeIds.cloudId as string) : options.nodeIds.nodeId; - const filters = model.map_field_to - ? [{ match: { [model.map_field_to]: id } }] - : [{ match: { [nodeField]: id } }]; - - return this.framework.makeTSVBRequest(requestContext, model, timerange, filters); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts deleted file mode 100644 index bca509334b692..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/check_valid_node.ts +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { InfraDatabaseSearchResponse } from '../../framework'; - -export const checkValidNode = async ( - search: (options: object) => Promise>, - indexPattern: string | string[], - field: string, - id: string -): Promise => { - const params = { - allowNoIndices: true, - ignoreUnavailable: true, - index: indexPattern, - terminateAfter: 1, - body: { - size: 0, - query: { - match: { - [field]: id, - }, - }, - }, - }; - - const result = await search(params); - return result && result.hits && result.hits.total && result.hits.total.value > 0; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/errors.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/errors.ts deleted file mode 100644 index 750858f3ce1fa..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/metrics/lib/errors.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { ApolloError } from 'apollo-server-errors'; -import { InfraMetricsErrorCodes } from '../../../../../common/errors'; - -export class InvalidNodeError extends ApolloError { - constructor(message: string) { - super(message, InfraMetricsErrorCodes.invalid_node); - Object.defineProperty(this, 'name', { value: 'InvalidNodeError' }); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts deleted file mode 100644 index 635f6ff9762c5..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/elasticsearch_source_status_adapter.ts +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { RequestHandlerContext } from 'src/core/server'; -import { InfraSourceStatusAdapter } from '../../source_status'; -import { InfraDatabaseGetIndicesResponse } from '../framework'; -import { KibanaFramework } from '../framework/kibana_framework_adapter'; - -export class InfraElasticsearchSourceStatusAdapter implements InfraSourceStatusAdapter { - constructor(private readonly framework: KibanaFramework) {} - - public async getIndexNames(requestContext: RequestHandlerContext, aliasName: string) { - const indexMaps = await Promise.all([ - this.framework - .callWithRequest(requestContext, 'indices.getAlias', { - name: aliasName, - filterPath: '*.settings.index.uuid', // to keep the response size as small as possible - }) - .catch(withDefaultIfNotFound({})), - this.framework - .callWithRequest(requestContext, 'indices.get', { - index: aliasName, - filterPath: '*.settings.index.uuid', // to keep the response size as small as possible - }) - .catch(withDefaultIfNotFound({})), - ]); - - return indexMaps.reduce( - (indexNames, indexMap) => [...indexNames, ...Object.keys(indexMap)], - [] as string[] - ); - } - - public async hasAlias(requestContext: RequestHandlerContext, aliasName: string) { - return await this.framework.callWithRequest(requestContext, 'indices.existsAlias', { - name: aliasName, - }); - } - - public async hasIndices(requestContext: RequestHandlerContext, indexNames: string) { - return await this.framework - .callWithRequest(requestContext, 'search', { - ignore_unavailable: true, - allow_no_indices: true, - index: indexNames, - size: 0, - terminate_after: 1, - }) - .then( - response => response._shards.total > 0, - err => { - if (err.status === 404) { - return false; - } - throw err; - } - ); - } -} - -const withDefaultIfNotFound = (defaultValue: DefaultValue) => ( - error: any 
-): DefaultValue => { - if (error && error.status === 404) { - return defaultValue; - } - throw error; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/index.ts b/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/index.ts deleted file mode 100644 index f5adfe190f805..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/adapters/source_status/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export { InfraElasticsearchSourceStatusAdapter } from './elasticsearch_source_status_adapter'; diff --git a/x-pack/legacy/plugins/infra/server/lib/compose/kibana.ts b/x-pack/legacy/plugins/infra/server/lib/compose/kibana.ts deleted file mode 100644 index 305841aa52d36..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/compose/kibana.ts +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -import { FrameworkFieldsAdapter } from '../adapters/fields/framework_fields_adapter'; -import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; -import { InfraKibanaLogEntriesAdapter } from '../adapters/log_entries/kibana_log_entries_adapter'; -import { KibanaMetricsAdapter } from '../adapters/metrics/kibana_metrics_adapter'; -import { InfraElasticsearchSourceStatusAdapter } from '../adapters/source_status'; -import { InfraFieldsDomain } from '../domains/fields_domain'; -import { InfraLogEntriesDomain } from '../domains/log_entries_domain'; -import { InfraMetricsDomain } from '../domains/metrics_domain'; -import { InfraBackendLibs, InfraDomainLibs } from '../infra_types'; -import { InfraLogAnalysis } from '../log_analysis'; -import { InfraSnapshot } from '../snapshot'; -import { InfraSourceStatus } from '../source_status'; -import { InfraSources } from '../sources'; -import { InfraConfig } from '../../../../../../plugins/infra/server'; -import { CoreSetup } from '../../../../../../../src/core/server'; -import { InfraServerPluginDeps } from '../adapters/framework/adapter_types'; - -export function compose(core: CoreSetup, config: InfraConfig, plugins: InfraServerPluginDeps) { - const framework = new KibanaFramework(core, config, plugins); - const sources = new InfraSources({ - config, - }); - const sourceStatus = new InfraSourceStatus(new InfraElasticsearchSourceStatusAdapter(framework), { - sources, - }); - const snapshot = new InfraSnapshot({ sources, framework }); - const logAnalysis = new InfraLogAnalysis({ framework }); - - // TODO: separate these out individually and do away with "domains" as a temporary group - const domainLibs: InfraDomainLibs = { - fields: new InfraFieldsDomain(new FrameworkFieldsAdapter(framework), { - sources, - }), - logEntries: new InfraLogEntriesDomain(new InfraKibanaLogEntriesAdapter(framework), { - sources, - }), - metrics: new InfraMetricsDomain(new KibanaMetricsAdapter(framework)), - }; - - const 
libs: InfraBackendLibs = { - configuration: config, // NP_TODO: Do we ever use this anywhere? - framework, - logAnalysis, - snapshot, - sources, - sourceStatus, - ...domainLibs, - }; - - return libs; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/constants.ts b/x-pack/legacy/plugins/infra/server/lib/constants.ts deleted file mode 100644 index 0765256c4160c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/constants.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const CLOUD_METRICS_MODULES = ['aws']; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/fields_domain.ts b/x-pack/legacy/plugins/infra/server/lib/domains/fields_domain.ts deleted file mode 100644 index a00c76216da4c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/fields_domain.ts +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; -import { InfraIndexField, InfraIndexType } from '../../graphql/types'; -import { FieldsAdapter } from '../adapters/fields'; -import { InfraSources } from '../sources'; - -export class InfraFieldsDomain { - constructor( - private readonly adapter: FieldsAdapter, - private readonly libs: { sources: InfraSources } - ) {} - - public async getFields( - requestContext: RequestHandlerContext, - sourceId: string, - indexType: InfraIndexType - ): Promise { - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const includeMetricIndices = [InfraIndexType.ANY, InfraIndexType.METRICS].includes(indexType); - const includeLogIndices = [InfraIndexType.ANY, InfraIndexType.LOGS].includes(indexType); - - const fields = await this.adapter.getIndexFields( - requestContext, - `${includeMetricIndices ? configuration.metricAlias : ''},${ - includeLogIndices ? configuration.logAlias : '' - }`, - configuration.fields.timestamp - ); - - return fields; - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts deleted file mode 100644 index 367ae6a0cae89..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.test.ts +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('Apache2 Access', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'apache.access', - 'event.module': 'apache', - 'fileset.name': 'access', - 'http.request.method': 'GET', - 'http.request.referrer': '-', - 'http.response.body.bytes': 499, - 'http.response.status_code': 404, - 'http.version': '1.1', - 'input.type': 'log', - 'log.offset': 73, - 'service.type': 'apache', - 'source.address': '192.168.33.1', - 'source.ip': '192.168.33.1', - 'url.original': '/hello', - 'user.name': '-', - 'user_agent.device': 'Other', - 'user_agent.major': '50', - 'user_agent.minor': '0', - 'user_agent.name': 'Firefox', - 'user_agent.original': - 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0', - 'user_agent.os.full_name': 'Mac OS X 10.12', - 'user_agent.os.major': '10', - 'user_agent.os.minor': '12', - 'user_agent.os.name': 'Mac OS X', - }; - const highlights = { - 'http.request.method': ['GET'], - }; - - expect(format(flattenedDocument, highlights)).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.module", - "highlights": Array [], - "value": "apache", - }, - Object { - "constant": "][access] ", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "192.168.33.1", - }, - Object { - "constant": " ", - }, - Object { - "field": "user.name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "http.request.method", - "highlights": Array [ - "GET", - ], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "url.original", - "highlights": Array [], - "value": "/hello", - }, - 
Object { - "constant": " HTTP/", - }, - Object { - "field": "http.version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "http.response.status_code", - "highlights": Array [], - "value": "404", - }, - Object { - "constant": " ", - }, - Object { - "field": "http.response.body.bytes", - "highlights": Array [], - "value": "499", - }, -] -`); - }); - - test('Apache2 Error', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:08.000Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'apache.error', - 'event.module': 'apache', - 'fileset.name': 'error', - 'input.type': 'log', - 'log.level': 'error', - 'log.offset': 0, - message: 'File does not exist: /var/www/favicon.ico', - 'service.type': 'apache', - 'source.address': '192.168.33.1', - 'source.ip': '192.168.33.1', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[apache][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "error", - }, - Object { - "constant": "] ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "File does not exist: /var/www/favicon.ico", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('Apache2 Access', () => { - const flattenedDocument = { - 'apache2.access': true, - 'apache2.access.remote_ip': '192.168.1.42', - 'apache2.access.user_name': 'admin', - 'apache2.access.method': 'GET', - 'apache2.access.url': '/faqs', - 'apache2.access.http_version': '1.1', - 'apache2.access.response_code': '200', - 'apache2.access.body_sent.bytes': 1024, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[apache][access] ", - }, - Object { - "field": "apache2.access.remote_ip", - "highlights": Array [], - "value": "192.168.1.42", - }, - Object { - "constant": " ", - }, - Object { - "field": "apache2.access.user_name", - "highlights": Array [], 
- "value": "admin", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "apache2.access.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "apache2.access.url", - "highlights": Array [], - "value": "/faqs", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "apache2.access.http_version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "apache2.access.response_code", - "highlights": Array [], - "value": "200", - }, - Object { - "constant": " ", - }, - Object { - "field": "apache2.access.body_sent.bytes", - "highlights": Array [], - "value": "1024", - }, -] -`); - }); - - test('Apache2 Error', () => { - const flattenedDocument = { - 'apache2.error.message': - 'AH00489: Apache/2.4.18 (Ubuntu) configured -- resuming normal operations', - 'apache2.error.level': 'notice', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[apache][", - }, - Object { - "field": "apache2.error.level", - "highlights": Array [], - "value": "notice", - }, - Object { - "constant": "] ", - }, - Object { - "field": "apache2.error.message", - "highlights": Array [], - "value": "AH00489: Apache/2.4.18 (Ubuntu) configured -- resuming normal operations", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts deleted file mode 100644 index fe7ebffe91329..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_apache2.ts +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const filebeatApache2Rules = [ - { - // pre-ECS - when: { - exists: ['apache2.access'], - }, - format: [ - { - constant: '[apache][access] ', - }, - { - field: 'apache2.access.remote_ip', - }, - { - constant: ' ', - }, - { - field: 'apache2.access.user_name', - }, - { - constant: ' "', - }, - { - field: 'apache2.access.method', - }, - { - constant: ' ', - }, - { - field: 'apache2.access.url', - }, - { - constant: ' HTTP/', - }, - { - field: 'apache2.access.http_version', - }, - { - constant: '" ', - }, - { - field: 'apache2.access.response_code', - }, - { - constant: ' ', - }, - { - field: 'apache2.access.body_sent.bytes', - }, - ], - }, - { - // ECS - when: { - values: { - 'event.dataset': 'apache.error', - }, - }, - format: [ - { - constant: '[apache][', - }, - { - field: 'log.level', - }, - { - constant: '] ', - }, - { - field: 'message', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['apache2.error.message'], - }, - format: [ - { - constant: '[apache][', - }, - { - field: 'apache2.error.level', - }, - { - constant: '] ', - }, - { - field: 'apache2.error.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts deleted file mode 100644 index aa490c595d9fd..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.test.ts +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { compileFormattingRules } from '../message'; -import { filebeatAuditdRules } from './filebeat_auditd'; - -const { format } = compileFormattingRules(filebeatAuditdRules); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('auditd log with outcome', () => { - const flattenedDocument = { - '@timestamp': '2016-12-07T02:17:21.515Z', - 'auditd.log': { - addr: '96.241.146.97', - cipher: 'chacha20-poly1305@openssh.com', - direction: 'from-server', - ksize: '512', - laddr: '10.142.0.2', - lport: '22', - pfs: 'curve25519-sha256@libssh.org', - rport: '63927', - sequence: 406, - ses: '4294967295', - spid: '1299', - subj: 'system_u:system_r:sshd_t:s0-s0:c0.c1023', - }, - 'ecs.version': '1.0.0-beta2', - 'event.action': 'crypto_session', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'event.outcome': 'success', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.offset': 783, - message: 'op=start', - process: { executable: '/usr/sbin/sshd', pid: 1298 }, - 'service.type': 'auditd', - user: { 'audit.id': '4294967295', id: '0', 'saved.id': '74' }, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[AuditD][", - }, - Object { - "field": "event.action", - "highlights": Array [], - "value": "crypto_session", - }, - Object { - "constant": "]", - }, - Object { - "constant": " ", - }, - Object { - "field": "event.outcome", - "highlights": Array [], - "value": "success", - }, - Object { - "constant": " ", - }, - Object { - "constant": "user", - }, - Object { - "constant": "=", - }, - Object { - "field": "user", - "highlights": Array [], - "value": "{\\"audit.id\\":\\"4294967295\\",\\"id\\":\\"0\\",\\"saved.id\\":\\"74\\"}", - }, - Object { - "constant": " ", - }, - Object { - "constant": "process", - }, - Object { - "constant": "=", - }, - Object { - "field": "process", - "highlights": Array [], - "value": "{\\"executable\\":\\"/usr/sbin/sshd\\",\\"pid\\":1298}", - }, - 
Object { - "constant": " ", - }, - Object { - "field": "auditd.log", - "highlights": Array [], - "value": "{\\"addr\\":\\"96.241.146.97\\",\\"cipher\\":\\"chacha20-poly1305@openssh.com\\",\\"direction\\":\\"from-server\\",\\"ksize\\":\\"512\\",\\"laddr\\":\\"10.142.0.2\\",\\"lport\\":\\"22\\",\\"pfs\\":\\"curve25519-sha256@libssh.org\\",\\"rport\\":\\"63927\\",\\"sequence\\":406,\\"ses\\":\\"4294967295\\",\\"spid\\":\\"1299\\",\\"subj\\":\\"system_u:system_r:sshd_t:s0-s0:c0.c1023\\"}", - }, - Object { - "constant": " ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "op=start", - }, -] -`); - }); - - test('auditd log without outcome', () => { - const flattenedDocument = { - '@timestamp': '2017-01-31T20:17:14.891Z', - 'auditd.log': { - a0: '9', - a1: '7f564b2672a0', - a2: 'b8', - a3: '0', - exit: '184', - items: '0', - sequence: 18877199, - ses: '4294967295', - success: 'yes', - syscall: '44', - tty: '(none)', - }, - 'ecs.version': '1.0.0-beta2', - 'event.action': 'syscall', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'fileset.name': 'log', - 'host.architecture': 'x86_64', - 'input.type': 'log', - 'log.offset': 174, - process: { - executable: '/usr/libexec/strongswan/charon (deleted)', - name: 'charon', - pid: 1281, - ppid: 1240, - }, - 'service.type': 'auditd', - user: { - 'audit.id': '4294967295', - 'effective.group.id': '0', - 'effective.id': '0', - 'filesystem.group.id': '0', - 'filesystem.id': '0', - 'group.id': '0', - id: '0', - 'saved.group.id': '0', - 'saved.id': '0', - }, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[AuditD][", - }, - Object { - "field": "event.action", - "highlights": Array [], - "value": "syscall", - }, - Object { - "constant": "]", - }, - Object { - "constant": " ", - }, - Object { - "constant": "user", - }, - Object { - "constant": "=", - }, - Object { - "field": "user", - "highlights": Array [], - "value": 
"{\\"audit.id\\":\\"4294967295\\",\\"effective.group.id\\":\\"0\\",\\"effective.id\\":\\"0\\",\\"filesystem.group.id\\":\\"0\\",\\"filesystem.id\\":\\"0\\",\\"group.id\\":\\"0\\",\\"id\\":\\"0\\",\\"saved.group.id\\":\\"0\\",\\"saved.id\\":\\"0\\"}", - }, - Object { - "constant": " ", - }, - Object { - "constant": "process", - }, - Object { - "constant": "=", - }, - Object { - "field": "process", - "highlights": Array [], - "value": "{\\"executable\\":\\"/usr/libexec/strongswan/charon (deleted)\\",\\"name\\":\\"charon\\",\\"pid\\":1281,\\"ppid\\":1240}", - }, - Object { - "constant": " ", - }, - Object { - "field": "auditd.log", - "highlights": Array [], - "value": "{\\"a0\\":\\"9\\",\\"a1\\":\\"7f564b2672a0\\",\\"a2\\":\\"b8\\",\\"a3\\":\\"0\\",\\"exit\\":\\"184\\",\\"items\\":\\"0\\",\\"sequence\\":18877199,\\"ses\\":\\"4294967295\\",\\"success\\":\\"yes\\",\\"syscall\\":\\"44\\",\\"tty\\":\\"(none)\\"}", - }, - Object { - "constant": " ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "undefined", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('auditd IPSEC rule', () => { - const event = { - '@timestamp': '2017-01-31T20:17:14.891Z', - 'auditd.log.auid': '4294967295', - 'auditd.log.dst': '192.168.0.0', - 'auditd.log.dst_prefixlen': '16', - 'auditd.log.op': 'SPD-delete', - 'auditd.log.record_type': 'MAC_IPSEC_EVENT', - 'auditd.log.res': '1', - 'auditd.log.sequence': 18877201, - 'auditd.log.ses': '4294967295', - 'auditd.log.src': '192.168.2.0', - 'auditd.log.src_prefixlen': '24', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.offset': 0, - }; - const message = format(event, {}); - expect(message).toEqual([ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type', highlights: [], value: 'MAC_IPSEC_EVENT' }, - { constant: '] src:' }, - { field: 'auditd.log.src', highlights: [], value: '192.168.2.0' }, - { 
constant: ' dst:' }, - { field: 'auditd.log.dst', highlights: [], value: '192.168.0.0' }, - { constant: ' op:' }, - { field: 'auditd.log.op', highlights: [], value: 'SPD-delete' }, - ]); - }); - - test('AuditD SYSCALL rule', () => { - const event = { - '@timestamp': '2017-01-31T20:17:14.891Z', - 'auditd.log.a0': '9', - 'auditd.log.a1': '7f564b2672a0', - 'auditd.log.a2': 'b8', - 'auditd.log.a3': '0', - 'auditd.log.arch': 'x86_64', - 'auditd.log.auid': '4294967295', - 'auditd.log.comm': 'charon', - 'auditd.log.egid': '0', - 'auditd.log.euid': '0', - 'auditd.log.exe': '/usr/libexec/strongswan/charon (deleted)', - 'auditd.log.exit': '184', - 'auditd.log.fsgid': '0', - 'auditd.log.fsuid': '0', - 'auditd.log.gid': '0', - 'auditd.log.items': '0', - 'auditd.log.pid': '1281', - 'auditd.log.ppid': '1240', - 'auditd.log.record_type': 'SYSCALL', - 'auditd.log.sequence': 18877199, - 'auditd.log.ses': '4294967295', - 'auditd.log.sgid': '0', - 'auditd.log.success': 'yes', - 'auditd.log.suid': '0', - 'auditd.log.syscall': '44', - 'auditd.log.tty': '(none)', - 'auditd.log.uid': '0', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.offset': 174, - }; - const message = format(event, {}); - expect(message).toEqual([ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type', highlights: [], value: 'SYSCALL' }, - { constant: '] exe:' }, - { - field: 'auditd.log.exe', - highlights: [], - value: '/usr/libexec/strongswan/charon (deleted)', - }, - { constant: ' gid:' }, - { field: 'auditd.log.gid', highlights: [], value: '0' }, - { constant: ' uid:' }, - { field: 'auditd.log.uid', highlights: [], value: '0' }, - { constant: ' tty:' }, - { field: 'auditd.log.tty', highlights: [], value: '(none)' }, - { constant: ' pid:' }, - { field: 'auditd.log.pid', highlights: [], value: '1281' }, - { constant: ' ppid:' }, - { field: 'auditd.log.ppid', highlights: [], value: '1240' }, - ]); - }); - - 
test('AuditD events with msg rule', () => { - const event = { - '@timestamp': '2017-01-31T20:17:14.891Z', - 'auditd.log.auid': '4294967295', - 'auditd.log.record_type': 'EXAMPLE', - 'auditd.log.msg': 'some kind of message', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.offset': 174, - }; - const message = format(event, {}); - expect(message).toEqual([ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type', highlights: [], value: 'EXAMPLE' }, - { constant: '] ' }, - { - field: 'auditd.log.msg', - highlights: [], - value: 'some kind of message', - }, - ]); - }); - - test('AuditD catchall rule', () => { - const event = { - '@timestamp': '2017-01-31T20:17:14.891Z', - 'auditd.log.auid': '4294967295', - 'auditd.log.record_type': 'EXAMPLE', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'auditd.log', - 'event.module': 'auditd', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.offset': 174, - }; - const message = format(event, {}); - expect(message).toEqual([ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type', highlights: [], value: 'EXAMPLE' }, - { constant: '] Event without message.' }, - ]); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts deleted file mode 100644 index d2557cf1599ce..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_auditd.ts +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { labelField } from './helpers'; - -const commonActionField = [{ constant: '[AuditD][' }, { field: 'event.action' }, { constant: ']' }]; -const commonOutcomeField = [{ constant: ' ' }, { field: 'event.outcome' }]; - -export const filebeatAuditdRules = [ - { - // ECS format with outcome - when: { - exists: ['ecs.version', 'event.action', 'event.outcome', 'auditd.log'], - }, - format: [ - ...commonActionField, - ...commonOutcomeField, - ...labelField('user', 'user'), - ...labelField('process', 'process'), - { constant: ' ' }, - { field: 'auditd.log' }, - { constant: ' ' }, - { field: 'message' }, - ], - }, - { - // ECS format without outcome - when: { - exists: ['ecs.version', 'event.action', 'auditd.log'], - }, - format: [ - ...commonActionField, - ...labelField('user', 'user'), - ...labelField('process', 'process'), - { constant: ' ' }, - { field: 'auditd.log' }, - { constant: ' ' }, - { field: 'message' }, - ], - }, - { - // pre-ECS IPSEC_EVENT Rule - when: { - exists: ['auditd.log.record_type', 'auditd.log.src', 'auditd.log.dst', 'auditd.log.op'], - values: { - 'auditd.log.record_type': 'MAC_IPSEC_EVENT', - }, - }, - format: [ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type' }, - { constant: '] src:' }, - { field: 'auditd.log.src' }, - { constant: ' dst:' }, - { field: 'auditd.log.dst' }, - { constant: ' op:' }, - { field: 'auditd.log.op' }, - ], - }, - { - // pre-ECS SYSCALL Rule - when: { - exists: [ - 'auditd.log.record_type', - 'auditd.log.exe', - 'auditd.log.gid', - 'auditd.log.uid', - 'auditd.log.tty', - 'auditd.log.pid', - 'auditd.log.ppid', - ], - values: { - 'auditd.log.record_type': 'SYSCALL', - }, - }, - format: [ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type' }, - { constant: '] exe:' }, - { field: 'auditd.log.exe' }, - { constant: ' gid:' }, - { field: 'auditd.log.gid' }, - { constant: ' uid:' }, - { field: 'auditd.log.uid' }, - { constant: ' tty:' }, - { field: 'auditd.log.tty' }, - { constant: ' 
pid:' }, - { field: 'auditd.log.pid' }, - { constant: ' ppid:' }, - { field: 'auditd.log.ppid' }, - ], - }, - { - // pre-ECS Events with `msg` Rule - when: { - exists: ['auditd.log.record_type', 'auditd.log.msg'], - }, - format: [ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type' }, - { constant: '] ' }, - { field: 'auditd.log.msg' }, - ], - }, - { - // pre-ECS Events with `msg` Rule - when: { - exists: ['auditd.log.record_type'], - }, - format: [ - { constant: '[AuditD][' }, - { field: 'auditd.log.record_type' }, - { constant: '] Event without message.' }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts deleted file mode 100644 index 752b61684887e..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.test.ts +++ /dev/null @@ -1,791 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { compileFormattingRules } from '../message'; -import { filebeatHaproxyRules } from './filebeat_haproxy'; - -const { format } = compileFormattingRules(filebeatHaproxyRules); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('haproxy default log', () => { - const flattenedDocument = { - 'destination.ip': '1.2.3.4', - 'destination.port': 5000, - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'haproxy.log', - 'event.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.frontend_name': 'main', - 'haproxy.mode': 'HTTP', - 'haproxy.source': '1.2.3.4', - 'input.type': 'log', - 'log.offset': 0, - 'process.name': 'haproxy', - 'process.pid': 24551, - 'service.type': 'haproxy', - 'source.address': '1.2.3.4', - 'source.geo.continent_name': 'North America', - 'source.geo.country_iso_code': 'US', - 'source.geo.location.lat': 37.751, - 'source.geo.location.lon': -97.822, - 'source.ip': '1.2.3.4', - 'source.port': 40780, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy] ", - }, - Object { - "field": "source.address", - "highlights": Array [], - "value": "1.2.3.4", - }, - Object { - "constant": ":", - }, - Object { - "field": "source.port", - "highlights": Array [], - "value": "40780", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "main", - }, -] -`); - }); - - test('haproxy tcp log', () => { - const flattenedDocument = { - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'haproxy.log', - 'event.duration': 1000000, - 'event.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.backend_name': 'app', - 'haproxy.backend_queue': 0, - 'haproxy.bytes_read': 212, - 'haproxy.connection_wait_time_ms': -1, - 'haproxy.connections.active': 1, - 'haproxy.connections.backend': 0, - 'haproxy.connections.frontend': 1, - 'haproxy.connections.retries': 0, - 'haproxy.connections.server': 0, - 
'haproxy.frontend_name': 'main', - 'haproxy.server_name': '', - 'haproxy.server_queue': 0, - 'haproxy.source': '127.0.0.1', - 'haproxy.termination_state': 'SC', - 'haproxy.total_waiting_time_ms': -1, - 'input.type': 'log', - 'log.offset': 0, - 'process.name': 'haproxy', - 'process.pid': 25457, - 'service.type': 'haproxy', - 'source.address': '127.0.0.1', - 'source.ip': '127.0.0.1', - 'source.port': 40962, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy][tcp] ", - }, - Object { - "field": "source.address", - "highlights": Array [], - "value": "127.0.0.1", - }, - Object { - "constant": ":", - }, - Object { - "field": "source.port", - "highlights": Array [], - "value": "40962", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "main", - }, - Object { - "constant": " -> ", - }, - Object { - "field": "haproxy.backend_name", - "highlights": Array [], - "value": "app", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.server_name", - "highlights": Array [], - "value": "", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.connections.active", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.frontend", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.backend", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.server", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.retries", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.server_queue", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - 
"field": "haproxy.backend_queue", - "highlights": Array [], - "value": "0", - }, -] -`); - }); - - test('haproxy http log', () => { - const flattenedDocument = { - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'haproxy.log', - 'event.duration': 2000000, - 'event.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.backend_name': 'docs_microservice', - 'haproxy.backend_queue': 0, - 'haproxy.bytes_read': 168, - 'haproxy.connection_wait_time_ms': 1, - 'haproxy.connections.active': 6, - 'haproxy.connections.backend': 0, - 'haproxy.connections.frontend': 6, - 'haproxy.connections.retries': 0, - 'haproxy.connections.server': 0, - 'haproxy.frontend_name': 'incoming~', - 'haproxy.http.request.captured_cookie': '-', - 'haproxy.http.request.captured_headers': ['docs.example.internal'], - 'haproxy.http.request.raw_request_line': - 'GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1', - 'haproxy.http.request.time_wait_ms': 0, - 'haproxy.http.request.time_wait_without_data_ms': 0, - 'haproxy.http.response.captured_cookie': '-', - 'haproxy.http.response.captured_headers': [], - 'haproxy.server_name': 'docs', - 'haproxy.server_queue': 0, - 'haproxy.termination_state': '----', - 'haproxy.total_waiting_time_ms': 0, - 'http.response.bytes': 168, - 'http.response.status_code': 304, - 'input.type': 'log', - 'log.offset': 0, - 'process.name': 'haproxy', - 'process.pid': 32450, - 'service.type': 'haproxy', - 'source.address': '1.2.3.4', - 'source.geo.continent_name': 'North America', - 'source.geo.country_iso_code': 'US', - 'source.geo.location.lat': 37.751, - 'source.geo.location.lon': -97.822, - 'source.ip': '1.2.3.4', - 'source.port': 38862, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy][http] ", - }, - Object { - "field": "source.address", - "highlights": Array [], - "value": "1.2.3.4", - }, - Object { - "constant": ":", - }, - Object { - "field": "source.port", - "highlights": Array [], - 
"value": "38862", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "incoming~", - }, - Object { - "constant": " -> ", - }, - Object { - "field": "haproxy.backend_name", - "highlights": Array [], - "value": "docs_microservice", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.server_name", - "highlights": Array [], - "value": "docs", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "haproxy.http.request.raw_request_line", - "highlights": Array [], - "value": "GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "http.response.status_code", - "highlights": Array [], - "value": "304", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.http.request.time_wait_ms", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "event.duration", - "highlights": Array [], - "value": "2000000", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connection_wait_time_ms", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.http.request.time_wait_without_data_ms", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "event.duration", - "highlights": Array [], - "value": "2000000", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.connections.active", - "highlights": Array [], - "value": "6", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.frontend", - "highlights": Array [], - "value": "6", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.backend", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.server", - "highlights": Array [], - 
"value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.retries", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.server_queue", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.backend_queue", - "highlights": Array [], - "value": "0", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('haproxy default log', () => { - const flattenedDocument = { - 'event.dataset': 'haproxy.log', - 'fileset.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.client.ip': '1.2.3.4', - 'haproxy.client.port': '40780', - 'haproxy.destination.ip': '1.2.3.4', - 'haproxy.destination.port': '5000', - 'haproxy.frontend_name': 'main', - 'haproxy.geoip.continent_name': 'North America', - 'haproxy.geoip.country_iso_code': 'US', - 'haproxy.geoip.location.lat': 37.751, - 'haproxy.geoip.location.lon': -97.822, - 'haproxy.mode': 'HTTP', - 'haproxy.pid': '24551', - 'haproxy.process_name': 'haproxy', - 'haproxy.source': '1.2.3.4', - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy] ", - }, - Object { - "field": "haproxy.client.ip", - "highlights": Array [], - "value": "1.2.3.4", - }, - Object { - "constant": ":", - }, - Object { - "field": "haproxy.client.port", - "highlights": Array [], - "value": "40780", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "main", - }, -] -`); - }); - - test('haproxy tcp log', () => { - const flattenedDocument = { - 'event.dataset': 'haproxy.log', - 'fileset.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.backend_name': 'app', - 'haproxy.backend_queue': 0, - 'haproxy.bytes_read': 212, - 'haproxy.client.ip': '127.0.0.1', - 'haproxy.client.port': 40962, - 
'haproxy.connection_wait_time_ms': -1, - 'haproxy.connections.active': 1, - 'haproxy.connections.backend': 0, - 'haproxy.connections.frontend': 1, - 'haproxy.connections.retries': 0, - 'haproxy.connections.server': 0, - 'haproxy.frontend_name': 'main', - 'haproxy.pid': 25457, - 'haproxy.process_name': 'haproxy', - 'haproxy.server_name': '', - 'haproxy.server_queue': 0, - 'haproxy.source': '127.0.0.1', - 'haproxy.tcp.processing_time_ms': 0, - 'haproxy.termination_state': 'SC', - 'haproxy.total_waiting_time_ms': -1, - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy][tcp] ", - }, - Object { - "field": "haproxy.client.ip", - "highlights": Array [], - "value": "127.0.0.1", - }, - Object { - "constant": ":", - }, - Object { - "field": "haproxy.client.port", - "highlights": Array [], - "value": "40962", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "main", - }, - Object { - "constant": " -> ", - }, - Object { - "field": "haproxy.backend_name", - "highlights": Array [], - "value": "app", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.server_name", - "highlights": Array [], - "value": "", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.connections.active", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.frontend", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.backend", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.server", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.retries", - "highlights": Array [], - "value": 
"0", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.server_queue", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.backend_queue", - "highlights": Array [], - "value": "0", - }, -] -`); - }); - - test('haproxy http log', () => { - const flattenedDocument = { - 'event.dataset': 'haproxy.log', - 'fileset.module': 'haproxy', - 'fileset.name': 'log', - 'haproxy.backend_name': 'docs_microservice', - 'haproxy.backend_queue': 0, - 'haproxy.bytes_read': 168, - 'haproxy.client.ip': '1.2.3.4', - 'haproxy.client.port': 38862, - 'haproxy.connection_wait_time_ms': 1, - 'haproxy.connections.active': 6, - 'haproxy.connections.backend': 0, - 'haproxy.connections.frontend': 6, - 'haproxy.connections.retries': 0, - 'haproxy.connections.server': 0, - 'haproxy.frontend_name': 'incoming~', - 'haproxy.geoip.continent_name': 'North America', - 'haproxy.geoip.country_iso_code': 'US', - 'haproxy.geoip.location.lat': 37.751, - 'haproxy.geoip.location.lon': -97.822, - 'haproxy.http.request.captured_cookie': '-', - 'haproxy.http.request.raw_request_line': - 'GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1', - 'haproxy.http.request.time_active_ms': 2, - 'haproxy.http.request.time_wait_ms': 0, - 'haproxy.http.request.time_wait_without_data_ms': 0, - 'haproxy.http.response.captured_cookie': '-', - 'haproxy.http.response.status_code': 304, - 'haproxy.pid': 32450, - 'haproxy.process_name': 'haproxy', - 'haproxy.server_name': 'docs', - 'haproxy.server_queue': 0, - 'haproxy.termination_state': '----', - 'haproxy.total_waiting_time_ms': 0, - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[HAProxy][http] ", - }, - Object { - "field": "haproxy.client.ip", - "highlights": Array [], - "value": "1.2.3.4", - }, - Object { - "constant": ":", - }, - Object { - "field": 
"haproxy.client.port", - "highlights": Array [], - "value": "38862", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.frontend_name", - "highlights": Array [], - "value": "incoming~", - }, - Object { - "constant": " -> ", - }, - Object { - "field": "haproxy.backend_name", - "highlights": Array [], - "value": "docs_microservice", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.server_name", - "highlights": Array [], - "value": "docs", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "haproxy.http.request.raw_request_line", - "highlights": Array [], - "value": "GET /component---src-pages-index-js-4b15624544f97cf0bb8f.js HTTP/1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "haproxy.http.response.status_code", - "highlights": Array [], - "value": "304", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.http.request.time_wait_ms", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.total_waiting_time_ms", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connection_wait_time_ms", - "highlights": Array [], - "value": "1", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.http.request.time_wait_without_data_ms", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.http.request.time_active_ms", - "highlights": Array [], - "value": "2", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.connections.active", - "highlights": Array [], - "value": "6", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.frontend", - "highlights": Array [], - "value": "6", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.backend", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - 
}, - Object { - "field": "haproxy.connections.server", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.connections.retries", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": " ", - }, - Object { - "field": "haproxy.server_queue", - "highlights": Array [], - "value": "0", - }, - Object { - "constant": "/", - }, - Object { - "field": "haproxy.backend_queue", - "highlights": Array [], - "value": "0", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts deleted file mode 100644 index 97836b0a8186f..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_haproxy.ts +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -const ecsFrontendFields = [ - { - field: 'source.address', - }, - { - constant: ':', - }, - { - field: 'source.port', - }, - { - constant: ' ', - }, - { - field: 'haproxy.frontend_name', - }, -]; - -const preEcsFrontendFields = [ - { - field: 'haproxy.client.ip', - }, - { - constant: ':', - }, - { - field: 'haproxy.client.port', - }, - { - constant: ' ', - }, - { - field: 'haproxy.frontend_name', - }, -]; - -const commonBackendFields = [ - { - constant: ' -> ', - }, - { - field: 'haproxy.backend_name', - }, - { - constant: '/', - }, - { - field: 'haproxy.server_name', - }, -]; - -const commonConnectionStatsFields = [ - { - field: 'haproxy.connections.active', - }, - { - constant: '/', - }, - { - field: 'haproxy.connections.frontend', - }, - { - constant: '/', - }, - { - field: 'haproxy.connections.backend', - }, - { - constant: '/', - }, - { - field: 'haproxy.connections.server', - }, - { - constant: '/', - }, - { - field: 'haproxy.connections.retries', - }, -]; - -const commonQueueStatsFields = [ - { - field: 'haproxy.server_queue', - }, - { - constant: '/', - }, - { - field: 'haproxy.backend_queue', - }, -]; - -export const filebeatHaproxyRules = [ - { - // ECS - when: { - exists: ['ecs.version', 'haproxy.http.request.raw_request_line'], - }, - format: [ - { - constant: '[HAProxy][http] ', - }, - ...ecsFrontendFields, - ...commonBackendFields, - { - constant: ' "', - }, - { - field: 'haproxy.http.request.raw_request_line', - }, - { - constant: '" ', - }, - { - field: 'http.response.status_code', - }, - { - constant: ' ', - }, - { - field: 'haproxy.http.request.time_wait_ms', - }, - { - constant: '/', - }, - { - field: 'event.duration', - }, - { - constant: '/', - }, - { - field: 'haproxy.connection_wait_time_ms', - }, - { - constant: '/', - }, - { - field: 'haproxy.http.request.time_wait_without_data_ms', - }, - { - constant: '/', - }, - { - field: 'event.duration', - }, - { - constant: ' ', - }, - ...commonConnectionStatsFields, - { - constant: ' ', - }, 
- ...commonQueueStatsFields, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'haproxy.connections.active'], - }, - format: [ - { - constant: '[HAProxy][tcp] ', - }, - ...ecsFrontendFields, - ...commonBackendFields, - { - constant: ' ', - }, - ...commonConnectionStatsFields, - { - constant: ' ', - }, - ...commonQueueStatsFields, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'haproxy.error_message'], - }, - format: [ - { - constant: '[HAProxy] ', - }, - ...ecsFrontendFields, - { - constant: ' ', - }, - { - field: 'haproxy.error_message', - }, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'haproxy.frontend_name'], - }, - format: [ - { - constant: '[HAProxy] ', - }, - ...ecsFrontendFields, - ], - }, - { - // pre-ECS - when: { - exists: ['haproxy.http.request.raw_request_line'], - }, - format: [ - { - constant: '[HAProxy][http] ', - }, - ...preEcsFrontendFields, - ...commonBackendFields, - { - constant: ' "', - }, - { - field: 'haproxy.http.request.raw_request_line', - }, - { - constant: '" ', - }, - { - field: 'haproxy.http.response.status_code', - }, - { - constant: ' ', - }, - { - field: 'haproxy.http.request.time_wait_ms', - }, - { - constant: '/', - }, - { - field: 'haproxy.total_waiting_time_ms', - }, - { - constant: '/', - }, - { - field: 'haproxy.connection_wait_time_ms', - }, - { - constant: '/', - }, - { - field: 'haproxy.http.request.time_wait_without_data_ms', - }, - { - constant: '/', - }, - { - field: 'haproxy.http.request.time_active_ms', - }, - { - constant: ' ', - }, - ...commonConnectionStatsFields, - { - constant: ' ', - }, - ...commonQueueStatsFields, - ], - }, - { - // pre-ECS - when: { - exists: ['haproxy.connections.active'], - }, - format: [ - { - constant: '[HAProxy][tcp] ', - }, - ...preEcsFrontendFields, - ...commonBackendFields, - { - constant: ' ', - }, - ...commonConnectionStatsFields, - { - constant: ' ', - }, - ...commonQueueStatsFields, - ], - }, - { - // pre-ECS - when: { - exists: 
['haproxy.error_message'], - }, - format: [ - { - constant: '[HAProxy] ', - }, - ...preEcsFrontendFields, - { - constant: ' ', - }, - { - field: 'haproxy.error_message', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['haproxy.frontend_name'], - }, - format: [ - { - constant: '[HAProxy] ', - }, - ...preEcsFrontendFields, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts deleted file mode 100644 index 120137f15b883..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.test.ts +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { compileFormattingRules } from '../message'; -import { filebeatIcingaRules } from './filebeat_icinga'; - -const { format } = compileFormattingRules(filebeatIcingaRules); - -describe('Filebeat Rules', () => { - describe('in pre-ECS format', () => { - test('icinga debug log', () => { - const flattenedDocument = { - '@timestamp': '2017-04-04T11:43:09.000Z', - 'event.dataset': 'icinga.debug', - 'fileset.module': 'icinga', - 'fileset.name': 'debug', - 'icinga.debug.facility': 'GraphiteWriter', - 'icinga.debug.message': - "Add to metric list:'icinga2.demo.services.procs.procs.perfdata.procs.warn 250 1491306189'.", - 'icinga.debug.severity': 'debug', - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Icinga][", - }, - Object { - "field": "icinga.debug.facility", - "highlights": Array [], - "value": "GraphiteWriter", - }, - Object { - "constant": "][", - }, - Object { - "field": "icinga.debug.severity", - "highlights": Array [], - "value": "debug", - }, - Object { - "constant": "] ", - }, - Object { - "field": "icinga.debug.message", - "highlights": Array [], - "value": "Add to metric list:'icinga2.demo.services.procs.procs.perfdata.procs.warn 250 1491306189'.", - }, -] -`); - }); - - test('icinga main log', () => { - const flattenedDocument = { - '@timestamp': '2017-04-04T09:16:34.000Z', - 'event.dataset': 'icinga.main', - 'fileset.module': 'icinga', - 'fileset.name': 'main', - 'icinga.main.facility': 'Notification', - 'icinga.main.message': - "Sending 'Recovery' notification 'demo!load!mail-icingaadmin for user 'on-call'", - 'icinga.main.severity': 'information', - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Icinga][", - }, - Object { - "field": "icinga.main.facility", - "highlights": Array [], - "value": 
"Notification", - }, - Object { - "constant": "][", - }, - Object { - "field": "icinga.main.severity", - "highlights": Array [], - "value": "information", - }, - Object { - "constant": "] ", - }, - Object { - "field": "icinga.main.message", - "highlights": Array [], - "value": "Sending 'Recovery' notification 'demo!load!mail-icingaadmin for user 'on-call'", - }, -] -`); - }); - - test('icinga startup log', () => { - const flattenedDocument = { - 'event.dataset': 'icinga.startup', - 'fileset.module': 'icinga', - 'fileset.name': 'startup', - 'icinga.startup.facility': 'cli', - 'icinga.startup.message': 'Icinga application loader (version: r2.6.3-1)', - 'icinga.startup.severity': 'information', - 'input.type': 'log', - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Icinga][", - }, - Object { - "field": "icinga.startup.facility", - "highlights": Array [], - "value": "cli", - }, - Object { - "constant": "][", - }, - Object { - "field": "icinga.startup.severity", - "highlights": Array [], - "value": "information", - }, - Object { - "constant": "] ", - }, - Object { - "field": "icinga.startup.message", - "highlights": Array [], - "value": "Icinga application loader (version: r2.6.3-1)", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts deleted file mode 100644 index c04a746e6bf41..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_icinga.ts +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatIcingaRules = [ - { - // pre-ECS - when: { - exists: ['icinga.main.message'], - }, - format: [ - { - constant: '[Icinga][', - }, - { - field: 'icinga.main.facility', - }, - { - constant: '][', - }, - { - field: 'icinga.main.severity', - }, - { - constant: '] ', - }, - { - field: 'icinga.main.message', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['icinga.debug.message'], - }, - format: [ - { - constant: '[Icinga][', - }, - { - field: 'icinga.debug.facility', - }, - { - constant: '][', - }, - { - field: 'icinga.debug.severity', - }, - { - constant: '] ', - }, - { - field: 'icinga.debug.message', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['icinga.startup.message'], - }, - format: [ - { - constant: '[Icinga][', - }, - { - field: 'icinga.startup.facility', - }, - { - constant: '][', - }, - { - field: 'icinga.startup.severity', - }, - { - constant: '] ', - }, - { - field: 'icinga.startup.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts deleted file mode 100644 index 72449c03b63a6..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.test.ts +++ /dev/null @@ -1,562 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('iis access log', () => { - const flattenedDocument = { - '@timestamp': '2018-01-01T10:11:12.000Z', - 'destination.address': '127.0.0.1', - 'destination.domain': 'example.com', - 'destination.ip': '127.0.0.1', - 'destination.port': 80, - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'iis.access', - 'event.duration': 789000000, - 'event.module': 'iis', - 'fileset.name': 'access', - 'http.request.body.bytes': 456, - 'http.request.method': 'GET', - 'http.request.referrer': '-', - 'http.response.body.bytes': 123, - 'http.response.status_code': 200, - 'http.version': '1.1', - 'iis.access.cookie': '-', - 'iis.access.server_name': 'MACHINE-NAME', - 'iis.access.site_name': 'W3SVC1', - 'iis.access.sub_status': 0, - 'iis.access.win32_status': 0, - 'input.type': 'log', - 'log.offset': 1204, - 'service.type': 'iis', - 'source.address': '85.181.35.98', - 'source.geo.city_name': 'Berlin', - 'source.geo.continent_name': 'Europe', - 'source.geo.country_iso_code': 'DE', - 'source.geo.location.lat': 52.4908, - 'source.geo.location.lon': 13.3275, - 'source.geo.region_iso_code': 'DE-BE', - 'source.geo.region_name': 'Land Berlin', - 'source.ip': '85.181.35.98', - 'url.path': '/', - 'url.query': 'q=100', - 'user.name': '-', - 'user_agent.device.name': 'Other', - 'user_agent.name': 'Chrome', - 'user_agent.original': - 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36', - 'user_agent.os.full': 'Mac OS X 10.14.0', - 'user_agent.os.name': 'Mac OS X', - 'user_agent.os.version': '10.14.0', - 'user_agent.version': '70.0.3538', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.module", 
- "highlights": Array [], - "value": "iis", - }, - Object { - "constant": "][access] ", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "85.181.35.98", - }, - Object { - "constant": " ", - }, - Object { - "field": "user.name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "http.request.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "url.path", - "highlights": Array [], - "value": "/", - }, - Object { - "constant": "?", - }, - Object { - "field": "url.query", - "highlights": Array [], - "value": "q=100", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "http.version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "http.response.status_code", - "highlights": Array [], - "value": "200", - }, - Object { - "constant": " ", - }, - Object { - "field": "http.response.body.bytes", - "highlights": Array [], - "value": "123", - }, -] -`); - }); - - test('iis 7.5 access log', () => { - const flattenedDocument = { - '@timestamp': '2018-08-28T18:24:25.000Z', - 'destination.address': '10.100.220.70', - 'destination.ip': '10.100.220.70', - 'destination.port': 80, - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'iis.access', - 'event.duration': 792000000, - 'event.module': 'iis', - 'fileset.name': 'access', - 'http.request.method': 'GET', - 'http.response.status_code': 404, - 'iis.access.sub_status': 4, - 'iis.access.win32_status': 2, - 'input.type': 'log', - 'log.offset': 244, - 'service.type': 'iis', - 'source.address': '10.100.118.31', - 'source.ip': '10.100.118.31', - 'url.path': '/', - 'url.query': 'q=100', - 'user.name': '-', - 'user_agent.device.name': 'Other', - 'user_agent.name': 'IE', - 'user_agent.original': - 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; WOW64; Trident/7.0; .NET4.0E; .NET4.0C; .NET CLR 3.5.30729; .NET CLR[ 
2.0.50727](tel: 2050727); .NET CLR 3.0.30729)', - 'user_agent.os.name': 'Windows 8.1', - 'user_agent.version': '7.0', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.module", - "highlights": Array [], - "value": "iis", - }, - Object { - "constant": "][access] ", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "10.100.118.31", - }, - Object { - "constant": " ", - }, - Object { - "field": "user.name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "http.request.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "url.path", - "highlights": Array [], - "value": "/", - }, - Object { - "constant": "?", - }, - Object { - "field": "url.query", - "highlights": Array [], - "value": "q=100", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "http.version", - "highlights": Array [], - "value": "undefined", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "http.response.status_code", - "highlights": Array [], - "value": "404", - }, - Object { - "constant": " ", - }, - Object { - "field": "http.response.body.bytes", - "highlights": Array [], - "value": "undefined", - }, -] -`); - }); - - test('iis error log', () => { - const flattenedDocument = { - '@timestamp': '2018-01-01T08:09:10.000Z', - 'destination.address': '172.31.77.6', - 'destination.ip': '172.31.77.6', - 'destination.port': 80, - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'iis.error', - 'event.module': 'iis', - 'fileset.name': 'error', - 'http.request.method': 'GET', - 'http.response.status_code': 503, - 'http.version': '1.1', - 'iis.error.queue_name': '-', - 'iis.error.reason_phrase': 'ConnLimit', - 'input.type': 'log', - 'log.offset': 186, - 'service.type': 'iis', - 'source.address': '172.31.77.6', - 'source.ip': '172.31.77.6', - 
'source.port': 2094, - 'url.original': '/qos/1kbfile.txt', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[iis][error] ", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "172.31.77.6", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.error.reason_phrase", - "highlights": Array [], - "value": "ConnLimit", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('iis access log', () => { - const flattenedDocument = { - '@timestamp': '2018-01-01T08:09:10.000Z', - 'event.dataset': 'iis.access', - 'fileset.module': 'iis', - 'fileset.name': 'access', - 'iis.access.geoip.city_name': 'Berlin', - 'iis.access.geoip.continent_name': 'Europe', - 'iis.access.geoip.country_iso_code': 'DE', - 'iis.access.geoip.location.lat': 52.4908, - 'iis.access.geoip.location.lon': 13.3275, - 'iis.access.geoip.region_iso_code': 'DE-BE', - 'iis.access.geoip.region_name': 'Land Berlin', - 'iis.access.method': 'GET', - 'iis.access.port': '80', - 'iis.access.query_string': 'q=100', - 'iis.access.referrer': '-', - 'iis.access.remote_ip': '85.181.35.98', - 'iis.access.request_time_ms': '123', - 'iis.access.response_code': '200', - 'iis.access.server_ip': '127.0.0.1', - 'iis.access.sub_status': '0', - 'iis.access.url': '/', - 'iis.access.user_agent.device': 'Other', - 'iis.access.user_agent.major': '57', - 'iis.access.user_agent.minor': '0', - 'iis.access.user_agent.name': 'Firefox', - 'iis.access.user_agent.original': - 'Mozilla/5.0+(Windows+NT+6.1;+Win64;+x64;+rv:57.0)+Gecko/20100101+Firefox/57.0', - 'iis.access.user_agent.os': 'Windows', - 'iis.access.user_agent.os_name': 'Windows', - 'iis.access.user_name': '-', - 'iis.access.win32_status': '0', - 'input.type': 'log', - offset: 257, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[iis][access] ", - }, - Object { - "field": 
"iis.access.remote_ip", - "highlights": Array [], - "value": "85.181.35.98", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.user_name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "iis.access.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.url", - "highlights": Array [], - "value": "/", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "iis.access.http_version", - "highlights": Array [], - "value": "undefined", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "iis.access.response_code", - "highlights": Array [], - "value": "200", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.body_sent.bytes", - "highlights": Array [], - "value": "undefined", - }, -] -`); - }); - - test('iis 7.5 access log', () => { - const flattenedDocument = { - '@timestamp': '2018-08-28T18:24:25.000Z', - 'event.dataset': 'iis.access', - 'fileset.module': 'iis', - 'fileset.name': 'access', - 'iis.access.method': 'GET', - 'iis.access.port': '80', - 'iis.access.query_string': '-', - 'iis.access.remote_ip': '10.100.118.31', - 'iis.access.request_time_ms': '792', - 'iis.access.response_code': '404', - 'iis.access.server_ip': '10.100.220.70', - 'iis.access.sub_status': '4', - 'iis.access.url': '/', - 'iis.access.user_agent.device': 'Other', - 'iis.access.user_agent.name': 'Other', - 'iis.access.user_agent.original': - 'Mozilla/4.0+(compatible;+MSIE+7.0;+Windows+NT+6.3;+WOW64;+Trident/7.0;+.NET4.0E;+.NET4.0C;+.NET+CLR+3.5.30729;+.NET+CLR[+2.0.50727](tel:+2050727);+.NET+CLR+3.0.30729)', - 'iis.access.user_agent.os': 'Windows', - 'iis.access.user_agent.os_name': 'Windows', - 'iis.access.user_name': '-', - 'iis.access.win32_status': '2', - 'input.type': 'log', - offset: 244, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ 
- Object { - "constant": "[iis][access] ", - }, - Object { - "field": "iis.access.remote_ip", - "highlights": Array [], - "value": "10.100.118.31", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.user_name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "iis.access.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.url", - "highlights": Array [], - "value": "/", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "iis.access.http_version", - "highlights": Array [], - "value": "undefined", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "iis.access.response_code", - "highlights": Array [], - "value": "404", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.access.body_sent.bytes", - "highlights": Array [], - "value": "undefined", - }, -] -`); - }); - - test('iis error log', () => { - const flattenedDocument = { - '@timestamp': '2018-01-01T08:09:10.000Z', - 'event.dataset': 'iis.error', - 'fileset.module': 'iis', - 'fileset.name': 'error', - 'iis.error.http_version': '1.1', - 'iis.error.method': 'GET', - 'iis.error.queue_name': '-', - 'iis.error.reason_phrase': 'ConnLimit', - 'iis.error.remote_ip': '172.31.77.6', - 'iis.error.remote_port': '2094', - 'iis.error.response_code': '503', - 'iis.error.server_ip': '172.31.77.6', - 'iis.error.server_port': '80', - 'iis.error.url': '/qos/1kbfile.txt', - 'input.type': 'log', - offset: 186, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[iis][error] ", - }, - Object { - "field": "iis.error.remote_ip", - "highlights": Array [], - "value": "172.31.77.6", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "iis.error.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - 
"field": "iis.error.url", - "highlights": Array [], - "value": "/qos/1kbfile.txt", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "iis.error.http_version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "iis.error.response_code", - "highlights": Array [], - "value": "503", - }, - Object { - "constant": " ", - }, - Object { - "field": "iis.error.reason_phrase", - "highlights": Array [], - "value": "ConnLimit", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts deleted file mode 100644 index ea3485440bb74..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_iis.ts +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatIisRules = [ - { - // pre-ECS - when: { - exists: ['iis.access.method'], - }, - format: [ - { - constant: '[iis][access] ', - }, - { - field: 'iis.access.remote_ip', - }, - { - constant: ' ', - }, - { - field: 'iis.access.user_name', - }, - { - constant: ' "', - }, - { - field: 'iis.access.method', - }, - { - constant: ' ', - }, - { - field: 'iis.access.url', - }, - { - constant: ' HTTP/', - }, - { - field: 'iis.access.http_version', - }, - { - constant: '" ', - }, - { - field: 'iis.access.response_code', - }, - { - constant: ' ', - }, - { - field: 'iis.access.body_sent.bytes', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['iis.error.url'], - }, - format: [ - { - constant: '[iis][error] ', - }, - { - field: 'iis.error.remote_ip', - }, - { - constant: ' "', - }, - { - field: 'iis.error.method', - }, - { - constant: ' ', - }, - { - field: 'iis.error.url', - }, - { - constant: ' HTTP/', - }, - { - field: 'iis.error.http_version', - }, - { - constant: '" ', - }, - { - field: 'iis.error.response_code', - }, - { - constant: ' ', - }, - { - field: 'iis.error.reason_phrase', - }, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'iis.error.reason_phrase'], - }, - format: [ - { - constant: '[iis][error] ', - }, - { - field: 'source.ip', - }, - { - constant: ' ', - }, - { - field: 'iis.error.reason_phrase', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['iis.error.reason_phrase'], - }, - format: [ - { - constant: '[iis][error] ', - }, - { - field: 'iis.error.remote_ip', - }, - { - constant: ' ', - }, - { - field: 'iis.error.reason_phrase', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts deleted file mode 100644 index 19cb5f6e31118..0000000000000 --- 
a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_kafka.test.ts +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('kafka log', () => { - const flattenedDocument = { - '@timestamp': '2017-08-04T10:48:21.063Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'kafka.log', - 'event.module': 'kafka', - 'fileset.name': 'log', - 'input.type': 'log', - 'kafka.log.class': 'kafka.controller.KafkaController', - 'kafka.log.component': 'Controller 0', - 'log.level': 'INFO', - 'log.offset': 131, - message: '0 successfully elected as the controller', - 'service.type': 'kafka', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.dataset", - "highlights": Array [], - "value": "kafka.log", - }, - Object { - "constant": "][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "INFO", - }, - Object { - "constant": "] ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "0 successfully elected as the controller", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts deleted file mode 100644 index edc534d9c345f..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.test.ts +++ 
/dev/null @@ -1,206 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('logstash log', () => { - const flattenedDocument = { - '@timestamp': '2017-10-23T14:20:12.046Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'logstash.log', - 'event.module': 'logstash', - 'fileset.name': 'log', - 'input.type': 'log', - 'log.level': 'INFO', - 'log.offset': 0, - 'logstash.log.module': 'logstash.modules.scaffold', - message: - 'Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}', - 'service.type': 'logstash', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.dataset", - "highlights": Array [], - "value": "logstash.log", - }, - Object { - "constant": "][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "INFO", - }, - Object { - "constant": "] ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "Initializing module {:module_name=>\\"fb_apache\\", :directory=>\\"/usr/share/logstash/modules/fb_apache/configuration\\"}", - }, -] -`); - }); - - test('logstash slowlog', () => { - const flattenedDocument = { - '@timestamp': '2017-10-30T09:57:58.243Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'logstash.slowlog', - 'event.duration': 3027675106, - 'event.module': 'logstash', - 'fileset.name': 'slowlog', - 'input.type': 'log', - 'log.level': 'WARN', - 'log.offset': 0, - 'logstash.slowlog': { - event: - 
'"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"', - module: 'slowlog.logstash.filters.sleep', - plugin_name: 'sleep', - plugin_params: - '{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}', - plugin_type: 'filters', - took_in_millis: 3027, - }, - 'service.type': 'logstash', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Logstash][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "WARN", - }, - Object { - "constant": "] ", - }, - Object { - "field": "logstash.slowlog", - "highlights": Array [], - "value": "{\\"event\\":\\"\\\\\\"{\\\\\\\\\\\\\\"@version\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"1\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"@timestamp\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"2017-10-30T13:57:55.130Z\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"host\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"sashimi\\\\\\\\\\\\\\",\\\\\\\\\\\\\\"sequence\\\\\\\\\\\\\\":0,\\\\\\\\\\\\\\"message\\\\\\\\\\\\\\":\\\\\\\\\\\\\\"Hello world!\\\\\\\\\\\\\\"}\\\\\\"\\",\\"module\\":\\"slowlog.logstash.filters.sleep\\",\\"plugin_name\\":\\"sleep\\",\\"plugin_params\\":\\"{\\\\\\"time\\\\\\"=>3, \\\\\\"id\\\\\\"=>\\\\\\"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c\\\\\\"}\\",\\"plugin_type\\":\\"filters\\",\\"took_in_millis\\":3027}", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('logstash log', () => { - const flattenedDocument = { - '@timestamp': '2017-10-23T14:20:12.046Z', - 'event.dataset': 'logstash.log', - 'fileset.module': 'logstash', - 'fileset.name': 'log', - 'input.type': 'log', - 'logstash.log.level': 'INFO', - 'logstash.log.message': - 'Initializing module {:module_name=>"fb_apache", :directory=>"/usr/share/logstash/modules/fb_apache/configuration"}', - 'logstash.log.module': 'logstash.modules.scaffold', - offset: 0, - 'prospector.type': 'log', - }; - - 
expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Logstash][", - }, - Object { - "field": "logstash.log.level", - "highlights": Array [], - "value": "INFO", - }, - Object { - "constant": "] ", - }, - Object { - "field": "logstash.log.module", - "highlights": Array [], - "value": "logstash.modules.scaffold", - }, - Object { - "constant": " - ", - }, - Object { - "field": "logstash.log.message", - "highlights": Array [], - "value": "Initializing module {:module_name=>\\"fb_apache\\", :directory=>\\"/usr/share/logstash/modules/fb_apache/configuration\\"}", - }, -] -`); - }); - - test('logstash slowlog', () => { - const flattenedDocument = { - '@timestamp': '2017-10-30T09:57:58.243Z', - 'event.dataset': 'logstash.slowlog', - 'fileset.module': 'logstash', - 'fileset.name': 'slowlog', - 'input.type': 'log', - 'logstash.slowlog.event': - '"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"', - 'logstash.slowlog.level': 'WARN', - 'logstash.slowlog.message': - 'event processing time {:plugin_params=>{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}, :took_in_nanos=>3027675106, :took_in_millis=>3027, :event=>"{\\"@version\\":\\"1\\",\\"@timestamp\\":\\"2017-10-30T13:57:55.130Z\\",\\"host\\":\\"sashimi\\",\\"sequence\\":0,\\"message\\":\\"Hello world!\\"}"}', - 'logstash.slowlog.module': 'slowlog.logstash.filters.sleep', - 'logstash.slowlog.plugin_name': 'sleep', - 'logstash.slowlog.plugin_params': - '{"time"=>3, "id"=>"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c"}', - 'logstash.slowlog.plugin_type': 'filters', - 'logstash.slowlog.took_in_millis': 3027, - 'logstash.slowlog.took_in_nanos': 3027675106, - offset: 0, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Logstash][", - }, - Object { - 
"field": "logstash.slowlog.level", - "highlights": Array [], - "value": "WARN", - }, - Object { - "constant": "] ", - }, - Object { - "field": "logstash.slowlog.module", - "highlights": Array [], - "value": "slowlog.logstash.filters.sleep", - }, - Object { - "constant": " - ", - }, - Object { - "field": "logstash.slowlog.message", - "highlights": Array [], - "value": "event processing time {:plugin_params=>{\\"time\\"=>3, \\"id\\"=>\\"e4e12a4e3082615c5427079bf4250dbfa338ebac10f8ea9912d7b98a14f56b8c\\"}, :took_in_nanos=>3027675106, :took_in_millis=>3027, :event=>\\"{\\\\\\"@version\\\\\\":\\\\\\"1\\\\\\",\\\\\\"@timestamp\\\\\\":\\\\\\"2017-10-30T13:57:55.130Z\\\\\\",\\\\\\"host\\\\\\":\\\\\\"sashimi\\\\\\",\\\\\\"sequence\\\\\\":0,\\\\\\"message\\\\\\":\\\\\\"Hello world!\\\\\\"}\\"}", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts deleted file mode 100644 index 39b2058ca7cdb..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_logstash.ts +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatLogstashRules = [ - { - // pre-ECS - when: { - exists: ['logstash.log.message'], - }, - format: [ - { - constant: '[Logstash][', - }, - { - field: 'logstash.log.level', - }, - { - constant: '] ', - }, - { - field: 'logstash.log.module', - }, - { - constant: ' - ', - }, - { - field: 'logstash.log.message', - }, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'logstash.slowlog'], - }, - format: [ - { - constant: '[Logstash][', - }, - { - field: 'log.level', - }, - { - constant: '] ', - }, - { - field: 'logstash.slowlog', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['logstash.slowlog.message'], - }, - format: [ - { - constant: '[Logstash][', - }, - { - field: 'logstash.slowlog.level', - }, - { - constant: '] ', - }, - { - field: 'logstash.slowlog.module', - }, - { - constant: ' - ', - }, - { - field: 'logstash.slowlog.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts deleted file mode 100644 index 3df7ebec241cc..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.test.ts +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { compileFormattingRules } from '../message'; -import { filebeatMongodbRules } from './filebeat_mongodb'; - -const { format } = compileFormattingRules(filebeatMongodbRules); - -describe('Filebeat Rules', () => { - describe('in pre-ECS format', () => { - test('mongodb log', () => { - const flattenedDocument = { - '@timestamp': '2018-02-05T12:44:56.677Z', - 'event.dataset': 'mongodb.log', - 'fileset.module': 'mongodb', - 'fileset.name': 'log', - 'input.type': 'log', - 'mongodb.log.component': 'STORAGE', - 'mongodb.log.context': 'initandlisten', - 'mongodb.log.message': - 'wiredtiger_open config: create,cache_size=8G,session_max=20000,eviction=(threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),', - 'mongodb.log.severity': 'I', - offset: 281, - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[MongoDB][", - }, - Object { - "field": "mongodb.log.component", - "highlights": Array [], - "value": "STORAGE", - }, - Object { - "constant": "] ", - }, - Object { - "field": "mongodb.log.message", - "highlights": Array [], - "value": "wiredtiger_open config: create,cache_size=8G,session_max=20000,eviction=(threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts deleted file mode 100644 index 06a4964875898..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mongodb.ts +++ 
/dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const filebeatMongodbRules = [ - { - // pre-ECS - when: { - exists: ['mongodb.log.message'], - }, - format: [ - { - constant: '[MongoDB][', - }, - { - field: 'mongodb.log.component', - }, - { - constant: '] ', - }, - { - field: 'mongodb.log.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts deleted file mode 100644 index 0329d53f92d08..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.test.ts +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('mysql error log', () => { - const flattenedDocument = { - '@timestamp': '2016-12-09T12:08:33.335Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'mysql.error', - 'event.module': 'mysql', - 'fileset.name': 'error', - 'input.type': 'log', - 'log.level': 'Warning', - 'log.offset': 92, - message: - 'TIMESTAMP with implicit DEFAULT value is deprecated. 
Please use --explicit_defaults_for_timestamp server option (see documentation for more details).', - 'mysql.thread_id': 0, - 'service.type': 'mysql', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.dataset", - "highlights": Array [], - "value": "mysql.error", - }, - Object { - "constant": "][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "Warning", - }, - Object { - "constant": "] ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).", - }, -] -`); - }); - - test('mysql slowlog', () => { - const flattenedDocument = { - '@timestamp': '2018-08-07T08:27:47.000Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'mysql.slowlog', - 'event.duration': 4071491000, - 'event.module': 'mysql', - 'fileset.name': 'slowlog', - 'input.type': 'log', - 'log.flags': ['multiline'], - 'log.offset': 526, - 'mysql.slowlog.current_user': 'appuser', - 'mysql.slowlog.lock_time.sec': 0.000212, - 'mysql.slowlog.query': - 'SELECT mcu.mcu_guid, mcu.cus_guid, mcu.mcu_url, mcu.mcu_crawlelements, mcu.mcu_order, GROUP_CONCAT(mca.mca_guid SEPARATOR ";") as mca_guid\n FROM kat_mailcustomerurl mcu, kat_customer cus, kat_mailcampaign mca\n WHERE cus.cus_guid = mcu.cus_guid\n AND cus.pro_code = \'CYB\'\n AND cus.cus_offline = 0\n AND mca.cus_guid = cus.cus_guid\n AND (mcu.mcu_date IS NULL OR mcu.mcu_date < CURDATE())\n AND mcu.mcu_crawlelements IS NOT NULL\n GROUP BY mcu.mcu_guid\n ORDER BY mcu.mcu_order ASC\n LIMIT 1000;', - 'mysql.slowlog.rows_examined': 1489615, - 'mysql.slowlog.rows_sent': 1000, - 'mysql.thread_id': 10997316, - 'service.type': 'mysql', - 'source.domain': 'apphost', - 'source.ip': '1.1.1.1', - 'user.name': 'appuser', - }; - - expect(format(flattenedDocument, 
{})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[MySQL][slowlog] ", - }, - Object { - "field": "user.name", - "highlights": Array [], - "value": "appuser", - }, - Object { - "constant": "@", - }, - Object { - "field": "source.domain", - "highlights": Array [], - "value": "apphost", - }, - Object { - "constant": " [", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "1.1.1.1", - }, - Object { - "constant": "] ", - }, - Object { - "constant": " - ", - }, - Object { - "field": "event.duration", - "highlights": Array [], - "value": "4071491000", - }, - Object { - "constant": " ns - ", - }, - Object { - "field": "mysql.slowlog.query", - "highlights": Array [], - "value": "SELECT mcu.mcu_guid, mcu.cus_guid, mcu.mcu_url, mcu.mcu_crawlelements, mcu.mcu_order, GROUP_CONCAT(mca.mca_guid SEPARATOR \\";\\") as mca_guid - FROM kat_mailcustomerurl mcu, kat_customer cus, kat_mailcampaign mca - WHERE cus.cus_guid = mcu.cus_guid - AND cus.pro_code = 'CYB' - AND cus.cus_offline = 0 - AND mca.cus_guid = cus.cus_guid - AND (mcu.mcu_date IS NULL OR mcu.mcu_date < CURDATE()) - AND mcu.mcu_crawlelements IS NOT NULL - GROUP BY mcu.mcu_guid - ORDER BY mcu.mcu_order ASC - LIMIT 1000;", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('mysql error log', () => { - const errorDoc = { - 'mysql.error.message': - "Access denied for user 'petclinicdd'@'47.153.152.234' (using password: YES)", - }; - const message = format(errorDoc, {}); - expect(message).toEqual([ - { - constant: '[MySQL][error] ', - }, - { - field: 'mysql.error.message', - highlights: [], - value: "Access denied for user 'petclinicdd'@'47.153.152.234' (using password: YES)", - }, - ]); - }); - - test('mysql slow log', () => { - const errorDoc = { - 'mysql.slowlog.query': 'select * from hosts', - 'mysql.slowlog.query_time.sec': 5, - 'mysql.slowlog.user': 'admin', - 'mysql.slowlog.ip': '192.168.1.42', - 'mysql.slowlog.host': 'webserver-01', - }; - const message = 
format(errorDoc, {}); - expect(message).toEqual([ - { - constant: '[MySQL][slowlog] ', - }, - { - field: 'mysql.slowlog.user', - highlights: [], - value: 'admin', - }, - { - constant: '@', - }, - { - field: 'mysql.slowlog.host', - highlights: [], - value: 'webserver-01', - }, - { - constant: ' [', - }, - { - field: 'mysql.slowlog.ip', - highlights: [], - value: '192.168.1.42', - }, - { - constant: '] ', - }, - { - constant: ' - ', - }, - { - field: 'mysql.slowlog.query_time.sec', - highlights: [], - value: '5', - }, - { - constant: ' s - ', - }, - { - field: 'mysql.slowlog.query', - highlights: [], - value: 'select * from hosts', - }, - ]); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts deleted file mode 100644 index e90977f9bf8fa..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_mysql.ts +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatMySQLRules = [ - { - // pre-ECS - when: { - exists: ['mysql.error.message'], - }, - format: [ - { - constant: '[MySQL][error] ', - }, - { - field: 'mysql.error.message', - }, - ], - }, - { - // ECS - when: { - exists: ['ecs.version', 'mysql.slowlog.query'], - }, - format: [ - { - constant: '[MySQL][slowlog] ', - }, - { - field: 'user.name', - }, - { - constant: '@', - }, - { - field: 'source.domain', - }, - { - constant: ' [', - }, - { - field: 'source.ip', - }, - { - constant: '] ', - }, - { - constant: ' - ', - }, - { - field: 'event.duration', - }, - { - constant: ' ns - ', - }, - { - field: 'mysql.slowlog.query', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['mysql.slowlog.user', 'mysql.slowlog.query_time.sec', 'mysql.slowlog.query'], - }, - format: [ - { - constant: '[MySQL][slowlog] ', - }, - { - field: 'mysql.slowlog.user', - }, - { - constant: '@', - }, - { - field: 'mysql.slowlog.host', - }, - { - constant: ' [', - }, - { - field: 'mysql.slowlog.ip', - }, - { - constant: '] ', - }, - { - constant: ' - ', - }, - { - field: 'mysql.slowlog.query_time.sec', - }, - { - constant: ' s - ', - }, - { - field: 'mysql.slowlog.query', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts deleted file mode 100644 index 0bc8ae1e907b8..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.test.ts +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules(getBuiltinRules([])); - -describe('Filebeat Rules', () => { - describe('in ECS format', () => { - test('Nginx Access', () => { - const flattenedDocument = { - '@timestamp': '2017-05-29T19:02:48.000Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'nginx.access', - 'event.module': 'nginx', - 'fileset.name': 'access', - 'http.request.method': 'GET', - 'http.request.referrer': '-', - 'http.response.body.bytes': 612, - 'http.response.status_code': 404, - 'http.version': '1.1', - 'input.type': 'log', - 'log.offset': 183, - 'service.type': 'nginx', - 'source.ip': '172.17.0.1', - 'url.original': '/stringpatch', - 'user.name': '-', - 'user_agent.device': 'Other', - 'user_agent.major': '15', - 'user_agent.minor': '0', - 'user_agent.name': 'Firefox Alpha', - 'user_agent.original': - 'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20120716 Firefox/15.0a2', - 'user_agent.os.full_name': 'Windows 7', - 'user_agent.os.name': 'Windows 7', - 'user_agent.patch': 'a2', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.module", - "highlights": Array [], - "value": "nginx", - }, - Object { - "constant": "][access] ", - }, - Object { - "field": "source.ip", - "highlights": Array [], - "value": "172.17.0.1", - }, - Object { - "constant": " ", - }, - Object { - "field": "user.name", - "highlights": Array [], - "value": "-", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "http.request.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "url.original", - "highlights": Array [], - "value": "/stringpatch", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "http.version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - 
Object { - "field": "http.response.status_code", - "highlights": Array [], - "value": "404", - }, - Object { - "constant": " ", - }, - Object { - "field": "http.response.body.bytes", - "highlights": Array [], - "value": "612", - }, -] -`); - }); - - test('Nginx Error', () => { - const flattenedDocument = { - '@timestamp': '2016-10-25T14:49:34.000Z', - 'ecs.version': '1.0.0-beta2', - 'event.dataset': 'nginx.error', - 'event.module': 'nginx', - 'fileset.name': 'error', - 'input.type': 'log', - 'log.level': 'error', - 'log.offset': 0, - message: - 'open() "/usr/local/Cellar/nginx/1.10.2_1/html/favicon.ico" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/"', - 'nginx.error.connection_id': 1, - 'process.pid': 54053, - 'process.thread.id': 0, - 'service.type': 'nginx', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[nginx]", - }, - Object { - "constant": "[", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "error", - }, - Object { - "constant": "] ", - }, - Object { - "field": "message", - "highlights": Array [], - "value": "open() \\"/usr/local/Cellar/nginx/1.10.2_1/html/favicon.ico\\" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: \\"GET /favicon.ico HTTP/1.1\\", host: \\"localhost:8080\\", referrer: \\"http://localhost:8080/\\"", - }, -] -`); - }); - }); - - describe('in pre-ECS format', () => { - test('Nginx Access', () => { - const flattenedDocument = { - 'nginx.access': true, - 'nginx.access.remote_ip': '192.168.1.42', - 'nginx.access.user_name': 'admin', - 'nginx.access.method': 'GET', - 'nginx.access.url': '/faq', - 'nginx.access.http_version': '1.1', - 'nginx.access.body_sent.bytes': 1024, - 'nginx.access.response_code': 200, - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { 
- "constant": "[nginx][access] ", - }, - Object { - "field": "nginx.access.remote_ip", - "highlights": Array [], - "value": "192.168.1.42", - }, - Object { - "constant": " ", - }, - Object { - "field": "nginx.access.user_name", - "highlights": Array [], - "value": "admin", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "nginx.access.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "nginx.access.url", - "highlights": Array [], - "value": "/faq", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "nginx.access.http_version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "nginx.access.response_code", - "highlights": Array [], - "value": "200", - }, - Object { - "constant": " ", - }, - Object { - "field": "nginx.access.body_sent.bytes", - "highlights": Array [], - "value": "1024", - }, -] -`); - }); - - test('Nginx Error', () => { - const flattenedDocument = { - 'nginx.error.message': - 'connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: localhost, request: "GET /php-status?json= HTTP/1.1", upstream: "fastcgi://[::1]:9000", host: "localhost"', - 'nginx.error.level': 'error', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[nginx]", - }, - Object { - "constant": "[", - }, - Object { - "field": "nginx.error.level", - "highlights": Array [], - "value": "error", - }, - Object { - "constant": "] ", - }, - Object { - "field": "nginx.error.message", - "highlights": Array [], - "value": "connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: localhost, request: \\"GET /php-status?json= HTTP/1.1\\", upstream: \\"fastcgi://[::1]:9000\\", host: \\"localhost\\"", - }, -] -`); - }); - }); -}); diff --git 
a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts deleted file mode 100644 index 0fd70dc25bb88..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_nginx.ts +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const filebeatNginxRules = [ - { - // pre-ECS - when: { - exists: ['nginx.access.method'], - }, - format: [ - { - constant: '[nginx][access] ', - }, - { - field: 'nginx.access.remote_ip', - }, - { - constant: ' ', - }, - { - field: 'nginx.access.user_name', - }, - { - constant: ' "', - }, - { - field: 'nginx.access.method', - }, - { - constant: ' ', - }, - { - field: 'nginx.access.url', - }, - { - constant: ' HTTP/', - }, - { - field: 'nginx.access.http_version', - }, - { - constant: '" ', - }, - { - field: 'nginx.access.response_code', - }, - { - constant: ' ', - }, - { - field: 'nginx.access.body_sent.bytes', - }, - ], - }, - { - // ECS - when: { - values: { - 'event.dataset': 'nginx.error', - }, - }, - format: [ - { - constant: '[nginx]', - }, - { - constant: '[', - }, - { - field: 'log.level', - }, - { - constant: '] ', - }, - { - field: 'message', - }, - ], - }, - { - // pre-ECS - when: { - exists: ['nginx.error.message'], - }, - format: [ - { - constant: '[nginx]', - }, - { - constant: '[', - }, - { - field: 'nginx.error.level', - }, - { - constant: '] ', - }, - { - field: 'nginx.error.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts 
b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts deleted file mode 100644 index 8dc70053e2022..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.test.ts +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { compileFormattingRules } from '../message'; -import { filebeatOsqueryRules } from './filebeat_osquery'; - -const { format } = compileFormattingRules(filebeatOsqueryRules); - -describe('Filebeat Rules', () => { - describe('in pre-ECS format', () => { - test('osquery result log', () => { - const flattenedDocument = { - '@timestamp': '2017-12-28T14:40:08.000Z', - 'event.dataset': 'osquery.result', - 'fileset.module': 'osquery', - 'fileset.name': 'result', - 'input.type': 'log', - offset: 0, - 'osquery.result.action': 'removed', - 'osquery.result.calendar_time': 'Thu Dec 28 14:40:08 2017 UTC', - 'osquery.result.columns': { - blocks: '122061322', - blocks_available: '75966945', - blocks_free: '121274885', - blocks_size: '4096', - device: '/dev/disk1s4', - device_alias: '/dev/disk1s4', - flags: '345018372', - inodes: '9223372036854775807', - inodes_free: '9223372036854775804', - path: '/private/var/vm', - type: 'apfs', - }, - 'osquery.result.counter': '1', - 'osquery.result.decorations.host_uuid': '4AB2906D-5516-5794-AF54-86D1D7F533F3', - 'osquery.result.decorations.username': 'tsg', - 'osquery.result.epoch': '0', - 'osquery.result.host_identifier': '192-168-0-4.rdsnet.ro', - 'osquery.result.name': 'pack_it-compliance_mounts', - 'osquery.result.unix_time': '1514472008', - 'prospector.type': 'log', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[Osquery][", 
- }, - Object { - "field": "osquery.result.action", - "highlights": Array [], - "value": "removed", - }, - Object { - "constant": "] ", - }, - Object { - "field": "osquery.result.host_identifier", - "highlights": Array [], - "value": "192-168-0-4.rdsnet.ro", - }, - Object { - "constant": " ", - }, - Object { - "field": "osquery.result.columns", - "highlights": Array [], - "value": "{\\"blocks\\":\\"122061322\\",\\"blocks_available\\":\\"75966945\\",\\"blocks_free\\":\\"121274885\\",\\"blocks_size\\":\\"4096\\",\\"device\\":\\"/dev/disk1s4\\",\\"device_alias\\":\\"/dev/disk1s4\\",\\"flags\\":\\"345018372\\",\\"inodes\\":\\"9223372036854775807\\",\\"inodes_free\\":\\"9223372036854775804\\",\\"path\\":\\"/private/var/vm\\",\\"type\\":\\"apfs\\"}", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts deleted file mode 100644 index b3a6ee8c5cb47..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_osquery.ts +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatOsqueryRules = [ - { - // pre-ECS - when: { - exists: ['osquery.result.name'], - }, - format: [ - { - constant: '[Osquery][', - }, - { - field: 'osquery.result.action', - }, - { - constant: '] ', - }, - { - field: 'osquery.result.host_identifier', - }, - { - constant: ' ', - }, - { - field: 'osquery.result.columns', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts deleted file mode 100644 index 788c65f92c4b4..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_redis.ts +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const filebeatRedisRules = [ - { - when: { - exists: ['redis.log.message'], - }, - format: [ - { - constant: '[Redis]', - }, - { - constant: '[', - }, - { - field: 'redis.log.level', - }, - { - constant: '] ', - }, - { - field: 'redis.log.message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts deleted file mode 100644 index cb695abcccdc8..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_system.ts +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatSystemRules = [ - { - when: { - exists: ['system.syslog.message'], - }, - format: [ - { - constant: '[System][syslog] ', - }, - { - field: 'system.syslog.program', - }, - { - constant: ' - ', - }, - { - field: 'system.syslog.message', - }, - ], - }, - { - when: { - exists: ['system.auth.message'], - }, - format: [ - { - constant: '[System][auth] ', - }, - { - field: 'system.auth.program', - }, - { - constant: ' - ', - }, - { - field: 'system.auth.message', - }, - ], - }, - { - when: { - exists: ['system.auth.ssh.event'], - }, - format: [ - { - constant: '[System][auth][ssh]', - }, - { - constant: ' ', - }, - { - field: 'system.auth.ssh.event', - }, - { - constant: ' user ', - }, - { - field: 'system.auth.user', - }, - { - constant: ' from ', - }, - { - field: 'system.auth.ssh.ip', - }, - ], - }, - { - when: { - exists: ['system.auth.ssh.dropped_ip'], - }, - format: [ - { - constant: '[System][auth][ssh]', - }, - { - constant: ' Dropped connection from ', - }, - { - field: 'system.auth.ssh.dropped_ip', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts deleted file mode 100644 index b19124558fdd0..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.test.ts +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { compileFormattingRules } from '../message'; -import { filebeatTraefikRules } from './filebeat_traefik'; - -const { format } = compileFormattingRules(filebeatTraefikRules); - -describe('Filebeat Rules', () => { - describe('in pre-ECS format', () => { - test('traefik access log', () => { - const flattenedDocument = { - '@timestamp': '2017-10-02T20:22:08.000Z', - 'event.dataset': 'traefik.access', - 'fileset.module': 'traefik', - 'fileset.name': 'access', - 'input.type': 'log', - offset: 280, - 'prospector.type': 'log', - 'traefik.access.backend_url': 'http://172.19.0.3:5601', - 'traefik.access.body_sent.bytes': 0, - 'traefik.access.duration': 3, - 'traefik.access.frontend_name': 'Host-host1', - 'traefik.access.geoip.city_name': 'Berlin', - 'traefik.access.geoip.continent_name': 'Europe', - 'traefik.access.geoip.country_iso_code': 'DE', - 'traefik.access.geoip.location.lat': 52.4908, - 'traefik.access.geoip.location.lon': 13.3275, - 'traefik.access.geoip.region_iso_code': 'DE-BE', - 'traefik.access.geoip.region_name': 'Land Berlin', - 'traefik.access.http_version': '1.1', - 'traefik.access.method': 'GET', - 'traefik.access.referrer': 'http://example.com/login', - 'traefik.access.remote_ip': '85.181.35.98', - 'traefik.access.request_count': 271, - 'traefik.access.response_code': '304', - 'traefik.access.url': '/ui/favicons/favicon.ico', - 'traefik.access.user_agent.device': 'Other', - 'traefik.access.user_agent.major': '61', - 'traefik.access.user_agent.minor': '0', - 'traefik.access.user_agent.name': 'Chrome', - 'traefik.access.user_agent.original': - 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36', - 'traefik.access.user_agent.os': 'Linux', - 'traefik.access.user_agent.os_name': 'Linux', - 'traefik.access.user_agent.patch': '3163', - 'traefik.access.user_identifier': '-', - 'traefik.access.user_name': '-', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - 
Object { - "constant": "[traefik][access] ", - }, - Object { - "field": "traefik.access.remote_ip", - "highlights": Array [], - "value": "85.181.35.98", - }, - Object { - "constant": " ", - }, - Object { - "field": "traefik.access.frontend_name", - "highlights": Array [], - "value": "Host-host1", - }, - Object { - "constant": " -> ", - }, - Object { - "field": "traefik.access.backend_url", - "highlights": Array [], - "value": "http://172.19.0.3:5601", - }, - Object { - "constant": " \\"", - }, - Object { - "field": "traefik.access.method", - "highlights": Array [], - "value": "GET", - }, - Object { - "constant": " ", - }, - Object { - "field": "traefik.access.url", - "highlights": Array [], - "value": "/ui/favicons/favicon.ico", - }, - Object { - "constant": " HTTP/", - }, - Object { - "field": "traefik.access.http_version", - "highlights": Array [], - "value": "1.1", - }, - Object { - "constant": "\\" ", - }, - Object { - "field": "traefik.access.response_code", - "highlights": Array [], - "value": "304", - }, - Object { - "constant": " ", - }, - Object { - "field": "traefik.access.body_sent.bytes", - "highlights": Array [], - "value": "0", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts deleted file mode 100644 index e62c688b9c22f..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/filebeat_traefik.ts +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export const filebeatTraefikRules = [ - { - // pre-ECS - when: { - exists: ['traefik.access.method'], - }, - format: [ - { - constant: '[traefik][access] ', - }, - { - field: 'traefik.access.remote_ip', - }, - { - constant: ' ', - }, - { - field: 'traefik.access.frontend_name', - }, - { - constant: ' -> ', - }, - { - field: 'traefik.access.backend_url', - }, - { - constant: ' "', - }, - { - field: 'traefik.access.method', - }, - { - constant: ' ', - }, - { - field: 'traefik.access.url', - }, - { - constant: ' HTTP/', - }, - { - field: 'traefik.access.http_version', - }, - { - constant: '" ', - }, - { - field: 'traefik.access.response_code', - }, - { - constant: ' ', - }, - { - field: 'traefik.access.body_sent.bytes', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts deleted file mode 100644 index d168273626cfa..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.test.ts +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { getBuiltinRules } from '.'; -import { compileFormattingRules } from '../message'; - -const { format } = compileFormattingRules( - getBuiltinRules(['first_generic_message', 'second_generic_message']) -); - -describe('Generic Rules', () => { - describe('configurable message rules', () => { - test('includes the event.dataset and log.level if present', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - 'event.dataset': 'generic.test', - 'log.level': 'TEST_LEVEL', - first_generic_message: 'TEST_MESSAGE', - }; - const highlights = { - first_generic_message: ['TEST'], - }; - - expect(format(flattenedDocument, highlights)).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.dataset", - "highlights": Array [], - "value": "generic.test", - }, - Object { - "constant": "][", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "TEST_LEVEL", - }, - Object { - "constant": "] ", - }, - Object { - "field": "first_generic_message", - "highlights": Array [ - "TEST", - ], - "value": "TEST_MESSAGE", - }, -] -`); - }); - - test('includes the log.level if present', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - 'log.level': 'TEST_LEVEL', - first_generic_message: 'TEST_MESSAGE', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "log.level", - "highlights": Array [], - "value": "TEST_LEVEL", - }, - Object { - "constant": "] ", - }, - Object { - "field": "first_generic_message", - "highlights": Array [], - "value": "TEST_MESSAGE", - }, -] -`); - }); - - test('includes the message', () => { - const firstFlattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - first_generic_message: 'FIRST_TEST_MESSAGE', - }; - - expect(format(firstFlattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "field": "first_generic_message", - 
"highlights": Array [], - "value": "FIRST_TEST_MESSAGE", - }, -] -`); - - const secondFlattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - second_generic_message: 'SECOND_TEST_MESSAGE', - }; - - expect(format(secondFlattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "field": "second_generic_message", - "highlights": Array [], - "value": "SECOND_TEST_MESSAGE", - }, -] -`); - }); - }); - - describe('log.original fallback', () => { - test('includes the event.dataset if present', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - 'event.dataset': 'generic.test', - 'log.original': 'TEST_MESSAGE', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "constant": "[", - }, - Object { - "field": "event.dataset", - "highlights": Array [], - "value": "generic.test", - }, - Object { - "constant": "] ", - }, - Object { - "field": "log.original", - "highlights": Array [], - "value": "TEST_MESSAGE", - }, -] -`); - }); - - test('includes the original message', () => { - const flattenedDocument = { - '@timestamp': '2016-12-26T16:22:13.000Z', - 'log.original': 'TEST_MESSAGE', - }; - - expect(format(flattenedDocument, {})).toMatchInlineSnapshot(` -Array [ - Object { - "field": "log.original", - "highlights": Array [], - "value": "TEST_MESSAGE", - }, -] -`); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts deleted file mode 100644 index 941cfc72afce6..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic.ts +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { LogMessageFormattingRule } from '../rule_types'; - -const BUILTIN_GENERIC_MESSAGE_FIELDS = ['message', '@message']; - -export const getGenericRules = (genericMessageFields: string[]) => [ - ...Array.from(new Set([...genericMessageFields, ...BUILTIN_GENERIC_MESSAGE_FIELDS])).reduce< - LogMessageFormattingRule[] - >((genericRules, fieldName) => [...genericRules, ...createGenericRulesForField(fieldName)], []), - { - when: { - exists: ['event.dataset', 'log.original'], - }, - format: [ - { - constant: '[', - }, - { - field: 'event.dataset', - }, - { - constant: '] ', - }, - { - field: 'log.original', - }, - ], - }, - { - when: { - exists: ['log.original'], - }, - format: [ - { - field: 'log.original', - }, - ], - }, -]; - -const createGenericRulesForField = (fieldName: string) => [ - { - when: { - exists: ['event.dataset', 'log.level', fieldName], - }, - format: [ - { - constant: '[', - }, - { - field: 'event.dataset', - }, - { - constant: '][', - }, - { - field: 'log.level', - }, - { - constant: '] ', - }, - { - field: fieldName, - }, - ], - }, - { - when: { - exists: ['log.level', fieldName], - }, - format: [ - { - constant: '[', - }, - { - field: 'log.level', - }, - { - constant: '] ', - }, - { - field: fieldName, - }, - ], - }, - { - when: { - exists: [fieldName], - }, - format: [ - { - field: fieldName, - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts deleted file mode 100644 index 50f38ad0515b2..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/generic_webserver.ts +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -const commonPrefixFields = [ - { constant: '[' }, - { field: 'event.module' }, - { constant: '][access] ' }, -]; - -export const genericWebserverRules = [ - { - // ECS with parsed url - when: { - exists: ['ecs.version', 'http.response.status_code', 'url.path'], - }, - format: [ - ...commonPrefixFields, - { - field: 'source.ip', - }, - { - constant: ' ', - }, - { - field: 'user.name', - }, - { - constant: ' "', - }, - { - field: 'http.request.method', - }, - { - constant: ' ', - }, - { - field: 'url.path', - }, - { - constant: '?', - }, - { - field: 'url.query', - }, - { - constant: ' HTTP/', - }, - { - field: 'http.version', - }, - { - constant: '" ', - }, - { - field: 'http.response.status_code', - }, - { - constant: ' ', - }, - { - field: 'http.response.body.bytes', - }, - ], - }, - { - // ECS with original url - when: { - exists: ['ecs.version', 'http.response.status_code'], - }, - format: [ - ...commonPrefixFields, - { - field: 'source.ip', - }, - { - constant: ' ', - }, - { - field: 'user.name', - }, - { - constant: ' "', - }, - { - field: 'http.request.method', - }, - { - constant: ' ', - }, - { - field: 'url.original', - }, - { - constant: ' HTTP/', - }, - { - field: 'http.version', - }, - { - constant: '" ', - }, - { - field: 'http.response.status_code', - }, - { - constant: ' ', - }, - { - field: 'http.response.body.bytes', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts deleted file mode 100644 index 9a6fa30e17e89..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/helpers.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export const labelField = (label: string, field: string) => [ - { constant: ' ' }, - { constant: label }, - { constant: '=' }, - { field }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts deleted file mode 100644 index 3f4d7eb901212..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/builtin_rules/index.ts +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { filebeatApache2Rules } from './filebeat_apache2'; -import { filebeatAuditdRules } from './filebeat_auditd'; -import { filebeatHaproxyRules } from './filebeat_haproxy'; -import { filebeatIcingaRules } from './filebeat_icinga'; -import { filebeatIisRules } from './filebeat_iis'; -import { filebeatLogstashRules } from './filebeat_logstash'; -import { filebeatMongodbRules } from './filebeat_mongodb'; -import { filebeatMySQLRules } from './filebeat_mysql'; -import { filebeatNginxRules } from './filebeat_nginx'; -import { filebeatOsqueryRules } from './filebeat_osquery'; -import { filebeatRedisRules } from './filebeat_redis'; -import { filebeatSystemRules } from './filebeat_system'; -import { filebeatTraefikRules } from './filebeat_traefik'; - -import { getGenericRules } from './generic'; -import { genericWebserverRules } from './generic_webserver'; - -export const getBuiltinRules = (genericMessageFields: string[]) => [ - ...filebeatApache2Rules, - ...filebeatNginxRules, - ...filebeatRedisRules, 
- ...filebeatSystemRules, - ...filebeatMySQLRules, - ...filebeatAuditdRules, - ...filebeatHaproxyRules, - ...filebeatIcingaRules, - ...filebeatIisRules, - ...filebeatLogstashRules, - ...filebeatMongodbRules, - ...filebeatOsqueryRules, - ...filebeatTraefikRules, - ...genericWebserverRules, - ...getGenericRules(genericMessageFields), - { - when: { - exists: ['log.path'], - }, - format: [ - { - constant: 'failed to format message from ', - }, - { - field: 'log.path', - }, - ], - }, - { - when: { - exists: [], - }, - format: [ - { - constant: 'failed to find message', - }, - ], - }, -]; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts deleted file mode 100644 index 98d1e2cd89b01..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.test.ts +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { convertDocumentSourceToLogItemFields } from './convert_document_source_to_log_item_fields'; - -describe('convertDocumentSourceToLogItemFields', () => { - test('should convert document', () => { - const doc = { - agent: { - hostname: 'demo-stack-client-01', - id: '7adef8b6-2ab7-45cd-a0d5-b3baad735f1b', - type: 'filebeat', - ephemeral_id: 'a0c8164b-3564-4e32-b0bf-f4db5a7ae566', - version: '7.0.0', - }, - tags: ['prod', 'web'], - metadata: [ - { key: 'env', value: 'prod' }, - { key: 'stack', value: 'web' }, - ], - host: { - hostname: 'packer-virtualbox-iso-1546820004', - name: 'demo-stack-client-01', - }, - }; - - const fields = convertDocumentSourceToLogItemFields(doc); - expect(fields).toEqual([ - { - field: 'agent.hostname', - value: 'demo-stack-client-01', - }, - { - field: 'agent.id', - value: '7adef8b6-2ab7-45cd-a0d5-b3baad735f1b', - }, - { - field: 'agent.type', - value: 'filebeat', - }, - { - field: 'agent.ephemeral_id', - value: 'a0c8164b-3564-4e32-b0bf-f4db5a7ae566', - }, - { - field: 'agent.version', - value: '7.0.0', - }, - { - field: 'tags', - value: '["prod","web"]', - }, - { - field: 'metadata', - value: '[{"key":"env","value":"prod"},{"key":"stack","value":"web"}]', - }, - { - field: 'host.hostname', - value: 'packer-virtualbox-iso-1546820004', - }, - { - field: 'host.name', - value: 'demo-stack-client-01', - }, - ]); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts deleted file mode 100644 index 099e7c3b5038c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/convert_document_source_to_log_item_fields.ts +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import stringify from 'json-stable-stringify'; -import { isArray, isPlainObject } from 'lodash'; - -import { JsonObject } from '../../../../common/typed_json'; -import { LogEntriesItemField } from '../../../../common/http_api'; - -const isJsonObject = (subject: any): subject is JsonObject => { - return isPlainObject(subject); -}; - -const serializeValue = (value: any): string => { - if (isArray(value) || isPlainObject(value)) { - return stringify(value); - } - return `${value}`; -}; - -export const convertDocumentSourceToLogItemFields = ( - source: JsonObject, - path: string[] = [], - fields: LogEntriesItemField[] = [] -): LogEntriesItemField[] => { - return Object.keys(source).reduce((acc, key) => { - const value = source[key]; - const nextPath = [...path, key]; - if (isJsonObject(value)) { - return convertDocumentSourceToLogItemFields(value, nextPath, acc); - } - const field = { field: nextPath.join('.'), value: serializeValue(value) }; - return [...acc, field]; - }, fields); -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/index.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/index.ts deleted file mode 100644 index 2cb8140febdcd..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export * from './log_entries_domain'; diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts deleted file mode 100644 index 347f0dcf795bc..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/log_entries_domain.ts +++ /dev/null @@ -1,436 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import stringify from 'json-stable-stringify'; -import { sortBy } from 'lodash'; - -import { RequestHandlerContext } from 'src/core/server'; -import { TimeKey } from '../../../../common/time'; -import { JsonObject } from '../../../../common/typed_json'; -import { - LogEntriesSummaryBucket, - LogEntriesSummaryHighlightsBucket, - LogEntriesItem, -} from '../../../../common/http_api'; -import { InfraLogEntry, InfraLogMessageSegment } from '../../../graphql/types'; -import { - InfraSourceConfiguration, - InfraSources, - SavedSourceConfigurationFieldColumnRuntimeType, - SavedSourceConfigurationMessageColumnRuntimeType, - SavedSourceConfigurationTimestampColumnRuntimeType, -} from '../../sources'; -import { getBuiltinRules } from './builtin_rules'; -import { convertDocumentSourceToLogItemFields } from './convert_document_source_to_log_item_fields'; -import { - CompiledLogMessageFormattingRule, - Fields, - Highlights, - compileFormattingRules, -} from './message'; - -export class InfraLogEntriesDomain { - constructor( - private readonly adapter: LogEntriesAdapter, - private readonly libs: { sources: InfraSources } - ) {} - - public async getLogEntriesAround( - requestContext: RequestHandlerContext, - sourceId: string, - key: TimeKey, - maxCountBefore: number, - maxCountAfter: number, - filterQuery?: 
LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise<{ entriesBefore: InfraLogEntry[]; entriesAfter: InfraLogEntry[] }> { - if (maxCountBefore <= 0 && maxCountAfter <= 0) { - return { - entriesBefore: [], - entriesAfter: [], - }; - } - - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const messageFormattingRules = compileFormattingRules( - getBuiltinRules(configuration.fields.message) - ); - const requiredFields = getRequiredFields(configuration, messageFormattingRules); - - const documentsBefore = await this.adapter.getAdjacentLogEntryDocuments( - requestContext, - configuration, - requiredFields, - key, - 'desc', - Math.max(maxCountBefore, 1), - filterQuery, - highlightQuery - ); - const lastKeyBefore = - documentsBefore.length > 0 - ? documentsBefore[documentsBefore.length - 1].key - : { - time: key.time - 1, - tiebreaker: 0, - }; - - const documentsAfter = await this.adapter.getAdjacentLogEntryDocuments( - requestContext, - configuration, - requiredFields, - lastKeyBefore, - 'asc', - maxCountAfter, - filterQuery, - highlightQuery - ); - - return { - entriesBefore: (maxCountBefore > 0 ? 
documentsBefore : []).map( - convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) - ), - entriesAfter: documentsAfter.map( - convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) - ), - }; - } - - public async getLogEntriesBetween( - requestContext: RequestHandlerContext, - sourceId: string, - startKey: TimeKey, - endKey: TimeKey, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise { - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const messageFormattingRules = compileFormattingRules( - getBuiltinRules(configuration.fields.message) - ); - const requiredFields = getRequiredFields(configuration, messageFormattingRules); - const documents = await this.adapter.getContainedLogEntryDocuments( - requestContext, - configuration, - requiredFields, - startKey, - endKey, - filterQuery, - highlightQuery - ); - const entries = documents.map( - convertLogDocumentToEntry(sourceId, configuration.logColumns, messageFormattingRules.format) - ); - return entries; - } - - public async getLogEntryHighlights( - requestContext: RequestHandlerContext, - sourceId: string, - startKey: TimeKey, - endKey: TimeKey, - highlights: Array<{ - query: string; - countBefore: number; - countAfter: number; - }>, - filterQuery?: LogEntryQuery - ): Promise { - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const messageFormattingRules = compileFormattingRules( - getBuiltinRules(configuration.fields.message) - ); - const requiredFields = getRequiredFields(configuration, messageFormattingRules); - - const documentSets = await Promise.all( - highlights.map(async highlight => { - const highlightQuery = createHighlightQueryDsl(highlight.query, requiredFields); - const query = filterQuery - ? 
{ - bool: { - filter: [filterQuery, highlightQuery], - }, - } - : highlightQuery; - const [documentsBefore, documents, documentsAfter] = await Promise.all([ - this.adapter.getAdjacentLogEntryDocuments( - requestContext, - configuration, - requiredFields, - startKey, - 'desc', - highlight.countBefore, - query, - highlightQuery - ), - this.adapter.getContainedLogEntryDocuments( - requestContext, - configuration, - requiredFields, - startKey, - endKey, - query, - highlightQuery - ), - this.adapter.getAdjacentLogEntryDocuments( - requestContext, - configuration, - requiredFields, - endKey, - 'asc', - highlight.countAfter, - query, - highlightQuery - ), - ]); - const entries = [...documentsBefore, ...documents, ...documentsAfter].map( - convertLogDocumentToEntry( - sourceId, - configuration.logColumns, - messageFormattingRules.format - ) - ); - - return entries; - }) - ); - - return documentSets; - } - - public async getLogSummaryBucketsBetween( - requestContext: RequestHandlerContext, - sourceId: string, - start: number, - end: number, - bucketSize: number, - filterQuery?: LogEntryQuery - ): Promise { - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const dateRangeBuckets = await this.adapter.getContainedLogSummaryBuckets( - requestContext, - configuration, - start, - end, - bucketSize, - filterQuery - ); - return dateRangeBuckets; - } - - public async getLogSummaryHighlightBucketsBetween( - requestContext: RequestHandlerContext, - sourceId: string, - start: number, - end: number, - bucketSize: number, - highlightQueries: string[], - filterQuery?: LogEntryQuery - ): Promise { - const { configuration } = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const messageFormattingRules = compileFormattingRules( - getBuiltinRules(configuration.fields.message) - ); - const requiredFields = getRequiredFields(configuration, messageFormattingRules); - - const summaries = await 
Promise.all( - highlightQueries.map(async highlightQueryPhrase => { - const highlightQuery = createHighlightQueryDsl(highlightQueryPhrase, requiredFields); - const query = filterQuery - ? { - bool: { - must: [filterQuery, highlightQuery], - }, - } - : highlightQuery; - const summaryBuckets = await this.adapter.getContainedLogSummaryBuckets( - requestContext, - configuration, - start, - end, - bucketSize, - query - ); - const summaryHighlightBuckets = summaryBuckets - .filter(logSummaryBucketHasEntries) - .map(convertLogSummaryBucketToSummaryHighlightBucket); - return summaryHighlightBuckets; - }) - ); - - return summaries; - } - - public async getLogItem( - requestContext: RequestHandlerContext, - id: string, - sourceConfiguration: InfraSourceConfiguration - ): Promise { - const document = await this.adapter.getLogItem(requestContext, id, sourceConfiguration); - const defaultFields = [ - { field: '_index', value: document._index }, - { field: '_id', value: document._id }, - ]; - - return { - id: document._id, - index: document._index, - key: { - time: document.sort[0], - tiebreaker: document.sort[1], - }, - fields: sortBy( - [...defaultFields, ...convertDocumentSourceToLogItemFields(document._source)], - 'field' - ), - }; - } -} - -interface LogItemHit { - _index: string; - _id: string; - _source: JsonObject; - sort: [number, number]; -} - -export interface LogEntriesAdapter { - getAdjacentLogEntryDocuments( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - fields: string[], - start: TimeKey, - direction: 'asc' | 'desc', - maxCount: number, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise; - - getContainedLogEntryDocuments( - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - fields: string[], - start: TimeKey, - end: TimeKey, - filterQuery?: LogEntryQuery, - highlightQuery?: LogEntryQuery - ): Promise; - - getContainedLogSummaryBuckets( - requestContext: 
RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - start: number, - end: number, - bucketSize: number, - filterQuery?: LogEntryQuery - ): Promise; - - getLogItem( - requestContext: RequestHandlerContext, - id: string, - source: InfraSourceConfiguration - ): Promise; -} - -export type LogEntryQuery = JsonObject; - -export interface LogEntryDocument { - fields: Fields; - gid: string; - highlights: Highlights; - key: TimeKey; -} - -export interface LogSummaryBucket { - entriesCount: number; - start: number; - end: number; - topEntryKeys: TimeKey[]; -} - -const convertLogDocumentToEntry = ( - sourceId: string, - logColumns: InfraSourceConfiguration['logColumns'], - formatLogMessage: (fields: Fields, highlights: Highlights) => InfraLogMessageSegment[] -) => (document: LogEntryDocument): InfraLogEntry => ({ - key: document.key, - gid: document.gid, - source: sourceId, - columns: logColumns.map(logColumn => { - if (SavedSourceConfigurationTimestampColumnRuntimeType.is(logColumn)) { - return { - columnId: logColumn.timestampColumn.id, - timestamp: document.key.time, - }; - } else if (SavedSourceConfigurationMessageColumnRuntimeType.is(logColumn)) { - return { - columnId: logColumn.messageColumn.id, - message: formatLogMessage(document.fields, document.highlights), - }; - } else { - return { - columnId: logColumn.fieldColumn.id, - field: logColumn.fieldColumn.field, - highlights: document.highlights[logColumn.fieldColumn.field] || [], - value: stringify(document.fields[logColumn.fieldColumn.field] || null), - }; - } - }), -}); - -const logSummaryBucketHasEntries = (bucket: LogSummaryBucket) => - bucket.entriesCount > 0 && bucket.topEntryKeys.length > 0; - -const convertLogSummaryBucketToSummaryHighlightBucket = ( - bucket: LogSummaryBucket -): LogEntriesSummaryHighlightsBucket => ({ - entriesCount: bucket.entriesCount, - start: bucket.start, - end: bucket.end, - representativeKey: bucket.topEntryKeys[0], -}); - -const getRequiredFields = ( - 
configuration: InfraSourceConfiguration, - messageFormattingRules: CompiledLogMessageFormattingRule -): string[] => { - const fieldsFromCustomColumns = configuration.logColumns.reduce( - (accumulatedFields, logColumn) => { - if (SavedSourceConfigurationFieldColumnRuntimeType.is(logColumn)) { - return [...accumulatedFields, logColumn.fieldColumn.field]; - } - return accumulatedFields; - }, - [] - ); - const fieldsFromFormattingRules = messageFormattingRules.requiredFields; - - return Array.from(new Set([...fieldsFromCustomColumns, ...fieldsFromFormattingRules])); -}; - -const createHighlightQueryDsl = (phrase: string, fields: string[]) => ({ - multi_match: { - fields, - lenient: true, - query: phrase, - type: 'phrase', - }, -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/message.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/message.ts deleted file mode 100644 index 58cffc7584979..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/message.ts +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import stringify from 'json-stable-stringify'; - -import { InfraLogMessageSegment } from '../../../graphql/types'; -import { - LogMessageFormattingCondition, - LogMessageFormattingInstruction, - LogMessageFormattingRule, -} from './rule_types'; - -export function compileFormattingRules( - rules: LogMessageFormattingRule[] -): CompiledLogMessageFormattingRule { - const compiledRules = rules.map(compileRule); - - return { - requiredFields: Array.from( - new Set( - compiledRules.reduce( - (combinedRequiredFields, { requiredFields }) => [ - ...combinedRequiredFields, - ...requiredFields, - ], - [] as string[] - ) - ) - ), - format(fields, highlights): InfraLogMessageSegment[] { - for (const compiledRule of compiledRules) { - if (compiledRule.fulfillsCondition(fields)) { - return compiledRule.format(fields, highlights); - } - } - - return []; - }, - fulfillsCondition() { - return true; - }, - }; -} - -const compileRule = (rule: LogMessageFormattingRule): CompiledLogMessageFormattingRule => { - const { conditionFields, fulfillsCondition } = compileCondition(rule.when); - const { formattingFields, format } = compileFormattingInstructions(rule.format); - - return { - requiredFields: [...conditionFields, ...formattingFields], - fulfillsCondition, - format, - }; -}; - -const compileCondition = ( - condition: LogMessageFormattingCondition -): CompiledLogMessageFormattingCondition => - [compileExistsCondition, compileFieldValueCondition].reduce( - (compiledCondition, compile) => compile(condition) || compiledCondition, - catchAllCondition - ); - -const catchAllCondition: CompiledLogMessageFormattingCondition = { - conditionFields: [] as string[], - fulfillsCondition: () => false, -}; - -const compileExistsCondition = (condition: LogMessageFormattingCondition) => - 'exists' in condition - ? 
{ - conditionFields: condition.exists, - fulfillsCondition: (fields: Fields) => - condition.exists.every(fieldName => fieldName in fields), - } - : null; - -const compileFieldValueCondition = (condition: LogMessageFormattingCondition) => - 'values' in condition - ? { - conditionFields: Object.keys(condition.values), - fulfillsCondition: (fields: Fields) => - Object.entries(condition.values).every( - ([fieldName, expectedValue]) => fields[fieldName] === expectedValue - ), - } - : null; - -const compileFormattingInstructions = ( - formattingInstructions: LogMessageFormattingInstruction[] -): CompiledLogMessageFormattingInstruction => - formattingInstructions.reduce( - (combinedFormattingInstructions, formattingInstruction) => { - const compiledFormattingInstruction = compileFormattingInstruction(formattingInstruction); - - return { - formattingFields: [ - ...combinedFormattingInstructions.formattingFields, - ...compiledFormattingInstruction.formattingFields, - ], - format: (fields: Fields, highlights: Highlights) => [ - ...combinedFormattingInstructions.format(fields, highlights), - ...compiledFormattingInstruction.format(fields, highlights), - ], - }; - }, - { - formattingFields: [], - format: () => [], - } as CompiledLogMessageFormattingInstruction - ); - -const compileFormattingInstruction = ( - formattingInstruction: LogMessageFormattingInstruction -): CompiledLogMessageFormattingInstruction => - [compileFieldReferenceFormattingInstruction, compileConstantFormattingInstruction].reduce( - (compiledFormattingInstruction, compile) => - compile(formattingInstruction) || compiledFormattingInstruction, - catchAllFormattingInstruction - ); - -const catchAllFormattingInstruction: CompiledLogMessageFormattingInstruction = { - formattingFields: [], - format: () => [ - { - constant: 'invalid format', - }, - ], -}; - -const compileFieldReferenceFormattingInstruction = ( - formattingInstruction: LogMessageFormattingInstruction -): CompiledLogMessageFormattingInstruction | 
null => - 'field' in formattingInstruction - ? { - formattingFields: [formattingInstruction.field], - format: (fields, highlights) => { - const value = fields[formattingInstruction.field]; - const highlightedValues = highlights[formattingInstruction.field]; - return [ - { - field: formattingInstruction.field, - value: typeof value === 'object' ? stringify(value) : `${value}`, - highlights: highlightedValues || [], - }, - ]; - }, - } - : null; - -const compileConstantFormattingInstruction = ( - formattingInstruction: LogMessageFormattingInstruction -): CompiledLogMessageFormattingInstruction | null => - 'constant' in formattingInstruction - ? { - formattingFields: [] as string[], - format: () => [ - { - constant: formattingInstruction.constant, - }, - ], - } - : null; - -export interface Fields { - [fieldName: string]: string | number | object | boolean | null; -} - -export interface Highlights { - [fieldName: string]: string[]; -} - -export interface CompiledLogMessageFormattingRule { - requiredFields: string[]; - fulfillsCondition(fields: Fields): boolean; - format(fields: Fields, highlights: Highlights): InfraLogMessageSegment[]; -} - -export interface CompiledLogMessageFormattingCondition { - conditionFields: string[]; - fulfillsCondition(fields: Fields): boolean; -} - -export interface CompiledLogMessageFormattingInstruction { - formattingFields: string[]; - format(fields: Fields, highlights: Highlights): InfraLogMessageSegment[]; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts b/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts deleted file mode 100644 index 6107fc362f8e3..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/log_entries_domain/rule_types.ts +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export interface LogMessageFormattingRule { - when: LogMessageFormattingCondition; - format: LogMessageFormattingInstruction[]; -} - -export type LogMessageFormattingCondition = - | LogMessageFormattingExistsCondition - | LogMessageFormattingFieldValueCondition; - -export interface LogMessageFormattingExistsCondition { - exists: string[]; -} - -export interface LogMessageFormattingFieldValueCondition { - values: { - [fieldName: string]: string | number | boolean | null; - }; -} - -export type LogMessageFormattingInstruction = - | LogMessageFormattingFieldReference - | LogMessageFormattingConstant; - -export interface LogMessageFormattingFieldReference { - field: string; -} - -export interface LogMessageFormattingConstant { - constant: string; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/domains/metrics_domain.ts b/x-pack/legacy/plugins/infra/server/lib/domains/metrics_domain.ts deleted file mode 100644 index e53e45afae5c4..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/domains/metrics_domain.ts +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; -import { InfraMetricData } from '../../graphql/types'; -import { InfraMetricsAdapter, InfraMetricsRequestOptions } from '../adapters/metrics/adapter_types'; - -export class InfraMetricsDomain { - private adapter: InfraMetricsAdapter; - - constructor(adapter: InfraMetricsAdapter) { - this.adapter = adapter; - } - - public async getMetrics( - requestContext: RequestHandlerContext, - options: InfraMetricsRequestOptions, - rawRequest: KibanaRequest - ): Promise { - return await this.adapter.getMetrics(requestContext, options, rawRequest); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/infra_types.ts b/x-pack/legacy/plugins/infra/server/lib/infra_types.ts deleted file mode 100644 index 46d32885600df..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/infra_types.ts +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { InfraSourceConfiguration } from '../../public/graphql/types'; -import { InfraFieldsDomain } from './domains/fields_domain'; -import { InfraLogEntriesDomain } from './domains/log_entries_domain'; -import { InfraMetricsDomain } from './domains/metrics_domain'; -import { InfraLogAnalysis } from './log_analysis/log_analysis'; -import { InfraSnapshot } from './snapshot'; -import { InfraSources } from './sources'; -import { InfraSourceStatus } from './source_status'; -import { InfraConfig } from '../../../../../plugins/infra/server'; -import { KibanaFramework } from './adapters/framework/kibana_framework_adapter'; - -// NP_TODO: We shouldn't need this context anymore but I am -// not sure how the graphql stuff uses it, so we can't remove it yet -export interface InfraContext { - req: any; - rawReq?: any; -} - -export interface InfraDomainLibs { - fields: InfraFieldsDomain; - logEntries: InfraLogEntriesDomain; - metrics: InfraMetricsDomain; -} - -export interface InfraBackendLibs extends InfraDomainLibs { - configuration: InfraConfig; - framework: KibanaFramework; - logAnalysis: InfraLogAnalysis; - snapshot: InfraSnapshot; - sources: InfraSources; - sourceStatus: InfraSourceStatus; -} - -export interface InfraConfiguration { - enabled: boolean; - query: { - partitionSize: number; - partitionFactor: number; - }; - sources: { - default: InfraSourceConfiguration; - }; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/log_analysis/errors.ts b/x-pack/legacy/plugins/infra/server/lib/log_analysis/errors.ts deleted file mode 100644 index dc5c87c61fdce..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/log_analysis/errors.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export class NoLogRateResultsIndexError extends Error { - constructor(message?: string) { - super(message); - Object.setPrototypeOf(this, new.target.prototype); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/log_analysis/index.ts b/x-pack/legacy/plugins/infra/server/lib/log_analysis/index.ts deleted file mode 100644 index 0b58c71c1db7b..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/log_analysis/index.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './errors'; -export * from './log_analysis'; diff --git a/x-pack/legacy/plugins/infra/server/lib/log_analysis/log_analysis.ts b/x-pack/legacy/plugins/infra/server/lib/log_analysis/log_analysis.ts deleted file mode 100644 index fac49a7980f26..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/log_analysis/log_analysis.ts +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { pipe } from 'fp-ts/lib/pipeable'; -import { map, fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { getJobId } from '../../../common/log_analysis'; -import { throwErrors, createPlainError } from '../../../common/runtime_types'; -import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; -import { NoLogRateResultsIndexError } from './errors'; -import { - logRateModelPlotResponseRT, - createLogEntryRateQuery, - LogRateModelPlotBucket, - CompositeTimestampPartitionKey, -} from './queries'; -import { RequestHandlerContext, KibanaRequest } from '../../../../../../../src/core/server'; - -const COMPOSITE_AGGREGATION_BATCH_SIZE = 1000; - -export class InfraLogAnalysis { - constructor( - private readonly libs: { - framework: KibanaFramework; - } - ) {} - - public getJobIds(request: KibanaRequest, sourceId: string) { - return { - logEntryRate: getJobId(this.libs.framework.getSpaceId(request), sourceId, 'log-entry-rate'), - }; - } - - public async getLogEntryRateBuckets( - requestContext: RequestHandlerContext, - sourceId: string, - startTime: number, - endTime: number, - bucketDuration: number, - request: KibanaRequest - ) { - const logRateJobId = this.getJobIds(request, sourceId).logEntryRate; - let mlModelPlotBuckets: LogRateModelPlotBucket[] = []; - let afterLatestBatchKey: CompositeTimestampPartitionKey | undefined; - - while (true) { - const mlModelPlotResponse = await this.libs.framework.callWithRequest( - requestContext, - 'search', - createLogEntryRateQuery( - logRateJobId, - startTime, - endTime, - bucketDuration, - COMPOSITE_AGGREGATION_BATCH_SIZE, - afterLatestBatchKey - ) - ); - - if (mlModelPlotResponse._shards.total === 0) { - throw new NoLogRateResultsIndexError( - `Failed to find ml result index for job ${logRateJobId}.` - ); - } - - const { after_key: afterKey, buckets: latestBatchBuckets } = pipe( - logRateModelPlotResponseRT.decode(mlModelPlotResponse), - map(response => 
response.aggregations.timestamp_partition_buckets), - fold(throwErrors(createPlainError), identity) - ); - - mlModelPlotBuckets = [...mlModelPlotBuckets, ...latestBatchBuckets]; - afterLatestBatchKey = afterKey; - - if (latestBatchBuckets.length < COMPOSITE_AGGREGATION_BATCH_SIZE) { - break; - } - } - - return mlModelPlotBuckets.reduce< - Array<{ - partitions: Array<{ - analysisBucketCount: number; - anomalies: Array<{ - actualLogEntryRate: number; - anomalyScore: number; - duration: number; - startTime: number; - typicalLogEntryRate: number; - }>; - averageActualLogEntryRate: number; - maximumAnomalyScore: number; - numberOfLogEntries: number; - partitionId: string; - }>; - startTime: number; - }> - >((histogramBuckets, timestampPartitionBucket) => { - const previousHistogramBucket = histogramBuckets[histogramBuckets.length - 1]; - const partition = { - analysisBucketCount: timestampPartitionBucket.filter_model_plot.doc_count, - anomalies: timestampPartitionBucket.filter_records.top_hits_record.hits.hits.map( - ({ _source: record }) => ({ - actualLogEntryRate: record.actual[0], - anomalyScore: record.record_score, - duration: record.bucket_span * 1000, - startTime: record.timestamp, - typicalLogEntryRate: record.typical[0], - }) - ), - averageActualLogEntryRate: - timestampPartitionBucket.filter_model_plot.average_actual.value || 0, - maximumAnomalyScore: - timestampPartitionBucket.filter_records.maximum_record_score.value || 0, - numberOfLogEntries: timestampPartitionBucket.filter_model_plot.sum_actual.value || 0, - partitionId: timestampPartitionBucket.key.partition, - }; - if ( - previousHistogramBucket && - previousHistogramBucket.startTime === timestampPartitionBucket.key.timestamp - ) { - return [ - ...histogramBuckets.slice(0, -1), - { - ...previousHistogramBucket, - partitions: [...previousHistogramBucket.partitions, partition], - }, - ]; - } else { - return [ - ...histogramBuckets, - { - partitions: [partition], - startTime: 
timestampPartitionBucket.key.timestamp, - }, - ]; - } - }, []); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/index.ts b/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/index.ts deleted file mode 100644 index 1749421277719..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './log_entry_rate'; diff --git a/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts b/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts deleted file mode 100644 index 2dd0880cbf8cb..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/log_analysis/queries/log_entry_rate.ts +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import * as rt from 'io-ts'; - -const ML_ANOMALY_INDEX_PREFIX = '.ml-anomalies-'; - -export const createLogEntryRateQuery = ( - logRateJobId: string, - startTime: number, - endTime: number, - bucketDuration: number, - size: number, - afterKey?: CompositeTimestampPartitionKey -) => ({ - allowNoIndices: true, - body: { - query: { - bool: { - filter: [ - { - range: { - timestamp: { - gte: startTime, - lt: endTime, - }, - }, - }, - { - terms: { - result_type: ['model_plot', 'record'], - }, - }, - { - term: { - detector_index: { - value: 0, - }, - }, - }, - ], - }, - }, - aggs: { - timestamp_partition_buckets: { - composite: { - after: afterKey, - size, - sources: [ - { - timestamp: { - date_histogram: { - field: 'timestamp', - fixed_interval: `${bucketDuration}ms`, - order: 'asc', - }, - }, - }, - { - partition: { - terms: { - field: 'partition_field_value', - order: 'asc', - }, - }, - }, - ], - }, - aggs: { - filter_model_plot: { - filter: { - term: { - result_type: 'model_plot', - }, - }, - aggs: { - average_actual: { - avg: { - field: 'actual', - }, - }, - sum_actual: { - sum: { - field: 'actual', - }, - }, - }, - }, - filter_records: { - filter: { - term: { - result_type: 'record', - }, - }, - aggs: { - maximum_record_score: { - max: { - field: 'record_score', - }, - }, - top_hits_record: { - top_hits: { - _source: Object.keys(logRateMlRecordRT.props), - size: 100, - sort: [ - { - timestamp: 'asc', - }, - ], - }, - }, - }, - }, - }, - }, - }, - }, - ignoreUnavailable: true, - index: `${ML_ANOMALY_INDEX_PREFIX}${logRateJobId}`, - size: 0, - trackScores: false, - trackTotalHits: false, -}); - -const logRateMlRecordRT = rt.type({ - actual: rt.array(rt.number), - bucket_span: rt.number, - record_score: rt.number, - timestamp: rt.number, - typical: rt.array(rt.number), -}); - -const metricAggregationRT = rt.type({ - value: rt.union([rt.number, rt.null]), -}); - -const compositeTimestampPartitionKeyRT = rt.type({ - partition: rt.string, - timestamp: rt.number, 
-}); - -export type CompositeTimestampPartitionKey = rt.TypeOf; - -export const logRateModelPlotBucketRT = rt.type({ - key: compositeTimestampPartitionKeyRT, - filter_records: rt.type({ - doc_count: rt.number, - maximum_record_score: metricAggregationRT, - top_hits_record: rt.type({ - hits: rt.type({ - hits: rt.array( - rt.type({ - _source: logRateMlRecordRT, - }) - ), - }), - }), - }), - filter_model_plot: rt.type({ - doc_count: rt.number, - average_actual: metricAggregationRT, - sum_actual: metricAggregationRT, - }), -}); - -export type LogRateModelPlotBucket = rt.TypeOf; - -export const logRateModelPlotResponseRT = rt.type({ - aggregations: rt.type({ - timestamp_partition_buckets: rt.intersection([ - rt.type({ - buckets: rt.array(logRateModelPlotBucketRT), - }), - rt.partial({ - after_key: compositeTimestampPartitionKeyRT, - }), - ]), - }), -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/constants.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/constants.ts deleted file mode 100644 index 0420878dbcf50..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/constants.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -// TODO: Make SNAPSHOT_COMPOSITE_REQUEST_SIZE configurable from kibana.yml - -export const SNAPSHOT_COMPOSITE_REQUEST_SIZE = 75; diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts deleted file mode 100644 index 6c27e54a78bee..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/create_timerange_with_interval.ts +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { uniq } from 'lodash'; -import { RequestHandlerContext } from 'kibana/server'; -import { InfraSnapshotRequestOptions } from './types'; -import { InfraTimerangeInput } from '../../../public/graphql/types'; -import { getMetricsAggregations } from './query_helpers'; -import { calculateMetricInterval } from '../../utils/calculate_metric_interval'; -import { SnapshotModel, SnapshotModelMetricAggRT } from '../../../common/inventory_models/types'; -import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; - -export const createTimeRangeWithInterval = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - options: InfraSnapshotRequestOptions -): Promise => { - const aggregations = getMetricsAggregations(options); - const modules = aggregationsToModules(aggregations); - const interval = - (await calculateMetricInterval( - framework, - requestContext, - { - indexPattern: options.sourceConfiguration.metricAlias, - timestampField: options.sourceConfiguration.fields.timestamp, - timerange: { from: options.timerange.from, to: options.timerange.to }, - }, - modules, - options.nodeType - )) || 60000; - return { - interval: `${interval}s`, - from: options.timerange.to - interval * 5000, // We need at least 5 buckets worth of data - to: options.timerange.to, - }; -}; - -const aggregationsToModules = (aggregations: SnapshotModel): string[] => { - return uniq( - Object.values(aggregations) - .reduce((modules, agg) => { - if (SnapshotModelMetricAggRT.is(agg)) { - return modules.concat(Object.values(agg).map(a => a?.field)); - } - return modules; - }, [] as Array) - .filter(v => v) - .map(field => - field! 
- .split(/\./) - .slice(0, 2) - .join('.') - ) - ) as string[]; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/index.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/index.ts deleted file mode 100644 index 8db54da803648..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './snapshot'; diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/query_helpers.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/query_helpers.ts deleted file mode 100644 index 44d32c7b915a8..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/query_helpers.ts +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { i18n } from '@kbn/i18n'; -import { findInventoryModel, findInventoryFields } from '../../../common/inventory_models/index'; -import { InfraSnapshotRequestOptions } from './types'; -import { getIntervalInSeconds } from '../../utils/get_interval_in_seconds'; -import { SnapshotModelRT, SnapshotModel } from '../../../common/inventory_models/types'; - -interface GroupBySource { - [id: string]: { - terms: { - field: string | null | undefined; - missing_bucket?: boolean; - }; - }; -} - -export const getFieldByNodeType = (options: InfraSnapshotRequestOptions) => { - const inventoryFields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); - return inventoryFields.id; -}; - -export const getGroupedNodesSources = (options: InfraSnapshotRequestOptions) => { - const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); - const sources: GroupBySource[] = options.groupBy.map(gb => { - return { [`${gb.field}`]: { terms: { field: gb.field } } }; - }); - sources.push({ - id: { - terms: { field: fields.id }, - }, - }); - sources.push({ - name: { terms: { field: fields.name, missing_bucket: true } }, - }); - return sources; -}; - -export const getMetricsSources = (options: InfraSnapshotRequestOptions) => { - const fields = findInventoryFields(options.nodeType, options.sourceConfiguration.fields); - return [{ id: { terms: { field: fields.id } } }]; -}; - -export const getMetricsAggregations = (options: InfraSnapshotRequestOptions): SnapshotModel => { - const inventoryModel = findInventoryModel(options.nodeType); - const aggregation = inventoryModel.metrics.snapshot?.[options.metric.type]; - if (!SnapshotModelRT.is(aggregation)) { - throw new Error( - i18n.translate('xpack.infra.snapshot.missingSnapshotMetricError', { - defaultMessage: 'The aggregation for {metric} for {nodeType} is not available.', - values: { - nodeType: options.nodeType, - metric: options.metric.type, - }, - }) - ); - } - return aggregation; -}; 
- -export const getDateHistogramOffset = (options: InfraSnapshotRequestOptions): string => { - const { from, interval } = options.timerange; - const fromInSeconds = Math.floor(from / 1000); - const bucketSizeInSeconds = getIntervalInSeconds(interval); - - // negative offset to align buckets with full intervals (e.g. minutes) - const offset = (fromInSeconds % bucketSizeInSeconds) - bucketSizeInSeconds; - return `${offset}s`; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.test.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.test.ts deleted file mode 100644 index 28146624a8a89..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.test.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { isIPv4, getIPFromBucket, InfraSnapshotNodeGroupByBucket } from './response_helpers'; -import { InfraNodeType } from '../../graphql/types'; - -describe('InfraOps ResponseHelpers', () => { - describe('isIPv4', () => { - it('should return true for IPv4', () => { - expect(isIPv4('192.168.2.4')).toBe(true); - }); - it('should return false for anything else', () => { - expect(isIPv4('0:0:0:0:0:0:0:1')).toBe(false); - }); - }); - - describe('getIPFromBucket', () => { - it('should return IPv4 address', () => { - const bucket: InfraSnapshotNodeGroupByBucket = { - key: { - id: 'example-01', - name: 'example-01', - }, - ip: { - hits: { - total: { value: 1 }, - hits: [ - { - _index: 'metricbeat-2019-01-01', - _type: '_doc', - _id: '29392939', - _score: null, - sort: [], - _source: { - host: { - ip: ['2001:db8:85a3::8a2e:370:7334', '192.168.1.4'], - }, - }, - }, - ], - }, - }, - }; - expect(getIPFromBucket(InfraNodeType.host, bucket)).toBe('192.168.1.4'); - }); - it('should NOT return ipv6 address', () => { - const bucket: InfraSnapshotNodeGroupByBucket = { - key: { - id: 'example-01', - name: 'example-01', - }, - ip: { - hits: { - total: { value: 1 }, - hits: [ - { - _index: 'metricbeat-2019-01-01', - _type: '_doc', - _id: '29392939', - _score: null, - sort: [], - _source: { - host: { - ip: ['2001:db8:85a3::8a2e:370:7334'], - }, - }, - }, - ], - }, - }, - }; - expect(getIPFromBucket(InfraNodeType.host, bucket)).toBe(null); - }); - }); -}); diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.ts deleted file mode 100644 index d22f41ff152f7..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/response_helpers.ts +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { isNumber, last, max, sum, get } from 'lodash'; -import moment from 'moment'; - -import { - InfraSnapshotMetricType, - InfraSnapshotNodePath, - InfraSnapshotNodeMetric, - InfraNodeType, -} from '../../graphql/types'; -import { getIntervalInSeconds } from '../../utils/get_interval_in_seconds'; -import { InfraSnapshotRequestOptions } from './types'; -import { findInventoryModel } from '../../../common/inventory_models'; - -export interface InfraSnapshotNodeMetricsBucket { - key: { id: string }; - histogram: { - buckets: InfraSnapshotMetricsBucket[]; - }; -} - -// Jumping through TypeScript hoops here: -// We need an interface that has the known members 'key' and 'doc_count' and also -// an unknown number of members with unknown names but known format, containing the -// metrics. -// This union type is the only way I found to express this that TypeScript accepts. 
-export interface InfraSnapshotBucketWithKey { - key: string | number; - doc_count: number; -} - -export interface InfraSnapshotBucketWithValues { - [name: string]: { value: number; normalized_value?: number }; -} - -export type InfraSnapshotMetricsBucket = InfraSnapshotBucketWithKey & InfraSnapshotBucketWithValues; - -interface InfraSnapshotIpHit { - _index: string; - _type: string; - _id: string; - _score: number | null; - _source: { - host: { - ip: string[] | string; - }; - }; - sort: number[]; -} - -export interface InfraSnapshotNodeGroupByBucket { - key: { - id: string; - name: string; - [groupByField: string]: string; - }; - ip: { - hits: { - total: { value: number }; - hits: InfraSnapshotIpHit[]; - }; - }; -} - -export const isIPv4 = (subject: string) => /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$/.test(subject); - -export const getIPFromBucket = ( - nodeType: InfraNodeType, - bucket: InfraSnapshotNodeGroupByBucket -): string | null => { - const inventoryModel = findInventoryModel(nodeType); - if (!inventoryModel.fields.ip) { - return null; - } - const ip = get(bucket, `ip.hits.hits[0]._source.${inventoryModel.fields.ip}`, null) as - | string[] - | null; - if (Array.isArray(ip)) { - return ip.find(isIPv4) || null; - } else if (typeof ip === 'string') { - return ip; - } - - return null; -}; - -export const getNodePath = ( - groupBucket: InfraSnapshotNodeGroupByBucket, - options: InfraSnapshotRequestOptions -): InfraSnapshotNodePath[] => { - const node = groupBucket.key; - const path = options.groupBy.map(gb => { - return { value: node[`${gb.field}`], label: node[`${gb.field}`] } as InfraSnapshotNodePath; - }); - const ip = getIPFromBucket(options.nodeType, groupBucket); - path.push({ value: node.id, label: node.name || node.id, ip }); - return path; -}; - -interface NodeMetricsForLookup { - [nodeId: string]: InfraSnapshotMetricsBucket[]; -} - -export const getNodeMetricsForLookup = ( - metrics: InfraSnapshotNodeMetricsBucket[] -): NodeMetricsForLookup => { - return 
metrics.reduce((acc: NodeMetricsForLookup, metric) => { - acc[`${metric.key.id}`] = metric.histogram.buckets; - return acc; - }, {}); -}; - -// In the returned object, -// value contains the value from the last bucket spanning a full interval -// max and avg are calculated from all buckets returned for the timerange -export const getNodeMetrics = ( - nodeBuckets: InfraSnapshotMetricsBucket[], - options: InfraSnapshotRequestOptions -): InfraSnapshotNodeMetric => { - if (!nodeBuckets) { - return { - name: options.metric.type, - value: null, - max: null, - avg: null, - }; - } - const lastBucket = findLastFullBucket(nodeBuckets, options); - const result = { - name: options.metric.type, - value: getMetricValueFromBucket(options.metric.type, lastBucket), - max: calculateMax(nodeBuckets, options.metric.type), - avg: calculateAvg(nodeBuckets, options.metric.type), - }; - return result; -}; - -const findLastFullBucket = ( - buckets: InfraSnapshotMetricsBucket[], - options: InfraSnapshotRequestOptions -) => { - const to = moment.utc(options.timerange.to); - const bucketSize = getIntervalInSeconds(options.timerange.interval); - return buckets.reduce((current, item) => { - const itemKey = isNumber(item.key) ? 
item.key : parseInt(item.key, 10); - const date = moment.utc(itemKey + bucketSize * 1000); - if (!date.isAfter(to) && item.doc_count > 0) { - return item; - } - return current; - }, last(buckets)); -}; - -const getMetricValueFromBucket = ( - type: InfraSnapshotMetricType, - bucket: InfraSnapshotMetricsBucket -) => { - const metric = bucket[type]; - return (metric && (metric.normalized_value || metric.value)) || 0; -}; - -function calculateMax(buckets: InfraSnapshotMetricsBucket[], type: InfraSnapshotMetricType) { - return max(buckets.map(bucket => getMetricValueFromBucket(type, bucket))) || 0; -} - -function calculateAvg(buckets: InfraSnapshotMetricsBucket[], type: InfraSnapshotMetricType) { - return sum(buckets.map(bucket => getMetricValueFromBucket(type, bucket))) / buckets.length || 0; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/snapshot.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/snapshot.ts deleted file mode 100644 index 1a724673608a2..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/snapshot.ts +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; -import { InfraSnapshotNode } from '../../graphql/types'; -import { InfraDatabaseSearchResponse } from '../adapters/framework'; -import { KibanaFramework } from '../adapters/framework/kibana_framework_adapter'; -import { InfraSources } from '../sources'; - -import { JsonObject } from '../../../common/typed_json'; -import { SNAPSHOT_COMPOSITE_REQUEST_SIZE } from './constants'; -import { - getGroupedNodesSources, - getMetricsAggregations, - getMetricsSources, - getDateHistogramOffset, -} from './query_helpers'; -import { - getNodeMetrics, - getNodeMetricsForLookup, - getNodePath, - InfraSnapshotNodeGroupByBucket, - InfraSnapshotNodeMetricsBucket, -} from './response_helpers'; -import { getAllCompositeData } from '../../utils/get_all_composite_data'; -import { createAfterKeyHandler } from '../../utils/create_afterkey_handler'; -import { findInventoryModel } from '../../../common/inventory_models'; -import { InfraSnapshotRequestOptions } from './types'; -import { createTimeRangeWithInterval } from './create_timerange_with_interval'; - -export class InfraSnapshot { - constructor(private readonly libs: { sources: InfraSources; framework: KibanaFramework }) {} - - public async getNodes( - requestContext: RequestHandlerContext, - options: InfraSnapshotRequestOptions - ): Promise<{ nodes: InfraSnapshotNode[]; interval: string }> { - // Both requestGroupedNodes and requestNodeMetrics may send several requests to elasticsearch - // in order to page through the results of their respective composite aggregations. - // Both chains of requests are supposed to run in parallel, and their results be merged - // when they have both been completed. 
- const timeRangeWithIntervalApplied = await createTimeRangeWithInterval( - this.libs.framework, - requestContext, - options - ); - const optionsWithTimerange = { ...options, timerange: timeRangeWithIntervalApplied }; - const groupedNodesPromise = requestGroupedNodes( - requestContext, - optionsWithTimerange, - this.libs.framework - ); - const nodeMetricsPromise = requestNodeMetrics( - requestContext, - optionsWithTimerange, - this.libs.framework - ); - - const groupedNodeBuckets = await groupedNodesPromise; - const nodeMetricBuckets = await nodeMetricsPromise; - return { - nodes: mergeNodeBuckets(groupedNodeBuckets, nodeMetricBuckets, options), - interval: timeRangeWithIntervalApplied.interval, - }; - } -} - -const bucketSelector = ( - response: InfraDatabaseSearchResponse<{}, InfraSnapshotAggregationResponse> -) => (response.aggregations && response.aggregations.nodes.buckets) || []; - -const handleAfterKey = createAfterKeyHandler( - 'body.aggregations.nodes.composite.after', - input => input?.aggregations?.nodes?.after_key -); - -const requestGroupedNodes = async ( - requestContext: RequestHandlerContext, - options: InfraSnapshotRequestOptions, - framework: KibanaFramework -): Promise => { - const inventoryModel = findInventoryModel(options.nodeType); - const query = { - allowNoIndices: true, - index: `${options.sourceConfiguration.logAlias},${options.sourceConfiguration.metricAlias}`, - ignoreUnavailable: true, - body: { - query: { - bool: { - filter: buildFilters(options), - }, - }, - size: 0, - aggregations: { - nodes: { - composite: { - size: SNAPSHOT_COMPOSITE_REQUEST_SIZE, - sources: getGroupedNodesSources(options), - }, - aggs: { - ip: { - top_hits: { - sort: [{ [options.sourceConfiguration.fields.timestamp]: { order: 'desc' } }], - _source: { - includes: inventoryModel.fields.ip ? 
[inventoryModel.fields.ip] : [], - }, - size: 1, - }, - }, - }, - }, - }, - }, - }; - - return await getAllCompositeData< - InfraSnapshotAggregationResponse, - InfraSnapshotNodeGroupByBucket - >(framework, requestContext, query, bucketSelector, handleAfterKey); -}; - -const requestNodeMetrics = async ( - requestContext: RequestHandlerContext, - options: InfraSnapshotRequestOptions, - framework: KibanaFramework -): Promise => { - const index = - options.metric.type === 'logRate' - ? `${options.sourceConfiguration.logAlias}` - : `${options.sourceConfiguration.metricAlias}`; - - const query = { - allowNoIndices: true, - index, - ignoreUnavailable: true, - body: { - query: { - bool: { - filter: buildFilters(options, false), - }, - }, - size: 0, - aggregations: { - nodes: { - composite: { - size: SNAPSHOT_COMPOSITE_REQUEST_SIZE, - sources: getMetricsSources(options), - }, - aggregations: { - histogram: { - date_histogram: { - field: options.sourceConfiguration.fields.timestamp, - interval: options.timerange.interval || '1m', - offset: getDateHistogramOffset(options), - extended_bounds: { - min: options.timerange.from, - max: options.timerange.to, - }, - }, - aggregations: getMetricsAggregations(options), - }, - }, - }, - }, - }, - }; - return await getAllCompositeData< - InfraSnapshotAggregationResponse, - InfraSnapshotNodeMetricsBucket - >(framework, requestContext, query, bucketSelector, handleAfterKey); -}; - -// buckets can be InfraSnapshotNodeGroupByBucket[] or InfraSnapshotNodeMetricsBucket[] -// but typing this in a way that makes TypeScript happy is unreadable (if possible at all) -interface InfraSnapshotAggregationResponse { - nodes: { - buckets: any[]; - after_key: { [id: string]: string }; - }; -} - -const mergeNodeBuckets = ( - nodeGroupByBuckets: InfraSnapshotNodeGroupByBucket[], - nodeMetricsBuckets: InfraSnapshotNodeMetricsBucket[], - options: InfraSnapshotRequestOptions -): InfraSnapshotNode[] => { - const nodeMetricsForLookup = 
getNodeMetricsForLookup(nodeMetricsBuckets); - - return nodeGroupByBuckets.map(node => { - return { - path: getNodePath(node, options), - metric: getNodeMetrics(nodeMetricsForLookup[node.key.id], options), - }; - }); -}; - -const createQueryFilterClauses = (filterQuery: JsonObject | undefined) => - filterQuery ? [filterQuery] : []; - -const buildFilters = (options: InfraSnapshotRequestOptions, withQuery = true) => { - let filters: any = [ - { - range: { - [options.sourceConfiguration.fields.timestamp]: { - gte: options.timerange.from, - lte: options.timerange.to, - format: 'epoch_millis', - }, - }, - }, - ]; - - if (withQuery) { - filters = [...createQueryFilterClauses(options.filterQuery), ...filters]; - } - - if (options.accountId) { - filters.push({ - term: { - 'cloud.account.id': options.accountId, - }, - }); - } - - if (options.region) { - filters.push({ - term: { - 'cloud.region': options.region, - }, - }); - } - - return filters; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/snapshot/types.ts b/x-pack/legacy/plugins/infra/server/lib/snapshot/types.ts deleted file mode 100644 index 31823b2811121..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/snapshot/types.ts +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { JsonObject } from '../../../common/typed_json'; -import { - InfraNodeType, - InfraSourceConfiguration, - InfraTimerangeInput, - InfraSnapshotGroupbyInput, - InfraSnapshotMetricInput, -} from '../../../public/graphql/types'; - -export interface InfraSnapshotRequestOptions { - nodeType: InfraNodeType; - sourceConfiguration: InfraSourceConfiguration; - timerange: InfraTimerangeInput; - groupBy: InfraSnapshotGroupbyInput[]; - metric: InfraSnapshotMetricInput; - filterQuery: JsonObject | undefined; - accountId?: string; - region?: string; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/source_status.ts b/x-pack/legacy/plugins/infra/server/lib/source_status.ts deleted file mode 100644 index 1f0845b6b223f..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/source_status.ts +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; -import { InfraSources } from './sources'; - -export class InfraSourceStatus { - constructor( - private readonly adapter: InfraSourceStatusAdapter, - private readonly libs: { sources: InfraSources } - ) {} - - public async getLogIndexNames( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const indexNames = await this.adapter.getIndexNames( - requestContext, - sourceConfiguration.configuration.logAlias - ); - return indexNames; - } - public async getMetricIndexNames( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const indexNames = await this.adapter.getIndexNames( - requestContext, - sourceConfiguration.configuration.metricAlias - ); - return indexNames; - } - public async hasLogAlias( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const hasAlias = await this.adapter.hasAlias( - requestContext, - sourceConfiguration.configuration.logAlias - ); - return hasAlias; - } - public async hasMetricAlias( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const hasAlias = await this.adapter.hasAlias( - requestContext, - sourceConfiguration.configuration.metricAlias - ); - return hasAlias; - } - public async hasLogIndices( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const hasIndices = await this.adapter.hasIndices( - 
requestContext, - sourceConfiguration.configuration.logAlias - ); - return hasIndices; - } - public async hasMetricIndices( - requestContext: RequestHandlerContext, - sourceId: string - ): Promise { - const sourceConfiguration = await this.libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const hasIndices = await this.adapter.hasIndices( - requestContext, - sourceConfiguration.configuration.metricAlias - ); - return hasIndices; - } -} - -export interface InfraSourceStatusAdapter { - getIndexNames(requestContext: RequestHandlerContext, aliasName: string): Promise; - hasAlias(requestContext: RequestHandlerContext, aliasName: string): Promise; - hasIndices(requestContext: RequestHandlerContext, indexNames: string): Promise; -} diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/defaults.ts b/x-pack/legacy/plugins/infra/server/lib/sources/defaults.ts deleted file mode 100644 index b9ead0d169ee6..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/defaults.ts +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { InfraSourceConfiguration } from './types'; - -export const defaultSourceConfiguration: InfraSourceConfiguration = { - name: 'Default', - description: '', - metricAlias: 'metricbeat-*', - logAlias: 'filebeat-*,kibana_sample_data_logs*', - fields: { - container: 'container.id', - host: 'host.name', - message: ['message', '@message'], - pod: 'kubernetes.pod.uid', - tiebreaker: '_doc', - timestamp: '@timestamp', - }, - logColumns: [ - { - timestampColumn: { - id: '5e7f964a-be8a-40d8-88d2-fbcfbdca0e2f', - }, - }, - { - fieldColumn: { - id: ' eb9777a8-fcd3-420e-ba7d-172fff6da7a2', - field: 'event.dataset', - }, - }, - { - messageColumn: { - id: 'b645d6da-824b-4723-9a2a-e8cece1645c0', - }, - }, - ], -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/errors.ts b/x-pack/legacy/plugins/infra/server/lib/sources/errors.ts deleted file mode 100644 index 9f835f21443c6..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/errors.ts +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export class NotFoundError extends Error { - constructor(message?: string) { - super(message); - Object.setPrototypeOf(this, new.target.prototype); - } -} diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/index.ts b/x-pack/legacy/plugins/infra/server/lib/sources/index.ts deleted file mode 100644 index 6837f953ea18a..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/index.ts +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -export * from './defaults'; -export * from './saved_object_mappings'; -export * from './sources'; -export * from './types'; diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/saved_object_mappings.ts b/x-pack/legacy/plugins/infra/server/lib/sources/saved_object_mappings.ts deleted file mode 100644 index 973a790eeedaf..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/saved_object_mappings.ts +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { ElasticsearchMappingOf } from '../../utils/typed_elasticsearch_mappings'; -import { InfraSavedSourceConfiguration } from './types'; - -export const infraSourceConfigurationSavedObjectType = 'infrastructure-ui-source'; - -export const infraSourceConfigurationSavedObjectMappings: { - [infraSourceConfigurationSavedObjectType]: ElasticsearchMappingOf; -} = { - [infraSourceConfigurationSavedObjectType]: { - properties: { - name: { - type: 'text', - }, - description: { - type: 'text', - }, - metricAlias: { - type: 'keyword', - }, - logAlias: { - type: 'keyword', - }, - fields: { - properties: { - container: { - type: 'keyword', - }, - host: { - type: 'keyword', - }, - pod: { - type: 'keyword', - }, - tiebreaker: { - type: 'keyword', - }, - timestamp: { - type: 'keyword', - }, - }, - }, - logColumns: { - type: 'nested', - properties: { - timestampColumn: { - properties: { - id: { - type: 'keyword', - }, - }, - }, - messageColumn: { - properties: { - id: { - type: 'keyword', - }, - }, - }, - fieldColumn: { - properties: { - id: { - type: 'keyword', - }, - field: { - type: 'keyword', - }, - }, - }, - }, - }, - }, - }, -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/sources.test.ts b/x-pack/legacy/plugins/infra/server/lib/sources/sources.test.ts deleted 
file mode 100644 index 4a83ca730ff83..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/sources.test.ts +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -import { InfraSources } from './sources'; - -describe('the InfraSources lib', () => { - describe('getSourceConfiguration method', () => { - test('returns a source configuration if it exists', async () => { - const sourcesLib = new InfraSources({ - config: createMockStaticConfiguration({}), - }); - - const request: any = createRequestContext({ - id: 'TEST_ID', - version: 'foo', - updated_at: '2000-01-01T00:00:00.000Z', - attributes: { - metricAlias: 'METRIC_ALIAS', - logAlias: 'LOG_ALIAS', - fields: { - container: 'CONTAINER', - host: 'HOST', - pod: 'POD', - tiebreaker: 'TIEBREAKER', - timestamp: 'TIMESTAMP', - }, - }, - }); - - expect(await sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ - id: 'TEST_ID', - version: 'foo', - updatedAt: 946684800000, - configuration: { - metricAlias: 'METRIC_ALIAS', - logAlias: 'LOG_ALIAS', - fields: { - container: 'CONTAINER', - host: 'HOST', - pod: 'POD', - tiebreaker: 'TIEBREAKER', - timestamp: 'TIMESTAMP', - }, - }, - }); - }); - - test('adds missing attributes from the static configuration to a source configuration', async () => { - const sourcesLib = new InfraSources({ - config: createMockStaticConfiguration({ - default: { - metricAlias: 'METRIC_ALIAS', - logAlias: 'LOG_ALIAS', - fields: { - host: 'HOST', - pod: 'POD', - tiebreaker: 'TIEBREAKER', - timestamp: 'TIMESTAMP', - }, - }, - }), - }); - - const request: any = createRequestContext({ - id: 'TEST_ID', - version: 'foo', - updated_at: '2000-01-01T00:00:00.000Z', - attributes: { - fields: { - container: 'CONTAINER', - }, - }, - }); - - expect(await 
sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ - id: 'TEST_ID', - version: 'foo', - updatedAt: 946684800000, - configuration: { - metricAlias: 'METRIC_ALIAS', - logAlias: 'LOG_ALIAS', - fields: { - container: 'CONTAINER', - host: 'HOST', - pod: 'POD', - tiebreaker: 'TIEBREAKER', - timestamp: 'TIMESTAMP', - }, - }, - }); - }); - - test('adds missing attributes from the default configuration to a source configuration', async () => { - const sourcesLib = new InfraSources({ - config: createMockStaticConfiguration({}), - }); - - const request: any = createRequestContext({ - id: 'TEST_ID', - version: 'foo', - updated_at: '2000-01-01T00:00:00.000Z', - attributes: {}, - }); - - expect(await sourcesLib.getSourceConfiguration(request, 'TEST_ID')).toMatchObject({ - id: 'TEST_ID', - version: 'foo', - updatedAt: 946684800000, - configuration: { - metricAlias: expect.any(String), - logAlias: expect.any(String), - fields: { - container: expect.any(String), - host: expect.any(String), - pod: expect.any(String), - tiebreaker: expect.any(String), - timestamp: expect.any(String), - }, - }, - }); - }); - }); -}); - -const createMockStaticConfiguration = (sources: any) => ({ - enabled: true, - query: { - partitionSize: 1, - partitionFactor: 1, - }, - sources, -}); - -const createRequestContext = (savedObject?: any) => { - return { - core: { - savedObjects: { - client: { - async get() { - return savedObject; - }, - errors: { - isNotFoundError() { - return typeof savedObject === 'undefined'; - }, - }, - }, - }, - }, - }; -}; diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/sources.ts b/x-pack/legacy/plugins/infra/server/lib/sources/sources.ts deleted file mode 100644 index 2b38d81e4a8d5..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/sources.ts +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import * as runtimeTypes from 'io-ts'; -import { failure } from 'io-ts/lib/PathReporter'; -import { identity, constant } from 'fp-ts/lib/function'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { map, fold } from 'fp-ts/lib/Either'; -import { RequestHandlerContext } from 'src/core/server'; -import { defaultSourceConfiguration } from './defaults'; -import { NotFoundError } from './errors'; -import { infraSourceConfigurationSavedObjectType } from './saved_object_mappings'; -import { - InfraSavedSourceConfiguration, - InfraSourceConfiguration, - InfraStaticSourceConfiguration, - pickSavedSourceConfiguration, - SourceConfigurationSavedObjectRuntimeType, - StaticSourceConfigurationRuntimeType, -} from './types'; -import { InfraConfig } from '../../../../../../plugins/infra/server'; - -interface Libs { - config: InfraConfig; -} - -export class InfraSources { - private internalSourceConfigurations: Map = new Map(); - private readonly libs: Libs; - - constructor(libs: Libs) { - this.libs = libs; - } - - public async getSourceConfiguration(requestContext: RequestHandlerContext, sourceId: string) { - const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); - - const savedSourceConfiguration = await this.getInternalSourceConfiguration(sourceId) - .then(internalSourceConfiguration => ({ - id: sourceId, - version: undefined, - updatedAt: undefined, - origin: 'internal' as 'internal', - configuration: mergeSourceConfiguration( - staticDefaultSourceConfiguration, - internalSourceConfiguration - ), - })) - .catch(err => - err instanceof NotFoundError - ? 
this.getSavedSourceConfiguration(requestContext, sourceId).then(result => ({ - ...result, - configuration: mergeSourceConfiguration( - staticDefaultSourceConfiguration, - result.configuration - ), - })) - : Promise.reject(err) - ) - .catch(err => - requestContext.core.savedObjects.client.errors.isNotFoundError(err) - ? Promise.resolve({ - id: sourceId, - version: undefined, - updatedAt: undefined, - origin: 'fallback' as 'fallback', - configuration: staticDefaultSourceConfiguration, - }) - : Promise.reject(err) - ); - - return savedSourceConfiguration; - } - - public async getAllSourceConfigurations(requestContext: RequestHandlerContext) { - const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); - - const savedSourceConfigurations = await this.getAllSavedSourceConfigurations(requestContext); - - return savedSourceConfigurations.map(savedSourceConfiguration => ({ - ...savedSourceConfiguration, - configuration: mergeSourceConfiguration( - staticDefaultSourceConfiguration, - savedSourceConfiguration.configuration - ), - })); - } - - public async createSourceConfiguration( - requestContext: RequestHandlerContext, - sourceId: string, - source: InfraSavedSourceConfiguration - ) { - const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); - - const newSourceConfiguration = mergeSourceConfiguration( - staticDefaultSourceConfiguration, - source - ); - - const createdSourceConfiguration = convertSavedObjectToSavedSourceConfiguration( - await requestContext.core.savedObjects.client.create( - infraSourceConfigurationSavedObjectType, - pickSavedSourceConfiguration(newSourceConfiguration) as any, - { id: sourceId } - ) - ); - - return { - ...createdSourceConfiguration, - configuration: mergeSourceConfiguration( - staticDefaultSourceConfiguration, - createdSourceConfiguration.configuration - ), - }; - } - - public async deleteSourceConfiguration(requestContext: RequestHandlerContext, sourceId: string) { - await 
requestContext.core.savedObjects.client.delete( - infraSourceConfigurationSavedObjectType, - sourceId - ); - } - - public async updateSourceConfiguration( - requestContext: RequestHandlerContext, - sourceId: string, - sourceProperties: InfraSavedSourceConfiguration - ) { - const staticDefaultSourceConfiguration = await this.getStaticDefaultSourceConfiguration(); - - const { configuration, version } = await this.getSourceConfiguration(requestContext, sourceId); - - const updatedSourceConfigurationAttributes = mergeSourceConfiguration( - configuration, - sourceProperties - ); - - const updatedSourceConfiguration = convertSavedObjectToSavedSourceConfiguration( - await requestContext.core.savedObjects.client.update( - infraSourceConfigurationSavedObjectType, - sourceId, - pickSavedSourceConfiguration(updatedSourceConfigurationAttributes) as any, - { - version, - } - ) - ); - - return { - ...updatedSourceConfiguration, - configuration: mergeSourceConfiguration( - staticDefaultSourceConfiguration, - updatedSourceConfiguration.configuration - ), - }; - } - - public async defineInternalSourceConfiguration( - sourceId: string, - sourceProperties: InfraStaticSourceConfiguration - ) { - this.internalSourceConfigurations.set(sourceId, sourceProperties); - } - - public async getInternalSourceConfiguration(sourceId: string) { - const internalSourceConfiguration = this.internalSourceConfigurations.get(sourceId); - - if (!internalSourceConfiguration) { - throw new NotFoundError( - `Failed to load internal source configuration: no configuration "${sourceId}" found.` - ); - } - - return internalSourceConfiguration; - } - - private async getStaticDefaultSourceConfiguration() { - const staticSourceConfiguration = pipe( - runtimeTypes - .type({ - sources: runtimeTypes.type({ - default: StaticSourceConfigurationRuntimeType, - }), - }) - .decode(this.libs.config), - map(({ sources: { default: defaultConfiguration } }) => defaultConfiguration), - fold(constant({}), identity) - ); - - 
return mergeSourceConfiguration(defaultSourceConfiguration, staticSourceConfiguration); - } - - private async getSavedSourceConfiguration( - requestContext: RequestHandlerContext, - sourceId: string - ) { - const savedObject = await requestContext.core.savedObjects.client.get( - infraSourceConfigurationSavedObjectType, - sourceId - ); - - return convertSavedObjectToSavedSourceConfiguration(savedObject); - } - - private async getAllSavedSourceConfigurations(requestContext: RequestHandlerContext) { - const savedObjects = await requestContext.core.savedObjects.client.find({ - type: infraSourceConfigurationSavedObjectType, - }); - - return savedObjects.saved_objects.map(convertSavedObjectToSavedSourceConfiguration); - } -} - -const mergeSourceConfiguration = ( - first: InfraSourceConfiguration, - ...others: InfraStaticSourceConfiguration[] -) => - others.reduce( - (previousSourceConfiguration, currentSourceConfiguration) => ({ - ...previousSourceConfiguration, - ...currentSourceConfiguration, - fields: { - ...previousSourceConfiguration.fields, - ...currentSourceConfiguration.fields, - }, - }), - first - ); - -const convertSavedObjectToSavedSourceConfiguration = (savedObject: unknown) => - pipe( - SourceConfigurationSavedObjectRuntimeType.decode(savedObject), - map(savedSourceConfiguration => ({ - id: savedSourceConfiguration.id, - version: savedSourceConfiguration.version, - updatedAt: savedSourceConfiguration.updated_at, - origin: 'stored' as 'stored', - configuration: savedSourceConfiguration.attributes, - })), - fold(errors => { - throw new Error(failure(errors).join('\n')); - }, identity) - ); diff --git a/x-pack/legacy/plugins/infra/server/lib/sources/types.ts b/x-pack/legacy/plugins/infra/server/lib/sources/types.ts deleted file mode 100644 index 1f850635cf35a..0000000000000 --- a/x-pack/legacy/plugins/infra/server/lib/sources/types.ts +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -/* eslint-disable @typescript-eslint/no-empty-interface */ - -import * as runtimeTypes from 'io-ts'; -import moment from 'moment'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { chain } from 'fp-ts/lib/Either'; - -export const TimestampFromString = new runtimeTypes.Type( - 'TimestampFromString', - (input): input is number => typeof input === 'number', - (input, context) => - pipe( - runtimeTypes.string.validate(input, context), - chain(stringInput => { - const momentValue = moment(stringInput); - return momentValue.isValid() - ? runtimeTypes.success(momentValue.valueOf()) - : runtimeTypes.failure(stringInput, context); - }) - ), - output => new Date(output).toISOString() -); - -/** - * Stored source configuration as read from and written to saved objects - */ - -const SavedSourceConfigurationFieldsRuntimeType = runtimeTypes.partial({ - container: runtimeTypes.string, - host: runtimeTypes.string, - pod: runtimeTypes.string, - tiebreaker: runtimeTypes.string, - timestamp: runtimeTypes.string, -}); - -export const SavedSourceConfigurationTimestampColumnRuntimeType = runtimeTypes.type({ - timestampColumn: runtimeTypes.type({ - id: runtimeTypes.string, - }), -}); - -export const SavedSourceConfigurationMessageColumnRuntimeType = runtimeTypes.type({ - messageColumn: runtimeTypes.type({ - id: runtimeTypes.string, - }), -}); - -export const SavedSourceConfigurationFieldColumnRuntimeType = runtimeTypes.type({ - fieldColumn: runtimeTypes.type({ - id: runtimeTypes.string, - field: runtimeTypes.string, - }), -}); - -export const SavedSourceConfigurationColumnRuntimeType = runtimeTypes.union([ - SavedSourceConfigurationTimestampColumnRuntimeType, - SavedSourceConfigurationMessageColumnRuntimeType, - SavedSourceConfigurationFieldColumnRuntimeType, -]); - -export const 
SavedSourceConfigurationRuntimeType = runtimeTypes.partial({ - name: runtimeTypes.string, - description: runtimeTypes.string, - metricAlias: runtimeTypes.string, - logAlias: runtimeTypes.string, - fields: SavedSourceConfigurationFieldsRuntimeType, - logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), -}); - -export interface InfraSavedSourceConfiguration - extends runtimeTypes.TypeOf {} - -export const pickSavedSourceConfiguration = ( - value: InfraSourceConfiguration -): InfraSavedSourceConfiguration => { - const { name, description, metricAlias, logAlias, fields, logColumns } = value; - const { container, host, pod, tiebreaker, timestamp } = fields; - - return { - name, - description, - metricAlias, - logAlias, - fields: { container, host, pod, tiebreaker, timestamp }, - logColumns, - }; -}; - -/** - * Static source configuration as read from the configuration file - */ - -const StaticSourceConfigurationFieldsRuntimeType = runtimeTypes.partial({ - ...SavedSourceConfigurationFieldsRuntimeType.props, - message: runtimeTypes.array(runtimeTypes.string), -}); - -export const StaticSourceConfigurationRuntimeType = runtimeTypes.partial({ - name: runtimeTypes.string, - description: runtimeTypes.string, - metricAlias: runtimeTypes.string, - logAlias: runtimeTypes.string, - fields: StaticSourceConfigurationFieldsRuntimeType, - logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), -}); - -export interface InfraStaticSourceConfiguration - extends runtimeTypes.TypeOf {} - -/** - * Full source configuration type after all cleanup has been done at the edges - */ - -const SourceConfigurationFieldsRuntimeType = runtimeTypes.type({ - ...StaticSourceConfigurationFieldsRuntimeType.props, -}); - -export const SourceConfigurationRuntimeType = runtimeTypes.type({ - ...SavedSourceConfigurationRuntimeType.props, - fields: SourceConfigurationFieldsRuntimeType, - logColumns: runtimeTypes.array(SavedSourceConfigurationColumnRuntimeType), -}); - 
-export interface InfraSourceConfiguration - extends runtimeTypes.TypeOf {} - -/** - * Saved object type with metadata - */ - -export const SourceConfigurationSavedObjectRuntimeType = runtimeTypes.intersection([ - runtimeTypes.type({ - id: runtimeTypes.string, - attributes: SavedSourceConfigurationRuntimeType, - }), - runtimeTypes.partial({ - version: runtimeTypes.string, - updated_at: TimestampFromString, - }), -]); - -export interface SourceConfigurationSavedObject - extends runtimeTypes.TypeOf {} diff --git a/x-pack/legacy/plugins/infra/server/new_platform_index.ts b/x-pack/legacy/plugins/infra/server/new_platform_index.ts deleted file mode 100644 index e59897a6b241d..0000000000000 --- a/x-pack/legacy/plugins/infra/server/new_platform_index.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { PluginInitializerContext } from 'src/core/server'; -import { InfraServerPlugin, InfraPluginSetup } from './new_platform_plugin'; -import { config, InfraConfig } from '../../../../plugins/infra/server'; -import { InfraServerPluginDeps } from './lib/adapters/framework'; -export { config, InfraConfig, InfraServerPluginDeps, InfraPluginSetup }; - -export function plugin(context: PluginInitializerContext) { - return new InfraServerPlugin(context); -} diff --git a/x-pack/legacy/plugins/infra/server/new_platform_plugin.ts b/x-pack/legacy/plugins/infra/server/new_platform_plugin.ts deleted file mode 100644 index 147729a1d0b3e..0000000000000 --- a/x-pack/legacy/plugins/infra/server/new_platform_plugin.ts +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -import { CoreSetup, PluginInitializerContext } from 'src/core/server'; -import { i18n } from '@kbn/i18n'; -import { Server } from 'hapi'; -import { InfraConfig } from '../../../../plugins/infra/server'; -import { initInfraServer } from './infra_server'; -import { InfraBackendLibs, InfraDomainLibs } from './lib/infra_types'; -import { FrameworkFieldsAdapter } from './lib/adapters/fields/framework_fields_adapter'; -import { KibanaFramework } from './lib/adapters/framework/kibana_framework_adapter'; -import { InfraKibanaLogEntriesAdapter } from './lib/adapters/log_entries/kibana_log_entries_adapter'; -import { KibanaMetricsAdapter } from './lib/adapters/metrics/kibana_metrics_adapter'; -import { InfraElasticsearchSourceStatusAdapter } from './lib/adapters/source_status'; -import { InfraFieldsDomain } from './lib/domains/fields_domain'; -import { InfraLogEntriesDomain } from './lib/domains/log_entries_domain'; -import { InfraMetricsDomain } from './lib/domains/metrics_domain'; -import { InfraLogAnalysis } from './lib/log_analysis'; -import { InfraSnapshot } from './lib/snapshot'; -import { InfraSourceStatus } from './lib/source_status'; -import { InfraSources } from './lib/sources'; -import { InfraServerPluginDeps } from './lib/adapters/framework'; -import { METRICS_FEATURE, LOGS_FEATURE } from './features'; -import { UsageCollector } from './usage/usage_collector'; -import { APP_ID } from '../index'; -import { InfraStaticSourceConfiguration } from './lib/sources/types'; - -export interface KbnServer extends Server { - usage: any; -} - -const logsSampleDataLinkLabel = i18n.translate('xpack.infra.sampleDataLinkLabel', { - defaultMessage: 'Logs', -}); - -export interface InfraPluginSetup { - defineInternalSourceConfiguration: ( - sourceId: string, - sourceProperties: InfraStaticSourceConfiguration - ) => void; -} - -const DEFAULT_CONFIG: InfraConfig = { 
- enabled: true, - query: { - partitionSize: 75, - partitionFactor: 1.2, - }, -}; - -export class InfraServerPlugin { - public config: InfraConfig = DEFAULT_CONFIG; - public libs: InfraBackendLibs | undefined; - - constructor(context: PluginInitializerContext) { - const config$ = context.config.create(); - config$.subscribe(configValue => { - this.config = { - ...DEFAULT_CONFIG, - enabled: configValue.enabled, - query: { - ...DEFAULT_CONFIG.query, - ...configValue.query, - }, - }; - }); - } - - getLibs() { - if (!this.libs) { - throw new Error('libs not set up yet'); - } - return this.libs; - } - - setup(core: CoreSetup, plugins: InfraServerPluginDeps) { - const framework = new KibanaFramework(core, this.config, plugins); - const sources = new InfraSources({ - config: this.config, - }); - const sourceStatus = new InfraSourceStatus( - new InfraElasticsearchSourceStatusAdapter(framework), - { - sources, - } - ); - const snapshot = new InfraSnapshot({ sources, framework }); - const logAnalysis = new InfraLogAnalysis({ framework }); - - // TODO: separate these out individually and do away with "domains" as a temporary group - const domainLibs: InfraDomainLibs = { - fields: new InfraFieldsDomain(new FrameworkFieldsAdapter(framework), { - sources, - }), - logEntries: new InfraLogEntriesDomain(new InfraKibanaLogEntriesAdapter(framework), { - sources, - }), - metrics: new InfraMetricsDomain(new KibanaMetricsAdapter(framework)), - }; - - this.libs = { - configuration: this.config, - framework, - logAnalysis, - snapshot, - sources, - sourceStatus, - ...domainLibs, - }; - - plugins.features.registerFeature(METRICS_FEATURE); - plugins.features.registerFeature(LOGS_FEATURE); - - plugins.home.sampleData.addAppLinksToSampleDataset('logs', [ - { - path: `/app/${APP_ID}#/logs`, - label: logsSampleDataLinkLabel, - icon: 'logsApp', - }, - ]); - - initInfraServer(this.libs); - - // Telemetry - UsageCollector.registerUsageCollector(plugins.usageCollection); - - return { - 
defineInternalSourceConfiguration(sourceId, sourceProperties) { - sources.defineInternalSourceConfiguration(sourceId, sourceProperties); - }, - } as InfraPluginSetup; - } -} diff --git a/x-pack/legacy/plugins/infra/server/routes/inventory_metadata/index.ts b/x-pack/legacy/plugins/infra/server/routes/inventory_metadata/index.ts deleted file mode 100644 index 33328bdfebaf4..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/inventory_metadata/index.ts +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { schema } from '@kbn/config-schema'; -import Boom from 'boom'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { InfraBackendLibs } from '../../lib/infra_types'; -import { throwErrors } from '../../../common/runtime_types'; - -import { - InventoryMetaRequestRT, - InventoryMetaResponseRT, -} from '../../../common/http_api/inventory_meta_api'; -import { getCloudMetadata } from './lib/get_cloud_metadata'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initInventoryMetaRoute = (libs: InfraBackendLibs) => { - const { framework } = libs; - - framework.registerRoute( - { - method: 'post', - path: '/api/infra/inventory/meta', - validate: { - body: escapeHatch, - }, - }, - async (requestContext, request, response) => { - try { - const { sourceId, nodeType } = pipe( - InventoryMetaRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - - const { configuration } = await libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const awsMetadata = await getCloudMetadata( - framework, - requestContext, - configuration, - nodeType - ); - - return response.ok({ - body: 
InventoryMetaResponseRT.encode(awsMetadata), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/ip_to_hostname.ts b/x-pack/legacy/plugins/infra/server/routes/ip_to_hostname.ts deleted file mode 100644 index 5ad79b3d17a13..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/ip_to_hostname.ts +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -import { first } from 'lodash'; -import { schema } from '@kbn/config-schema'; -import { InfraBackendLibs } from '../lib/infra_types'; - -export interface IpToHostResponse { - host: string; -} - -interface HostDoc { - _source: { - host: { - name: string; - }; - }; -} - -const ipToHostSchema = schema.object({ - ip: schema.string(), - index_pattern: schema.string(), -}); - -export const initIpToHostName = ({ framework }: InfraBackendLibs) => { - const { callWithRequest } = framework; - framework.registerRoute( - { - method: 'post', - path: '/api/infra/ip_to_host', - validate: { - body: ipToHostSchema, - }, - }, - async (requestContext, { body }, response) => { - try { - const params = { - index: body.index_pattern, - body: { - size: 1, - query: { - match: { 'host.ip': body.ip }, - }, - _source: ['host.name'], - }, - }; - const { hits } = await callWithRequest(requestContext, 'search', params); - if (hits.total.value === 0) { - return response.notFound({ - body: { message: 'Host with matching IP address not found.' 
}, - }); - } - const hostDoc = first(hits.hits); - return response.ok({ body: { host: hostDoc._source.host.name } }); - } catch ({ statusCode = 500, message = 'Unknown error occurred' }) { - return response.customError({ - statusCode, - body: { message }, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_analysis/index.ts b/x-pack/legacy/plugins/infra/server/routes/log_analysis/index.ts deleted file mode 100644 index 378e32cb3582c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_analysis/index.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './results'; -export * from './validation'; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/index.ts b/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/index.ts deleted file mode 100644 index 1749421277719..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './log_entry_rate'; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts b/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts deleted file mode 100644 index 9778311bd8e58..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_analysis/results/log_entry_rate.ts +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import Boom from 'boom'; - -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { schema } from '@kbn/config-schema'; -import { InfraBackendLibs } from '../../../lib/infra_types'; -import { - LOG_ANALYSIS_GET_LOG_ENTRY_RATE_PATH, - getLogEntryRateRequestPayloadRT, - getLogEntryRateSuccessReponsePayloadRT, - GetLogEntryRateSuccessResponsePayload, -} from '../../../../common/http_api/log_analysis'; -import { throwErrors } from '../../../../common/runtime_types'; -import { NoLogRateResultsIndexError } from '../../../lib/log_analysis'; - -const anyObject = schema.object({}, { allowUnknowns: true }); - -export const initGetLogEntryRateRoute = ({ framework, logAnalysis }: InfraBackendLibs) => { - framework.registerRoute( - { - method: 'post', - path: LOG_ANALYSIS_GET_LOG_ENTRY_RATE_PATH, - validate: { - // short-circuit forced @kbn/config-schema validation so we can do io-ts validation - body: anyObject, - }, - }, - async (requestContext, request, response) => { - try { - const payload = pipe( - getLogEntryRateRequestPayloadRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - - const logEntryRateBuckets = await logAnalysis.getLogEntryRateBuckets( - requestContext, - payload.data.sourceId, - payload.data.timeRange.startTime, - payload.data.timeRange.endTime, - payload.data.bucketDuration, - request - ); - - return response.ok({ - body: getLogEntryRateSuccessReponsePayloadRT.encode({ - data: { - bucketDuration: payload.data.bucketDuration, - histogramBuckets: logEntryRateBuckets, - totalNumberOfLogEntries: getTotalNumberOfLogEntries(logEntryRateBuckets), - }, - }), - }); - } catch (e) { - const { statusCode = 500, message = 'Unknown error occurred' } = e; - if (e instanceof NoLogRateResultsIndexError) { - return response.notFound({ body: { 
message } }); - } - return response.customError({ - statusCode, - body: { message }, - }); - } - } - ); -}; - -const getTotalNumberOfLogEntries = ( - logEntryRateBuckets: GetLogEntryRateSuccessResponsePayload['data']['histogramBuckets'] -) => { - return logEntryRateBuckets.reduce((sumNumberOfLogEntries, bucket) => { - const sumPartitions = bucket.partitions.reduce((partitionsTotal, partition) => { - return (partitionsTotal += partition.numberOfLogEntries); - }, 0); - return (sumNumberOfLogEntries += sumPartitions); - }, 0); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/index.ts b/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/index.ts deleted file mode 100644 index 727faca69298e..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/index.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './indices'; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/indices.ts b/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/indices.ts deleted file mode 100644 index fe579124cfe10..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_analysis/validation/indices.ts +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -import Boom from 'boom'; - -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { schema } from '@kbn/config-schema'; -import { InfraBackendLibs } from '../../../lib/infra_types'; -import { - LOG_ANALYSIS_VALIDATE_INDICES_PATH, - validationIndicesRequestPayloadRT, - validationIndicesResponsePayloadRT, - ValidationIndicesError, -} from '../../../../common/http_api'; - -import { throwErrors } from '../../../../common/runtime_types'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initValidateLogAnalysisIndicesRoute = ({ framework }: InfraBackendLibs) => { - framework.registerRoute( - { - method: 'post', - path: LOG_ANALYSIS_VALIDATE_INDICES_PATH, - validate: { body: escapeHatch }, - }, - async (requestContext, request, response) => { - try { - const payload = pipe( - validationIndicesRequestPayloadRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - - const { fields, indices } = payload.data; - const errors: ValidationIndicesError[] = []; - - // Query each pattern individually, to map correctly the errors - await Promise.all( - indices.map(async index => { - const fieldCaps = await framework.callWithRequest(requestContext, 'fieldCaps', { - allow_no_indices: true, - fields: fields.map(field => field.name), - ignore_unavailable: true, - index, - }); - - if (fieldCaps.indices.length === 0) { - errors.push({ - error: 'INDEX_NOT_FOUND', - index, - }); - return; - } - - fields.forEach(({ name: fieldName, validTypes }) => { - const fieldMetadata = fieldCaps.fields[fieldName]; - - if (fieldMetadata === undefined) { - errors.push({ - error: 'FIELD_NOT_FOUND', - index, - field: fieldName, - }); - } else { - const fieldTypes = Object.keys(fieldMetadata); - - if (!fieldTypes.every(fieldType => validTypes.includes(fieldType))) { - errors.push({ - error: `FIELD_NOT_VALID`, - index, - field: fieldName, - }); - } - } - }); - }) - 
); - - return response.ok({ - body: validationIndicesResponsePayloadRT.encode({ data: { errors } }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_entries/index.ts b/x-pack/legacy/plugins/infra/server/routes/log_entries/index.ts deleted file mode 100644 index 8fed914c3dc8c..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_entries/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export * from './item'; -export * from './summary'; -export * from './summary_highlights'; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_entries/summary.ts b/x-pack/legacy/plugins/infra/server/routes/log_entries/summary.ts deleted file mode 100644 index 05643adbe781f..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_entries/summary.ts +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import Boom from 'boom'; - -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { schema } from '@kbn/config-schema'; - -import { throwErrors } from '../../../common/runtime_types'; - -import { InfraBackendLibs } from '../../lib/infra_types'; -import { - LOG_ENTRIES_SUMMARY_PATH, - logEntriesSummaryRequestRT, - logEntriesSummaryResponseRT, -} from '../../../common/http_api/log_entries'; -import { parseFilterQuery } from '../../utils/serialized_query'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initLogEntriesSummaryRoute = ({ framework, logEntries }: InfraBackendLibs) => { - framework.registerRoute( - { - method: 'post', - path: LOG_ENTRIES_SUMMARY_PATH, - validate: { body: escapeHatch }, - }, - async (requestContext, request, response) => { - try { - const payload = pipe( - logEntriesSummaryRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - const { sourceId, startDate, endDate, bucketSize, query } = payload; - - const buckets = await logEntries.getLogSummaryBucketsBetween( - requestContext, - sourceId, - startDate, - endDate, - bucketSize, - parseFilterQuery(query) - ); - - return response.ok({ - body: logEntriesSummaryResponseRT.encode({ - data: { - start: startDate, - end: endDate, - buckets, - }, - }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/log_entries/summary_highlights.ts b/x-pack/legacy/plugins/infra/server/routes/log_entries/summary_highlights.ts deleted file mode 100644 index ecccd931bb371..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/log_entries/summary_highlights.ts +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import Boom from 'boom'; - -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { schema } from '@kbn/config-schema'; - -import { throwErrors } from '../../../common/runtime_types'; - -import { InfraBackendLibs } from '../../lib/infra_types'; -import { - LOG_ENTRIES_SUMMARY_HIGHLIGHTS_PATH, - logEntriesSummaryHighlightsRequestRT, - logEntriesSummaryHighlightsResponseRT, -} from '../../../common/http_api/log_entries'; -import { parseFilterQuery } from '../../utils/serialized_query'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initLogEntriesSummaryHighlightsRoute = ({ - framework, - logEntries, -}: InfraBackendLibs) => { - framework.registerRoute( - { - method: 'post', - path: LOG_ENTRIES_SUMMARY_HIGHLIGHTS_PATH, - validate: { body: escapeHatch }, - }, - async (requestContext, request, response) => { - try { - const payload = pipe( - logEntriesSummaryHighlightsRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - const { sourceId, startDate, endDate, bucketSize, query, highlightTerms } = payload; - - const bucketsPerHighlightTerm = await logEntries.getLogSummaryHighlightBucketsBetween( - requestContext, - sourceId, - startDate, - endDate, - bucketSize, - highlightTerms, - parseFilterQuery(query) - ); - - return response.ok({ - body: logEntriesSummaryHighlightsResponseRT.encode({ - data: bucketsPerHighlightTerm.map(buckets => ({ - start: startDate, - end: endDate, - buckets, - })), - }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/index.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/index.ts deleted file mode 100644 index a1f6311a103eb..0000000000000 --- 
a/x-pack/legacy/plugins/infra/server/routes/metadata/index.ts +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { schema } from '@kbn/config-schema'; -import Boom from 'boom'; -import { get } from 'lodash'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { - InfraMetadataFeature, - InfraMetadataRequestRT, - InfraMetadataRT, -} from '../../../common/http_api/metadata_api'; -import { InfraBackendLibs } from '../../lib/infra_types'; -import { getMetricMetadata } from './lib/get_metric_metadata'; -import { pickFeatureName } from './lib/pick_feature_name'; -import { hasAPMData } from './lib/has_apm_data'; -import { getCloudMetricsMetadata } from './lib/get_cloud_metric_metadata'; -import { getNodeInfo } from './lib/get_node_info'; -import { throwErrors } from '../../../common/runtime_types'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initMetadataRoute = (libs: InfraBackendLibs) => { - const { framework } = libs; - - framework.registerRoute( - { - method: 'post', - path: '/api/infra/metadata', - validate: { - body: escapeHatch, - }, - }, - async (requestContext, request, response) => { - try { - const { nodeId, nodeType, sourceId } = pipe( - InfraMetadataRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - - const { configuration } = await libs.sources.getSourceConfiguration( - requestContext, - sourceId - ); - const metricsMetadata = await getMetricMetadata( - framework, - requestContext, - configuration, - nodeId, - nodeType - ); - const metricFeatures = pickFeatureName(metricsMetadata.buckets).map( - nameToFeature('metrics') - ); - - const info = await 
getNodeInfo(framework, requestContext, configuration, nodeId, nodeType); - const cloudInstanceId = get(info, 'cloud.instance.id'); - - const cloudMetricsMetadata = cloudInstanceId - ? await getCloudMetricsMetadata(framework, requestContext, configuration, cloudInstanceId) - : { buckets: [] }; - const cloudMetricsFeatures = pickFeatureName(cloudMetricsMetadata.buckets).map( - nameToFeature('metrics') - ); - const hasAPM = await hasAPMData(framework, requestContext, configuration, nodeId, nodeType); - const apmMetricFeatures = hasAPM ? [{ name: 'apm.transaction', source: 'apm' }] : []; - - const id = metricsMetadata.id; - const name = metricsMetadata.name || id; - return response.ok({ - body: InfraMetadataRT.encode({ - id, - name, - features: [...metricFeatures, ...cloudMetricsFeatures, ...apmMetricFeatures], - info, - }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; - -const nameToFeature = (source: string) => (name: string): InfraMetadataFeature => ({ - name, - source, -}); diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts deleted file mode 100644 index 75ca3ae3caee2..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_cloud_metric_metadata.ts +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; -import { - InfraMetadataAggregationBucket, - InfraMetadataAggregationResponse, -} from '../../../lib/adapters/framework'; -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { InfraSourceConfiguration } from '../../../lib/sources'; -import { CLOUD_METRICS_MODULES } from '../../../lib/constants'; - -export interface InfraCloudMetricsAdapterResponse { - buckets: InfraMetadataAggregationBucket[]; -} - -export const getCloudMetricsMetadata = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - instanceId: string -): Promise => { - const metricQuery = { - allowNoIndices: true, - ignoreUnavailable: true, - index: sourceConfiguration.metricAlias, - body: { - query: { - bool: { - filter: [{ match: { 'cloud.instance.id': instanceId } }], - should: CLOUD_METRICS_MODULES.map(module => ({ match: { 'event.module': module } })), - }, - }, - size: 0, - aggs: { - metrics: { - terms: { - field: 'event.dataset', - size: 1000, - }, - }, - }, - }, - }; - - const response = await framework.callWithRequest< - {}, - { - metrics?: InfraMetadataAggregationResponse; - } - >(requestContext, 'search', metricQuery); - - const buckets = - response.aggregations && response.aggregations.metrics - ? response.aggregations.metrics.buckets - : []; - - return { buckets }; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts deleted file mode 100644 index 191339565b813..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_metric_metadata.ts +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { get } from 'lodash'; -import { RequestHandlerContext } from 'src/core/server'; -import { - InfraMetadataAggregationBucket, - InfraMetadataAggregationResponse, -} from '../../../lib/adapters/framework'; -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { InfraSourceConfiguration } from '../../../lib/sources'; -import { findInventoryFields } from '../../../../common/inventory_models'; -import { InventoryItemType } from '../../../../common/inventory_models/types'; - -export interface InfraMetricsAdapterResponse { - id: string; - name?: string; - buckets: InfraMetadataAggregationBucket[]; -} - -export const getMetricMetadata = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - nodeId: string, - nodeType: InventoryItemType -): Promise => { - const fields = findInventoryFields(nodeType, sourceConfiguration.fields); - const metricQuery = { - allowNoIndices: true, - ignoreUnavailable: true, - index: sourceConfiguration.metricAlias, - body: { - query: { - bool: { - must_not: [{ match: { 'event.dataset': 'aws.ec2' } }], - filter: [ - { - match: { [fields.id]: nodeId }, - }, - ], - }, - }, - size: 0, - aggs: { - nodeName: { - terms: { - field: fields.name, - size: 1, - }, - }, - metrics: { - terms: { - field: 'event.dataset', - size: 1000, - }, - }, - }, - }, - }; - - const response = await framework.callWithRequest< - {}, - { - metrics?: InfraMetadataAggregationResponse; - nodeName?: InfraMetadataAggregationResponse; - } - >(requestContext, 'search', metricQuery); - - const buckets = - response.aggregations && response.aggregations.metrics - ? 
response.aggregations.metrics.buckets - : []; - - return { - id: nodeId, - name: get(response, ['aggregations', 'nodeName', 'buckets', 0, 'key'], nodeId), - buckets, - }; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_node_info.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_node_info.ts deleted file mode 100644 index 4ff0df30abedd..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_node_info.ts +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { first, set, startsWith } from 'lodash'; -import { RequestHandlerContext } from 'src/core/server'; -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { InfraSourceConfiguration } from '../../../lib/sources'; -import { InfraNodeType } from '../../../graphql/types'; -import { InfraMetadataInfo } from '../../../../common/http_api/metadata_api'; -import { getPodNodeName } from './get_pod_node_name'; -import { CLOUD_METRICS_MODULES } from '../../../lib/constants'; -import { findInventoryFields } from '../../../../common/inventory_models'; -import { InventoryItemType } from '../../../../common/inventory_models/types'; - -export const getNodeInfo = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - nodeId: string, - nodeType: InventoryItemType -): Promise => { - // If the nodeType is a Kubernetes pod then we need to get the node info - // from a host record instead of a pod. This is due to the fact that any host - // can report pod details and we can't rely on the host/cloud information associated - // with the kubernetes.pod.uid. 
We need to first lookup the `kubernetes.node.name` - // then use that to lookup the host's node information. - if (nodeType === InfraNodeType.pod) { - const kubernetesNodeName = await getPodNodeName( - framework, - requestContext, - sourceConfiguration, - nodeId, - nodeType - ); - if (kubernetesNodeName) { - return getNodeInfo( - framework, - requestContext, - sourceConfiguration, - kubernetesNodeName, - InfraNodeType.host - ); - } - return {}; - } - const fields = findInventoryFields(nodeType, sourceConfiguration.fields); - const params = { - allowNoIndices: true, - ignoreUnavailable: true, - terminateAfter: 1, - index: sourceConfiguration.metricAlias, - body: { - size: 1, - _source: ['host.*', 'cloud.*'], - query: { - bool: { - filter: [{ match: { [fields.id]: nodeId } }], - }, - }, - }, - }; - if (!CLOUD_METRICS_MODULES.some(m => startsWith(nodeType, m))) { - set( - params, - 'body.query.bool.must_not', - CLOUD_METRICS_MODULES.map(module => ({ match: { 'event.module': module } })) - ); - } - const response = await framework.callWithRequest<{ _source: InfraMetadataInfo }, {}>( - requestContext, - 'search', - params - ); - const firstHit = first(response.hits.hits); - if (firstHit) { - return firstHit._source; - } - return {}; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_pod_node_name.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_pod_node_name.ts deleted file mode 100644 index be6e29a794d09..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/get_pod_node_name.ts +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { first, get } from 'lodash'; -import { RequestHandlerContext } from 'src/core/server'; -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { InfraSourceConfiguration } from '../../../lib/sources'; -import { findInventoryFields } from '../../../../common/inventory_models'; - -export const getPodNodeName = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - nodeId: string, - nodeType: 'host' | 'pod' | 'container' -): Promise => { - const fields = findInventoryFields(nodeType, sourceConfiguration.fields); - const params = { - allowNoIndices: true, - ignoreUnavailable: true, - terminateAfter: 1, - index: sourceConfiguration.metricAlias, - body: { - size: 1, - _source: ['kubernetes.node.name'], - query: { - bool: { - filter: [ - { match: { [fields.id]: nodeId } }, - { exists: { field: `kubernetes.node.name` } }, - ], - }, - }, - }, - }; - const response = await framework.callWithRequest< - { _source: { kubernetes: { node: { name: string } } } }, - {} - >(requestContext, 'search', params); - const firstHit = first(response.hits.hits); - if (firstHit) { - return get(firstHit, '_source.kubernetes.node.name'); - } -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/has_apm_data.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/has_apm_data.ts deleted file mode 100644 index 9ca0819d74d46..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/has_apm_data.ts +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; - -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { InfraSourceConfiguration } from '../../../lib/sources'; -import { findInventoryFields } from '../../../../common/inventory_models'; -import { InventoryItemType } from '../../../../common/inventory_models/types'; - -export const hasAPMData = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - sourceConfiguration: InfraSourceConfiguration, - nodeId: string, - nodeType: InventoryItemType -) => { - const apmIndices = await framework.plugins.apm.getApmIndices( - requestContext.core.savedObjects.client - ); - const apmIndex = apmIndices['apm_oss.transactionIndices'] || 'apm-*'; - const fields = findInventoryFields(nodeType, sourceConfiguration.fields); - - // There is a bug in APM ECS data where host.name is not set. - // This will fixed with: https://github.com/elastic/apm-server/issues/2502 - const nodeFieldName = nodeType === 'host' ? 'host.hostname' : fields.id; - const params = { - allowNoIndices: true, - ignoreUnavailable: true, - terminateAfter: 1, - index: apmIndex, - body: { - size: 0, - query: { - bool: { - filter: [ - { - match: { [nodeFieldName]: nodeId }, - }, - { - exists: { field: 'service.name' }, - }, - { - exists: { field: 'transaction.type' }, - }, - ], - }, - }, - }, - }; - const response = await framework.callWithRequest<{}, {}>(requestContext, 'search', params); - return response.hits.total.value !== 0; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts b/x-pack/legacy/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts deleted file mode 100644 index 8b6bb49d9f645..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metadata/lib/pick_feature_name.ts +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { InfraMetadataAggregationBucket } from '../../../lib/adapters/framework'; - -export const pickFeatureName = (buckets: InfraMetadataAggregationBucket[]): string[] => { - if (buckets) { - const metadata = buckets.map(bucket => bucket.key); - return metadata; - } else { - return []; - } -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/index.ts b/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/index.ts deleted file mode 100644 index 64cdb9318b6e1..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/index.ts +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import Boom from 'boom'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { schema } from '@kbn/config-schema'; -import { InfraBackendLibs } from '../../lib/infra_types'; -import { getGroupings } from './lib/get_groupings'; -import { populateSeriesWithTSVBData } from './lib/populate_series_with_tsvb_data'; -import { metricsExplorerRequestBodyRT, metricsExplorerResponseRT } from '../../../common/http_api'; -import { throwErrors } from '../../../common/runtime_types'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initMetricExplorerRoute = (libs: InfraBackendLibs) => { - const { framework } = libs; - const { callWithRequest } = framework; - - framework.registerRoute( - { - method: 'post', - path: '/api/infra/metrics_explorer', - validate: { - body: escapeHatch, - }, - }, - async (requestContext, request, response) => { - try { - const payload = pipe( - metricsExplorerRequestBodyRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - - const search = (searchOptions: object) => - callWithRequest<{}, Aggregation>(requestContext, 'search', searchOptions); - - // First we get the groupings from a composite aggregation - const groupings = await getGroupings(search, payload); - - // Then we take the results and fill in the data from TSVB with the - // user's custom metrics - const seriesWithMetrics = await Promise.all( - groupings.series.map( - populateSeriesWithTSVBData(request, payload, framework, requestContext) - ) - ); - return response.ok({ - body: metricsExplorerResponseRT.encode({ ...groupings, series: seriesWithMetrics }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts 
b/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts deleted file mode 100644 index 9e5fe16d482b2..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/create_metrics_model.ts +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { InfraMetricModelMetricType } from '../../../lib/adapters/metrics'; -import { MetricsExplorerRequestBody } from '../types'; -import { InfraMetric } from '../../../graphql/types'; -import { TSVBMetricModel } from '../../../../common/inventory_models/types'; -export const createMetricModel = (options: MetricsExplorerRequestBody): TSVBMetricModel => { - return { - id: InfraMetric.custom, - requires: [], - index_pattern: options.indexPattern, - interval: options.timerange.interval, - time_field: options.timerange.field, - type: 'timeseries', - // Create one series per metric requested. The series.id will be used to identify the metric - // when the responses are processed and combined with the grouping request. - series: options.metrics.map((metric, index) => { - // If the metric is a rate then we need to add TSVB metrics for calculating the derivative - if (metric.aggregation === 'rate') { - const aggType = 'max'; - return { - id: `metric_${index}`, - split_mode: 'everything', - metrics: [ - { - id: `metric_${aggType}_${index}`, - field: metric.field, - type: aggType, - }, - { - id: `metric_deriv_${aggType}_${index}`, - field: `metric_${aggType}_${index}`, - type: 'derivative', - unit: '1s', - }, - { - id: `metric_posonly_deriv_${aggType}_${index}`, - type: 'calculation', - variables: [ - { id: 'var-rate', name: 'rate', field: `metric_deriv_${aggType}_${index}` }, - ], - script: 'params.rate > 0.0 ? 
params.rate : 0.0', - }, - ], - }; - } - // Create a basic TSVB series with a single metric - const aggregation = metric.aggregation || 'avg'; - - return { - id: `metric_${index}`, - split_mode: 'everything', - metrics: [ - { - field: metric.field, - id: `metric_${aggregation}_${index}`, - type: InfraMetricModelMetricType[aggregation], - }, - ], - }; - }), - }; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts b/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts deleted file mode 100644 index 7111d3e7f8ca4..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/get_groupings.ts +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { isObject, set } from 'lodash'; -import { InfraDatabaseSearchResponse } from '../../../lib/adapters/framework'; -import { MetricsExplorerRequestBody, MetricsExplorerResponse } from '../types'; - -interface GroupingAggregation { - groupingsCount: { - value: number; - }; - groupings: { - after_key?: { - [name: string]: string; - }; - buckets: Array<{ key: { [id: string]: string }; doc_count: number }>; - }; -} - -const EMPTY_RESPONSE = { - series: [{ id: 'ALL', columns: [], rows: [] }], - pageInfo: { total: 0, afterKey: null }, -}; - -export const getGroupings = async ( - search: (options: object) => Promise>, - options: MetricsExplorerRequestBody -): Promise => { - if (!options.groupBy) { - return EMPTY_RESPONSE; - } - const limit = options.limit || 9; - const params = { - allowNoIndices: true, - ignoreUnavailable: true, - index: options.indexPattern, - body: { - size: 0, - query: { - bool: { - should: [ - ...options.metrics - .filter(m => m.field) - .map(m => ({ - exists: { field: m.field }, - })), - ], - 
filter: [ - { - range: { - [options.timerange.field]: { - gte: options.timerange.from, - lte: options.timerange.to, - format: 'epoch_millis', - }, - }, - }, - ] as object[], - }, - }, - aggs: { - groupingsCount: { - cardinality: { field: options.groupBy }, - }, - groupings: { - composite: { - size: limit, - sources: [{ groupBy: { terms: { field: options.groupBy, order: 'asc' } } }], - }, - }, - }, - }, - }; - - if (params.body.query.bool.should.length !== 0) { - set(params, 'body.query.bool.minimum_should_match', 1); - } - - if (options.afterKey) { - set(params, 'body.aggs.groupings.composite.after', { groupBy: options.afterKey }); - } - - if (options.filterQuery) { - try { - const filterObject = JSON.parse(options.filterQuery); - if (isObject(filterObject)) { - params.body.query.bool.filter.push(filterObject); - } - } catch (err) { - params.body.query.bool.filter.push({ - query_string: { - query: options.filterQuery, - analyze_wildcard: true, - }, - }); - } - } - - const response = await search(params); - if (response.hits.total.value === 0) { - return { ...EMPTY_RESPONSE, series: [] }; - } - if (!response.aggregations) { - throw new Error('Aggregations should be present.'); - } - const { groupings, groupingsCount } = response.aggregations; - const { after_key: afterKey } = groupings; - return { - series: groupings.buckets.map(bucket => { - return { id: bucket.key.groupBy, rows: [], columns: [] }; - }), - pageInfo: { - total: groupingsCount.value, - afterKey: afterKey && groupings.buckets.length === limit ? 
afterKey.groupBy : null, - }, - }; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts b/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts deleted file mode 100644 index 17fc46b41278a..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/lib/populate_series_with_tsvb_data.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { union } from 'lodash'; -import { KibanaRequest, RequestHandlerContext } from 'src/core/server'; -import { KibanaFramework } from '../../../lib/adapters/framework/kibana_framework_adapter'; -import { - MetricsExplorerRow, - MetricsExplorerSeries, - MetricsExplorerRequestBody, - MetricsExplorerColumn, -} from '../types'; -import { createMetricModel } from './create_metrics_model'; -import { JsonObject } from '../../../../common/typed_json'; -import { calculateMetricInterval } from '../../../utils/calculate_metric_interval'; - -export const populateSeriesWithTSVBData = ( - request: KibanaRequest, - options: MetricsExplorerRequestBody, - framework: KibanaFramework, - requestContext: RequestHandlerContext -) => async (series: MetricsExplorerSeries) => { - // IF there are no metrics selected then we should return an empty result. - if (options.metrics.length === 0) { - return { - ...series, - columns: [], - rows: [], - }; - } - - // Set the filter for the group by or match everything - const filters: JsonObject[] = options.groupBy - ? 
[{ match: { [options.groupBy]: series.id } }] - : []; - if (options.filterQuery) { - try { - const filterQuery = JSON.parse(options.filterQuery); - filters.push(filterQuery); - } catch (error) { - filters.push({ - query_string: { - query: options.filterQuery, - analyze_wildcard: true, - }, - }); - } - } - const timerange = { min: options.timerange.from, max: options.timerange.to }; - - // Create the TSVB model based on the request options - const model = createMetricModel(options); - const calculatedInterval = await calculateMetricInterval( - framework, - requestContext, - { - indexPattern: options.indexPattern, - timestampField: options.timerange.field, - timerange: options.timerange, - }, - options.metrics - .filter(metric => metric.field) - .map(metric => { - return metric - .field!.split(/\./) - .slice(0, 2) - .join('.'); - }) - ); - - if (calculatedInterval) { - model.interval = `>=${calculatedInterval}s`; - } - - // Get TSVB results using the model, timerange and filters - const tsvbResults = await framework.makeTSVBRequest(requestContext, model, timerange, filters); - - // If there is no data `custom` will not exist. - if (!tsvbResults.custom) { - return { - ...series, - columns: [], - rows: [], - }; - } - - // Setup the dynamic columns and row attributes depending on if the user is doing a group by - // and multiple metrics - const attributeColumns: MetricsExplorerColumn[] = - options.groupBy != null ? [{ name: 'groupBy', type: 'string' }] : []; - const metricColumns: MetricsExplorerColumn[] = options.metrics.map((m, i) => ({ - name: `metric_${i}`, - type: 'number', - })); - const rowAttributes = options.groupBy != null ? { groupBy: series.id } : {}; - - // To support multiple metrics, there are multiple TSVB series which need to be combined - // into one MetricExplorerRow (Canvas row). This is done by collecting all the timestamps - // across each TSVB series. Then for each timestamp we find the values and create a - // MetricsExplorerRow. 
- const timestamps = tsvbResults.custom.series.reduce( - (currentTimestamps, tsvbSeries) => - union( - currentTimestamps, - tsvbSeries.data.map(row => row[0]) - ).sort(), - [] as number[] - ); - // Combine the TSVB series for multiple metrics. - const rows = timestamps.map(timestamp => { - return tsvbResults.custom.series.reduce( - (currentRow, tsvbSeries) => { - const matches = tsvbSeries.data.find(d => d[0] === timestamp); - if (matches) { - return { ...currentRow, [tsvbSeries.id]: matches[1] }; - } - return currentRow; - }, - { timestamp, ...rowAttributes } as MetricsExplorerRow - ); - }); - return { - ...series, - rows, - columns: [ - { name: 'timestamp', type: 'date' } as MetricsExplorerColumn, - ...metricColumns, - ...attributeColumns, - ], - }; -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/types.ts b/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/types.ts deleted file mode 100644 index f4c5e26c5c6d1..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/metrics_explorer/types.ts +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import * as rt from 'io-ts'; -import { - metricsExplorerMetricRT, - metricsExplorerPageInfoRT, - metricsExplorerColumnRT, - metricsExplorerRowRT, - metricsExplorerSeriesRT, - metricsExplorerRequestBodyRT, - metricsExplorerResponseRT, - metricsExplorerAggregationRT, - metricsExplorerColumnTypeRT, -} from '../../../common/http_api'; - -export type MetricsExplorerAggregation = rt.TypeOf; - -export type MetricsExplorerColumnType = rt.TypeOf; - -export type MetricsExplorerMetric = rt.TypeOf; - -export type MetricsExplorerPageInfo = rt.TypeOf; - -export type MetricsExplorerColumn = rt.TypeOf; - -export type MetricsExplorerRow = rt.TypeOf; - -export type MetricsExplorerSeries = rt.TypeOf; - -export type MetricsExplorerRequestBody = rt.TypeOf; - -export type MetricsExplorerResponse = rt.TypeOf; diff --git a/x-pack/legacy/plugins/infra/server/routes/node_details/index.ts b/x-pack/legacy/plugins/infra/server/routes/node_details/index.ts deleted file mode 100644 index a9419cd27e684..0000000000000 --- a/x-pack/legacy/plugins/infra/server/routes/node_details/index.ts +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -import Boom from 'boom'; -import { schema } from '@kbn/config-schema'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { InfraBackendLibs } from '../../lib/infra_types'; -import { UsageCollector } from '../../usage/usage_collector'; -import { InfraMetricsRequestOptions } from '../../lib/adapters/metrics'; -import { InfraNodeType, InfraMetric } from '../../graphql/types'; -import { - NodeDetailsRequestRT, - NodeDetailsMetricDataResponseRT, -} from '../../../common/http_api/node_details_api'; -import { throwErrors } from '../../../common/runtime_types'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initNodeDetailsRoute = (libs: InfraBackendLibs) => { - const { framework } = libs; - - framework.registerRoute( - { - method: 'post', - path: '/api/metrics/node_details', - validate: { - body: escapeHatch, - }, - }, - async (requestContext, request, response) => { - try { - const { nodeId, cloudId, nodeType, metrics, timerange, sourceId } = pipe( - NodeDetailsRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - const source = await libs.sources.getSourceConfiguration(requestContext, sourceId); - - UsageCollector.countNode(nodeType); - - const options: InfraMetricsRequestOptions = { - nodeIds: { - nodeId, - cloudId, - }, - nodeType: nodeType as InfraNodeType, - sourceConfiguration: source.configuration, - metrics: metrics as InfraMetric[], - timerange, - }; - return response.ok({ - body: NodeDetailsMetricDataResponseRT.encode({ - metrics: await libs.metrics.getMetrics(requestContext, options, request), - }), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/routes/snapshot/index.ts b/x-pack/legacy/plugins/infra/server/routes/snapshot/index.ts deleted file mode 100644 index ba7f52e9ec1e7..0000000000000 --- 
a/x-pack/legacy/plugins/infra/server/routes/snapshot/index.ts +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -import Boom from 'boom'; -import { schema } from '@kbn/config-schema'; -import { pipe } from 'fp-ts/lib/pipeable'; -import { fold } from 'fp-ts/lib/Either'; -import { identity } from 'fp-ts/lib/function'; -import { InfraBackendLibs } from '../../lib/infra_types'; -import { UsageCollector } from '../../usage/usage_collector'; -import { parseFilterQuery } from '../../utils/serialized_query'; -import { InfraNodeType, InfraSnapshotMetricInput } from '../../../public/graphql/types'; -import { SnapshotRequestRT, SnapshotNodeResponseRT } from '../../../common/http_api/snapshot_api'; -import { throwErrors } from '../../../common/runtime_types'; -import { InfraSnapshotRequestOptions } from '../../lib/snapshot/types'; - -const escapeHatch = schema.object({}, { allowUnknowns: true }); - -export const initSnapshotRoute = (libs: InfraBackendLibs) => { - const { framework } = libs; - - framework.registerRoute( - { - method: 'post', - path: '/api/metrics/snapshot', - validate: { - body: escapeHatch, - }, - }, - async (requestContext, request, response) => { - try { - const { - filterQuery, - nodeType, - groupBy, - sourceId, - metric, - timerange, - accountId, - region, - } = pipe( - SnapshotRequestRT.decode(request.body), - fold(throwErrors(Boom.badRequest), identity) - ); - const source = await libs.sources.getSourceConfiguration(requestContext, sourceId); - UsageCollector.countNode(nodeType); - const options: InfraSnapshotRequestOptions = { - filterQuery: parseFilterQuery(filterQuery), - accountId, - region, - // TODO: Use common infra metric and replace graphql type - nodeType: nodeType as InfraNodeType, - groupBy, - sourceConfiguration: 
source.configuration, - // TODO: Use common infra metric and replace graphql type - metric: metric as InfraSnapshotMetricInput, - timerange, - }; - const nodesWithInterval = await libs.snapshot.getNodes(requestContext, options); - return response.ok({ - body: SnapshotNodeResponseRT.encode(nodesWithInterval), - }); - } catch (error) { - return response.internalError({ - body: error.message, - }); - } - } - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/saved_objects.ts b/x-pack/legacy/plugins/infra/server/saved_objects.ts deleted file mode 100644 index 2e554300b0ecb..0000000000000 --- a/x-pack/legacy/plugins/infra/server/saved_objects.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { infraSourceConfigurationSavedObjectMappings } from './lib/sources'; -import { metricsExplorerViewSavedObjectMappings } from '../common/saved_objects/metrics_explorer_view'; -import { inventoryViewSavedObjectMappings } from '../common/saved_objects/inventory_view'; - -export const savedObjectMappings = { - ...infraSourceConfigurationSavedObjectMappings, - ...metricsExplorerViewSavedObjectMappings, - ...inventoryViewSavedObjectMappings, -}; diff --git a/x-pack/legacy/plugins/infra/server/usage/usage_collector.ts b/x-pack/legacy/plugins/infra/server/usage/usage_collector.ts deleted file mode 100644 index 60b9372b135df..0000000000000 --- a/x-pack/legacy/plugins/infra/server/usage/usage_collector.ts +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { UsageCollectionSetup } from 'src/plugins/usage_collection/server'; -import { InfraNodeType } from '../graphql/types'; -import { InventoryItemType } from '../../common/inventory_models/types'; - -const KIBANA_REPORTING_TYPE = 'infraops'; - -interface InfraopsSum { - infraopsHosts: number; - infraopsDocker: number; - infraopsKubernetes: number; - logs: number; -} - -export class UsageCollector { - public static registerUsageCollector(usageCollection: UsageCollectionSetup): void { - const collector = UsageCollector.getUsageCollector(usageCollection); - usageCollection.registerCollector(collector); - } - - public static getUsageCollector(usageCollection: UsageCollectionSetup) { - return usageCollection.makeUsageCollector({ - type: KIBANA_REPORTING_TYPE, - isReady: () => true, - fetch: async () => { - return this.getReport(); - }, - }); - } - - public static countNode(nodeType: InventoryItemType) { - const bucket = this.getBucket(); - this.maybeInitializeBucket(bucket); - - switch (nodeType) { - case InfraNodeType.pod: - this.counters[bucket].infraopsKubernetes += 1; - break; - case InfraNodeType.container: - this.counters[bucket].infraopsDocker += 1; - break; - default: - this.counters[bucket].infraopsHosts += 1; - } - } - - public static countLogs() { - const bucket = this.getBucket(); - this.maybeInitializeBucket(bucket); - this.counters[bucket].logs += 1; - } - - private static counters: any = {}; - private static BUCKET_SIZE = 3600; // seconds in an hour - private static BUCKET_NUMBER = 24; // report the last 24 hours - - private static getBucket() { - const now = Math.floor(Date.now() / 1000); - return now - (now % this.BUCKET_SIZE); - } - - private static maybeInitializeBucket(bucket: any) { - if (!this.counters[bucket]) { - this.counters[bucket] = { - infraopsHosts: 0, - infraopsDocker: 0, - infraopsKubernetes: 0, - logs: 0, - }; - } - } - - private static getReport() { - const keys = Object.keys(this.counters); - - // only keep the newest 
BUCKET_NUMBER buckets - const cutoff = this.getBucket() - this.BUCKET_SIZE * (this.BUCKET_NUMBER - 1); - keys.forEach(key => { - if (parseInt(key, 10) < cutoff) { - delete this.counters[key]; - } - }); - - // all remaining buckets are current - const sums = Object.keys(this.counters).reduce( - (a: InfraopsSum, b: any) => { - const key = parseInt(b, 10); - return { - infraopsHosts: a.infraopsHosts + this.counters[key].infraopsHosts, - infraopsDocker: a.infraopsDocker + this.counters[key].infraopsDocker, - infraopsKubernetes: a.infraopsKubernetes + this.counters[key].infraopsKubernetes, - logs: a.logs + this.counters[key].logs, - }; - }, - { - infraopsHosts: 0, - infraopsDocker: 0, - infraopsKubernetes: 0, - logs: 0, - } - ); - - return { - last_24_hours: { - hits: { - infraops_hosts: sums.infraopsHosts, - infraops_docker: sums.infraopsDocker, - infraops_kubernetes: sums.infraopsKubernetes, - logs: sums.logs, - }, - }, - }; - } -} diff --git a/x-pack/legacy/plugins/infra/server/utils/README.md b/x-pack/legacy/plugins/infra/server/utils/README.md deleted file mode 100644 index 8a6a27aa29867..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/README.md +++ /dev/null @@ -1 +0,0 @@ -Utils should be data processing functions and other tools.... all in all utils is basicly everything that is not an adaptor, or presenter and yet too much to put in a lib. \ No newline at end of file diff --git a/x-pack/legacy/plugins/infra/server/utils/calculate_metric_interval.ts b/x-pack/legacy/plugins/infra/server/utils/calculate_metric_interval.ts deleted file mode 100644 index 586193a3c242d..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/calculate_metric_interval.ts +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { RequestHandlerContext } from 'src/core/server'; -import { InfraNodeType } from '../graphql/types'; -import { findInventoryModel } from '../../common/inventory_models'; -import { KibanaFramework } from '../lib/adapters/framework/kibana_framework_adapter'; - -interface Options { - indexPattern: string; - timestampField: string; - timerange: { - from: number; - to: number; - }; -} - -/** - * Look at the data from metricbeat and get the max period for a given timerange. - * This is useful for visualizing metric modules like s3 that only send metrics once per day. - */ -export const calculateMetricInterval = async ( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - options: Options, - modules?: string[], - nodeType?: InfraNodeType // TODO: check that this type still makes sense -) => { - let from = options.timerange.from; - if (nodeType) { - const inventoryModel = findInventoryModel(nodeType); - from = options.timerange.to - inventoryModel.metrics.defaultTimeRangeInSeconds * 1000; - } - const query = { - allowNoIndices: true, - index: options.indexPattern, - ignoreUnavailable: true, - body: { - query: { - bool: { - filter: [ - { - range: { - [options.timestampField]: { - gte: from, - lte: options.timerange.to, - format: 'epoch_millis', - }, - }, - }, - ], - }, - }, - size: 0, - aggs: { - modules: { - terms: { - field: 'event.dataset', - include: modules, - }, - aggs: { - period: { - max: { - field: 'metricset.period', - }, - }, - }, - }, - }, - }, - }; - - const resp = await framework.callWithRequest<{}, PeriodAggregationData>( - requestContext, - 'search', - query - ); - - // if ES doesn't return an aggregations key, something went seriously wrong. 
- if (!resp.aggregations) { - return; - } - - const intervals = resp.aggregations.modules.buckets.map(a => a.period.value).filter(v => !!v); - if (!intervals.length) { - return; - } - - return Math.max(...intervals) / 1000; -}; - -interface PeriodAggregationData { - modules: { - buckets: Array<{ - key: string; - doc_count: number; - period: { - value: number; - }; - }>; - }; -} diff --git a/x-pack/legacy/plugins/infra/server/utils/create_afterkey_handler.ts b/x-pack/legacy/plugins/infra/server/utils/create_afterkey_handler.ts deleted file mode 100644 index 559fba0799987..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/create_afterkey_handler.ts +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { set } from 'lodash'; -import { InfraDatabaseSearchResponse } from '../lib/adapters/framework'; - -export const createAfterKeyHandler = ( - optionsAfterKeyPath: string | string[], - afterKeySelector: (input: InfraDatabaseSearchResponse) => any -) => (options: Options, response: InfraDatabaseSearchResponse): Options => { - if (!response.aggregations) { - return options; - } - const newOptions = { ...options }; - const afterKey = afterKeySelector(response); - set(newOptions, optionsAfterKeyPath, afterKey); - return newOptions; -}; diff --git a/x-pack/legacy/plugins/infra/server/utils/get_all_composite_data.ts b/x-pack/legacy/plugins/infra/server/utils/get_all_composite_data.ts deleted file mode 100644 index c7ff1b077f685..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/get_all_composite_data.ts +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { RequestHandlerContext } from 'src/core/server'; -import { KibanaFramework } from '../lib/adapters/framework/kibana_framework_adapter'; -import { InfraDatabaseSearchResponse } from '../lib/adapters/framework'; - -export const getAllCompositeData = async < - Aggregation = undefined, - Bucket = {}, - Options extends object = {} ->( - framework: KibanaFramework, - requestContext: RequestHandlerContext, - options: Options, - bucketSelector: (response: InfraDatabaseSearchResponse<{}, Aggregation>) => Bucket[], - onAfterKey: (options: Options, response: InfraDatabaseSearchResponse<{}, Aggregation>) => Options, - previousBuckets: Bucket[] = [] -): Promise => { - const response = await framework.callWithRequest<{}, Aggregation>( - requestContext, - 'search', - options - ); - - // Nothing available, return the previous buckets. - if (response.hits.total.value === 0) { - return previousBuckets; - } - - // if ES doesn't return an aggregations key, something went seriously wrong. - if (!response.aggregations) { - throw new Error('Whoops!, `aggregations` key must always be returned.'); - } - - const currentBuckets = bucketSelector(response); - - // if there are no currentBuckets then we are finished paginating through the results - if (currentBuckets.length === 0) { - return previousBuckets; - } - - // There is possibly more data, concat previous and current buckets and call ourselves recursively. 
- const newOptions = onAfterKey(options, response); - return getAllCompositeData( - framework, - requestContext, - newOptions, - bucketSelector, - onAfterKey, - previousBuckets.concat(currentBuckets) - ); -}; diff --git a/x-pack/legacy/plugins/infra/server/utils/get_interval_in_seconds.ts b/x-pack/legacy/plugins/infra/server/utils/get_interval_in_seconds.ts deleted file mode 100644 index 297e5828956af..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/get_interval_in_seconds.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -const intervalUnits = ['y', 'M', 'w', 'd', 'h', 'm', 's', 'ms']; -const INTERVAL_STRING_RE = new RegExp('^([0-9\\.]*)\\s*(' + intervalUnits.join('|') + ')$'); - -interface UnitsToSeconds { - [unit: string]: number; -} - -const units: UnitsToSeconds = { - ms: 0.001, - s: 1, - m: 60, - h: 3600, - d: 86400, - w: 86400 * 7, - M: 86400 * 30, - y: 86400 * 356, -}; - -export const getIntervalInSeconds = (interval: string): number => { - const matches = interval.match(INTERVAL_STRING_RE); - if (matches) { - return parseFloat(matches[1]) * units[matches[2]]; - } - throw new Error('Invalid interval string format.'); -}; diff --git a/x-pack/legacy/plugins/infra/server/utils/serialized_query.ts b/x-pack/legacy/plugins/infra/server/utils/serialized_query.ts deleted file mode 100644 index 932df847e65d0..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/serialized_query.ts +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -import { UserInputError } from 'apollo-server-errors'; - -import { JsonObject } from '../../common/typed_json'; - -export const parseFilterQuery = ( - filterQuery: string | null | undefined -): JsonObject | undefined => { - try { - if (filterQuery) { - const parsedFilterQuery = JSON.parse(filterQuery); - if ( - !parsedFilterQuery || - ['string', 'number', 'boolean'].includes(typeof parsedFilterQuery) || - Array.isArray(parsedFilterQuery) - ) { - throw new Error('expected value to be an object'); - } - return parsedFilterQuery; - } else { - return undefined; - } - } catch (err) { - throw new UserInputError(`Failed to parse query: ${err}`, { - query: filterQuery, - originalError: err, - }); - } -}; diff --git a/x-pack/legacy/plugins/infra/server/utils/typed_elasticsearch_mappings.ts b/x-pack/legacy/plugins/infra/server/utils/typed_elasticsearch_mappings.ts deleted file mode 100644 index f18b9f3de55c9..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/typed_elasticsearch_mappings.ts +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -export type ElasticsearchMappingOf = Type extends string - ? ElasticsearchStringFieldMapping - : Type extends number - ? ElasticsearchNumberFieldMapping - : Type extends boolean - ? ElasticsearchBooleanFieldMapping - : Type extends object[] - ? ElasticsearchNestedFieldMapping - : Type extends {} - ? 
ElasticsearchObjectFieldMapping - : never; - -export interface ElasticsearchStringFieldMapping { - type: 'keyword' | 'text'; -} - -export interface ElasticsearchBooleanFieldMapping { - type: 'boolean'; -} - -export interface ElasticsearchNumberFieldMapping { - type: - | 'long' - | 'integer' - | 'short' - | 'byte' - | 'double' - | 'float' - | 'half_float' - | 'scaled_float' - | 'date'; -} - -export interface ElasticsearchNestedFieldMapping { - type?: 'nested'; - properties: { [K in keyof Obj[0]]-?: ElasticsearchMappingOf }; -} - -export interface ElasticsearchObjectFieldMapping { - type?: 'object'; - properties: { [K in keyof Obj]-?: ElasticsearchMappingOf }; -} diff --git a/x-pack/legacy/plugins/infra/server/utils/typed_resolvers.ts b/x-pack/legacy/plugins/infra/server/utils/typed_resolvers.ts deleted file mode 100644 index d5f2d00abd504..0000000000000 --- a/x-pack/legacy/plugins/infra/server/utils/typed_resolvers.ts +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { Resolver } from '../graphql/types'; - -type ResolverResult = R | Promise; - -type InfraResolverResult = - | Promise - | Promise<{ [P in keyof R]: () => Promise }> - | { [P in keyof R]: () => Promise } - | { [P in keyof R]: () => R[P] } - | R; - -export type ResultOf = Resolver_ extends Resolver> - ? Result - : never; - -export type SubsetResolverWithFields = R extends Resolver< - Array, - infer ParentInArray, - infer ContextInArray, - infer ArgsInArray -> - ? Resolver< - Array>>, - ParentInArray, - ContextInArray, - ArgsInArray - > - : R extends Resolver - ? Resolver>, Parent, Context, Args> - : never; - -export type SubsetResolverWithoutFields = R extends Resolver< - Array, - infer ParentInArray, - infer ContextInArray, - infer ArgsInArray -> - ? 
Resolver< - Array>>, - ParentInArray, - ContextInArray, - ArgsInArray - > - : R extends Resolver - ? Resolver>, Parent, Context, Args> - : never; - -export type ResolverWithParent = Resolver_ extends Resolver< - infer Result, - any, - infer Context, - infer Args -> - ? Resolver - : never; - -export type InfraResolver = Resolver< - InfraResolverResult, - Parent, - Context, - Args ->; - -export type InfraResolverOf = Resolver_ extends Resolver< - ResolverResult, - never, - infer ContextWithNeverParent, - infer ArgsWithNeverParent -> - ? InfraResolver - : Resolver_ extends Resolver< - ResolverResult, - infer Parent, - infer Context, - infer Args - > - ? InfraResolver - : never; - -export type InfraResolverWithFields = InfraResolverOf< - SubsetResolverWithFields ->; - -export type InfraResolverWithoutFields = InfraResolverOf< - SubsetResolverWithoutFields ->; - -export type ChildResolverOf = ResolverWithParent< - Resolver_, - ResultOf ->; diff --git a/x-pack/legacy/plugins/monitoring/index.js b/x-pack/legacy/plugins/monitoring/index.js index 3d98b11c2045b..1a7cbac9a167b 100644 --- a/x-pack/legacy/plugins/monitoring/index.js +++ b/x-pack/legacy/plugins/monitoring/index.js @@ -84,6 +84,6 @@ export const monitoring = kibana => uiExports: getUiExports(), postInit(server) { const serverConfig = server.config(); - initInfraSource(serverConfig, server.plugins.infra); + initInfraSource(serverConfig, server.newPlatform.setup.plugins.infra); }, }); diff --git a/x-pack/plugins/infra/common/time/time_key.ts b/x-pack/plugins/infra/common/time/time_key.ts index 117cd38314de0..e4f41615eb484 100644 --- a/x-pack/plugins/infra/common/time/time_key.ts +++ b/x-pack/plugins/infra/common/time/time_key.ts @@ -5,7 +5,7 @@ */ import { ascending, bisector } from 'd3-array'; -import pick from 'lodash/fp/pick'; +import { pick } from 'lodash'; export interface TimeKey { time: number; @@ -27,7 +27,7 @@ export const isTimeKey = (value: any): value is TimeKey => typeof value.tiebreaker === 'number'; 
export const pickTimeKey = (value: T): TimeKey => - pick(['time', 'tiebreaker'], value); + pick(value, ['time', 'tiebreaker']); export function compareTimeKeys( firstKey: TimeKey, diff --git a/x-pack/plugins/infra/kibana.json b/x-pack/plugins/infra/kibana.json index ec5420a4d28d5..bf7146c9605c5 100644 --- a/x-pack/plugins/infra/kibana.json +++ b/x-pack/plugins/infra/kibana.json @@ -2,5 +2,7 @@ "id": "infra", "version": "8.0.0", "kibanaVersion": "kibana", - "server": true + "requiredPlugins": ["features", "apm", "usageCollection", "spaces"], + "server": true, + "ui": false } diff --git a/x-pack/plugins/infra/server/lib/infra_types.ts b/x-pack/plugins/infra/server/lib/infra_types.ts index 46d32885600df..fc92dacde4f7b 100644 --- a/x-pack/plugins/infra/server/lib/infra_types.ts +++ b/x-pack/plugins/infra/server/lib/infra_types.ts @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -import { InfraSourceConfiguration } from '../../public/graphql/types'; +import { InfraSourceConfiguration } from '../../common/graphql/types'; import { InfraFieldsDomain } from './domains/fields_domain'; import { InfraLogEntriesDomain } from './domains/log_entries_domain'; import { InfraMetricsDomain } from './domains/metrics_domain'; @@ -12,7 +12,7 @@ import { InfraLogAnalysis } from './log_analysis/log_analysis'; import { InfraSnapshot } from './snapshot'; import { InfraSources } from './sources'; import { InfraSourceStatus } from './source_status'; -import { InfraConfig } from '../../../../../plugins/infra/server'; +import { InfraConfig } from '../plugin'; import { KibanaFramework } from './adapters/framework/kibana_framework_adapter'; // NP_TODO: We shouldn't need this context anymore but I am diff --git a/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts b/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts index fac49a7980f26..f46713bb300a8 100644 --- a/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts +++ 
b/x-pack/plugins/infra/server/lib/log_analysis/log_analysis.ts @@ -17,7 +17,7 @@ import { LogRateModelPlotBucket, CompositeTimestampPartitionKey, } from './queries'; -import { RequestHandlerContext, KibanaRequest } from '../../../../../../../src/core/server'; +import { RequestHandlerContext, KibanaRequest } from '../../../../../../src/core/server'; const COMPOSITE_AGGREGATION_BATCH_SIZE = 1000; From ca4d97df27c915cc2f2afcdd42dbf50b3f9b8e56 Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Fri, 3 Jan 2020 10:56:02 -0500 Subject: [PATCH 6/9] Adds metrics plugin dep back after cherry picking needed changes --- x-pack/plugins/infra/kibana.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugins/infra/kibana.json b/x-pack/plugins/infra/kibana.json index bf7146c9605c5..98385e7c50f4a 100644 --- a/x-pack/plugins/infra/kibana.json +++ b/x-pack/plugins/infra/kibana.json @@ -2,7 +2,7 @@ "id": "infra", "version": "8.0.0", "kibanaVersion": "kibana", - "requiredPlugins": ["features", "apm", "usageCollection", "spaces"], + "requiredPlugins": ["features", "apm", "usageCollection", "spaces", "metrics"], "server": true, "ui": false } From 9c7aa22fcf8f0003ced9756d72260eaa9bf338b8 Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Thu, 9 Jan 2020 08:55:33 -0500 Subject: [PATCH 7/9] Remove unused saved objects mapping -- need to replace before merge --- .../common/saved_objects/inventory_view.ts | 87 ----------------- .../saved_objects/metrics_explorer_view.ts | 94 ------------------- x-pack/plugins/infra/server/saved_objects.ts | 15 --- 3 files changed, 196 deletions(-) delete mode 100644 x-pack/plugins/infra/common/saved_objects/inventory_view.ts delete mode 100644 x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts delete mode 100644 x-pack/plugins/infra/server/saved_objects.ts diff --git a/x-pack/plugins/infra/common/saved_objects/inventory_view.ts b/x-pack/plugins/infra/common/saved_objects/inventory_view.ts deleted file mode 100644 index 
c86be102f85a8..0000000000000 --- a/x-pack/plugins/infra/common/saved_objects/inventory_view.ts +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { ElasticsearchMappingOf } from '../../server/utils/typed_elasticsearch_mappings'; -import { WaffleViewState } from '../../public/containers/waffle/with_waffle_view_state'; - -export const inventoryViewSavedObjectType = 'inventory-view'; -import { SavedViewSavedObject } from '../../public/hooks/use_saved_view'; - -export const inventoryViewSavedObjectMappings: { - [inventoryViewSavedObjectType]: ElasticsearchMappingOf>; -} = { - [inventoryViewSavedObjectType]: { - properties: { - name: { - type: 'keyword', - }, - metric: { - properties: { - type: { - type: 'keyword', - }, - }, - }, - groupBy: { - type: 'nested', - properties: { - label: { - type: 'keyword', - }, - field: { - type: 'keyword', - }, - }, - }, - nodeType: { - type: 'keyword', - }, - view: { - type: 'keyword', - }, - customOptions: { - type: 'nested', - properties: { - text: { - type: 'keyword', - }, - field: { - type: 'keyword', - }, - }, - }, - boundsOverride: { - properties: { - max: { - type: 'integer', - }, - min: { - type: 'integer', - }, - }, - }, - autoBounds: { - type: 'boolean', - }, - time: { - type: 'integer', - }, - autoReload: { - type: 'boolean', - }, - filterQuery: { - properties: { - kind: { - type: 'keyword', - }, - expression: { - type: 'keyword', - }, - }, - }, - }, - }, -}; diff --git a/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts b/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts deleted file mode 100644 index e4ec71907eaa8..0000000000000 --- a/x-pack/plugins/infra/common/saved_objects/metrics_explorer_view.ts +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { ElasticsearchMappingOf } from '../../server/utils/typed_elasticsearch_mappings'; -import { - MetricsExplorerOptions, - MetricsExplorerChartOptions, - MetricsExplorerTimeOptions, -} from '../../public/containers/metrics_explorer/use_metrics_explorer_options'; -import { SavedViewSavedObject } from '../../public/hooks/use_saved_view'; - -interface MetricsExplorerSavedView { - options: MetricsExplorerOptions; - chartOptions: MetricsExplorerChartOptions; - currentTimerange: MetricsExplorerTimeOptions; -} - -export const metricsExplorerViewSavedObjectType = 'metrics-explorer-view'; - -export const metricsExplorerViewSavedObjectMappings: { - [metricsExplorerViewSavedObjectType]: ElasticsearchMappingOf< - SavedViewSavedObject - >; -} = { - [metricsExplorerViewSavedObjectType]: { - properties: { - name: { - type: 'keyword', - }, - options: { - properties: { - metrics: { - type: 'nested', - properties: { - aggregation: { - type: 'keyword', - }, - field: { - type: 'keyword', - }, - color: { - type: 'keyword', - }, - label: { - type: 'keyword', - }, - }, - }, - limit: { - type: 'integer', - }, - groupBy: { - type: 'keyword', - }, - filterQuery: { - type: 'keyword', - }, - aggregation: { - type: 'keyword', - }, - }, - }, - chartOptions: { - properties: { - type: { - type: 'keyword', - }, - yAxisMode: { - type: 'keyword', - }, - stack: { - type: 'boolean', - }, - }, - }, - currentTimerange: { - properties: { - from: { - type: 'keyword', - }, - to: { - type: 'keyword', - }, - interval: { - type: 'keyword', - }, - }, - }, - }, - }, -}; diff --git a/x-pack/plugins/infra/server/saved_objects.ts b/x-pack/plugins/infra/server/saved_objects.ts deleted file mode 100644 index 2e554300b0ecb..0000000000000 --- 
a/x-pack/plugins/infra/server/saved_objects.ts +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -import { infraSourceConfigurationSavedObjectMappings } from './lib/sources'; -import { metricsExplorerViewSavedObjectMappings } from '../common/saved_objects/metrics_explorer_view'; -import { inventoryViewSavedObjectMappings } from '../common/saved_objects/inventory_view'; - -export const savedObjectMappings = { - ...infraSourceConfigurationSavedObjectMappings, - ...metricsExplorerViewSavedObjectMappings, - ...inventoryViewSavedObjectMappings, -}; From 4d944a9a71b79c2d0e604e73bef17732caac8acb Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Thu, 9 Jan 2020 08:56:45 -0500 Subject: [PATCH 8/9] Implements destructured imports for lodash methods Co-Authored-By: John Schulz --- .../lib/adapters/log_entries/kibana_log_entries_adapter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts index f26a6ab22e0a9..79fa19cf7241b 100644 --- a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts @@ -8,7 +8,7 @@ import { timeMilliseconds } from 'd3-time'; import * as runtimeTypes from 'io-ts'; -import _ from 'lodash'; +import { first, get, has, zip } from 'lodash'; import { pipe } from 'fp-ts/lib/pipeable'; import { map, fold } from 'fp-ts/lib/Either'; import { identity, constant } from 'fp-ts/lib/function'; From b3f8308590526ce231b1c6d5a35d24e73f855caa Mon Sep 17 00:00:00 2001 From: Jason Rhodes Date: Thu, 9 Jan 2020 15:29:08 -0500 Subject: [PATCH 9/9] Fixes 
lodash destructure change so methods are correctly called in code --- .../adapters/log_entries/kibana_log_entries_adapter.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts index 79fa19cf7241b..e7b1692547ead 100644 --- a/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts +++ b/x-pack/plugins/infra/server/lib/adapters/log_entries/kibana_log_entries_adapter.ts @@ -202,7 +202,7 @@ export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { }; const response = await search(params); - const document = _.first(response.hits.hits); + const document = first(response.hits.hits); if (!document) { throw new Error('Document not found'); } @@ -310,7 +310,7 @@ export class InfraKibanaLogEntriesAdapter implements LogEntriesAdapter { function getLookupIntervals(start: number, direction: 'asc' | 'desc'): Array<[number, number]> { const offsetSign = direction === 'asc' ? 1 : -1; const translatedOffsets = LOOKUP_OFFSETS.map(offset => start + offset * offsetSign); - const intervals = _.zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< + const intervals = zip(translatedOffsets.slice(0, -1), translatedOffsets.slice(1)) as Array< [number, number] >; return intervals; @@ -322,10 +322,10 @@ const convertHitToLogEntryDocument = (fields: string[]) => ( gid: hit._id, fields: fields.reduce( (flattenedFields, fieldName) => - _.has(hit._source, fieldName) + has(hit._source, fieldName) ? { ...flattenedFields, - [fieldName]: _.get(hit._source, fieldName), + [fieldName]: get(hit._source, fieldName), } : flattenedFields, {} as { [fieldName: string]: string | number | object | boolean | null }