-
Notifications
You must be signed in to change notification settings - Fork 245
/
Copy pathvalues.yaml
231 lines (198 loc) · 8.03 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
# Copyright OpenSearch Contributors
# SPDX-License-Identifier: Apache-2.0
# Default values for data-prepper.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Number of Data Prepper pod replicas
replicaCount: 1
global:
  # Set if you want to change the default docker registry, e.g. a private one.
  dockerRegistry: ""
image:
  # -- The image repository from which to pull the Data Prepper image
  repository: opensearchproject/data-prepper
  # -- The image pull policy. Default: IfNotPresent
  pullPolicy: IfNotPresent
  # -- Overrides the image tag whose default is the chart appVersion.
  tag: ""
# -- List of imagePullSecrets to use if the Docker image is stored in a private registry
imagePullSecrets: []
# -- Override the default name for the deployment
nameOverride: ""
# -- Override the default fullname for the deployment
fullnameOverride: ""
# -- Extra environment variables to pass to the Data Prepper container
extraEnvs: []
# - name: "JAVA_OPTS"
#   value: "-Dlog4j2.debug=true"
# Check https://opensearch.org/docs/latest/data-prepper/managing-data-prepper/configuring-data-prepper/
# for more information on the configuration options
# -- Data Prepper configuration
config:
  # -- Main Data Prepper configuration file content
  data-prepper-config.yaml: |
    ssl: false
    # circuit_breakers:
    #   heap:
    #     usage: 2gb
    #     reset: 30s
    #     check_interval: 5s
  # -- Log4j2 configuration for Data Prepper logging
  log4j2-rolling.properties: |
    #
    # Copyright OpenSearch Contributors
    # SPDX-License-Identifier: Apache-2.0
    #
    status = error
    dest = err
    name = PropertiesConfig
    property.filename = log/data-prepper/data-prepper.log
    appender.console.type = Console
    appender.console.name = STDOUT
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} [%t] %-5p %40C - %m%n
    appender.rolling.type = RollingFile
    appender.rolling.name = RollingFile
    appender.rolling.fileName = ${filename}
    appender.rolling.filePattern = logs/data-prepper.log.%d{MM-dd-yy-HH}-%i.gz
    appender.rolling.layout.type = PatternLayout
    appender.rolling.layout.pattern = %d{ISO8601} [%t] %-5p %40C - %m%n
    appender.rolling.policies.type = Policies
    appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
    appender.rolling.policies.time.interval = 1
    appender.rolling.policies.time.modulate = true
    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    appender.rolling.policies.size.size=100MB
    appender.rolling.strategy.type = DefaultRolloverStrategy
    appender.rolling.strategy.max = 168
    rootLogger.level = warn
    rootLogger.appenderRef.stdout.ref = STDOUT
    rootLogger.appenderRef.file.ref = RollingFile
    logger.pipeline.name = org.opensearch.dataprepper.pipeline
    logger.pipeline.level = info
    logger.parser.name = org.opensearch.dataprepper.parser
    logger.parser.level = info
    logger.plugins.name = org.opensearch.dataprepper.plugins
    logger.plugins.level = info
# The pipeline configuration for OpenSearch Data Prepper is crucial for defining the behavior and structure of your
# data processing pipelines. Each pipeline is defined with a unique name and can include `source`, `processor`, and
# `sink` components to ingest, process, and output data respectively. This flexible configuration allows for the
# creation of complex data processing flows, including the routing of data between pipelines.
# For detailed information on the available options and to get the most up-to-date guidance on configuring `pipeline.yaml`,
# please consult the [OpenSearch Documentation on Pipelines](https://opensearch.org/docs/2.4/data-prepper/pipelines/pipelines/).
# This resource provides comprehensive examples and explanations of each component, ensuring you can tailor your Data Prepper
# deployment to meet your specific data processing needs.
# -- Pipeline configuration
pipelineConfig:
  # -- If 'true', a secret containing a demo pipeline configuration with random source and stdout sink will be created.
  # If left undefined, the demo pipeline will be used only when no other pipeline is configured below
  demoPipeline: ""
  # -- The name of the existing secret containing the pipeline configuration.
  # If 'enabled' is false, 'existingSecret' is used. The existingSecret must have a key named `pipelines.yaml`.
  existingSecret: ""
  # -- If enabled, a secret containing the pipeline configuration will be created based on the 'config' section below.
  enabled: false
  # The configuration of the pipeline, see https://opensearch.org/docs/2.4/data-prepper/pipelines/pipelines/
  config:
    ## Provide your pipeline configuration here if 'enabled' is set to true. See documentation for more advanced pipelines
    # simple-sample-pipeline:
    #   workers: 2 # the number of workers
    #   delay: 5000 # in milliseconds, how long workers wait between read attempts
    #   source:
    #     random: {}
    #   buffer:
    #     bounded_blocking:
    #       buffer_size: 1024 # max number of records the buffer accepts
    #       batch_size: 256 # max number of records the buffer drains after each read
    #   processor:
    #     - string_converter:
    #         upper_case: true
    #   sink:
    #     - stdout: {}
# -- Data Prepper ports
ports:
# -- The port that the source is running on. Default value is 2021. Valid options are between 0 and 65535.
# https://opensearch.org/docs/latest/data-prepper/pipelines/configuration/sources/http-source/
- name: http-source
port: 2021
# -- The port that the otel_trace_source source runs on. Default value is 21890.
# https://opensearch.org/docs/latest/data-prepper/pipelines/configuration/sources/otel-trace-source/
- name: otel-traces
port: 21890
# -- The port that the OpenTelemtry metrics source runs on. Default value is 21891.
# https://opensearch.org/docs/latest/data-prepper/pipelines/configuration/sources/otel-metrics-source/
- name: otel-metrics
port: 21891
# -- Represents the port that the otel_logs_source source is running on. Default value is 21892.
# https://opensearch.org/docs/latest/data-prepper/pipelines/configuration/sources/otel-logs-source/
- name: otel-logs
port: 21892
serviceAccount:
  # -- Specifies whether a service account should be created
  create: true
  # -- Automatically mount a ServiceAccount's API credentials?
  automount: true
  # -- Annotations to add to the service account
  annotations: {}
  # -- The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
# -- Annotations to add to the Data Prepper pods
podAnnotations: {}
# -- Labels to add to the Data Prepper pods
podLabels: {}
podSecurityContext: {}
  # fsGroup: 2000
securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
# -- Init containers to add to the Data Prepper pods
initContainers: []
service:
  # -- Kubernetes Service type for Data Prepper
  type: ClusterIP
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}