#############################################################################
# Mandatory vars (will be enforced by pavics-compose.sh)
# You can add new vars but do not remove existing ones, else automated deployment will break
#
# Do NOT use environment variables in here since when pavics-compose.sh runs
# inside a container, the environment vars do not have the same value.
#
# Any default value that poses a security concern, or that is strongly
# recommended to be modified, should use the '__DEFAULT__{var}' definition and
# have those default definitions defined in 'default.env'. This will ensure
# that these example values are flagged by the script if left unmodified
# (see also: 'check_default_vars' in 'birdhouse/read-configs.include.sh').
#############################################################################
# Override data persistence root directory
# export DATA_PERSIST_ROOT="/data/custom/path" # otherwise use value of 'default.env', directory must exist
# Root directory for all files that are persisted on disk and may contain links (i.e. the files
# are "shared" between subdirectories). This means that the subdirectory structure is fixed.
#export DATA_PERSIST_SHARED_ROOT='${DATA_PERSIST_ROOT}' # otherwise use the value from 'default.env', must exist
export SSL_CERTIFICATE="${__DEFAULT__SSL_CERTIFICATE}" # *absolute* path to the nginx ssl certificate, path and key bundle
export PAVICS_FQDN="${__DEFAULT__PAVICS_FQDN}" # Fully qualified domain name of this Pavics installation
export DOC_URL="${__DEFAULT__DOC_URL}" # URL where /doc gets redirected
export MAGPIE_SECRET="${__DEFAULT__MAGPIE_SECRET}"
export MAGPIE_ADMIN_USERNAME="${__DEFAULT__MAGPIE_ADMIN_USERNAME}"
# Magpie now requires a password length of at least 12 characters.
# For the initial bootstrap only; change it in the Magpie Web UI after the initial bootstrap.
export MAGPIE_ADMIN_PASSWORD="${__DEFAULT__MAGPIE_ADMIN_PASSWORD}"
export TWITCHER_PROTECTED_PATH="/twitcher/ows/proxy"
export SUPPORT_EMAIL="${__DEFAULT__SUPPORT_EMAIL}"
export CMIP5_THREDDS_ROOT="birdhouse/CMIP5/CCCMA"
export POSTGRES_PAVICS_USERNAME="${__DEFAULT__POSTGRES_PAVICS_USERNAME}"
export POSTGRES_PAVICS_PASSWORD="${__DEFAULT__POSTGRES_PAVICS_PASSWORD}"
export POSTGRES_MAGPIE_USERNAME="${__DEFAULT__POSTGRES_MAGPIE_USERNAME}"
export POSTGRES_MAGPIE_PASSWORD="${__DEFAULT__POSTGRES_MAGPIE_PASSWORD}"
export GEOSERVER_ADMIN_USER="${__DEFAULT__GEOSERVER_ADMIN_USER}"
export GEOSERVER_ADMIN_PASSWORD="${__DEFAULT__GEOSERVER_ADMIN_PASSWORD}"
#############################################################################
# Optional vars
#############################################################################
# Extra dirs possibly containing:
# * `docker-compose-extra.yml` file to override the default docker-compose.yml file
# * `default.env` file to provide extra defaults for each component
# * `pre-docker-compose-up` script to execute before `docker-compose up`
# * `post-docker-compose-up` script to execute after `docker-compose up`
#
# Useful to split configs into different dirs leveraging docker-compose
# override capabilities, see https://docs.docker.com/compose/extends/.
#
# Relative paths are relative to the main docker-compose.yml file.
#
# Possible use-cases:
#
# * Split config to keep all private and sensitive info in a separate
#   config dir.
#
# * Manage different environments (prod, staging, dev, ...) with each env's
#   specific config in its own dir, leaving the default config dir with
#   generic/common config only.
#
# * Assemble different combinations of components/functionalities by including
# only the config/docker-compose fragment necessary.
#
# The last dir/component in the EXTRA_CONF_DIRS list has the highest override
# precedence, for example:
#
# * The last docker-compose volume mount to a given destination wins over any
#   previous docker-compose volume mount to that same destination.
#
# * The last default.env can change the values of all previous default.env files.
#
# * The last pre/post docker-compose-up script can potentially undo actions
#   from previous scripts.
#
# It is suggested to keep the private-config-repo last in the list so it can
# override anything.
#
# Note that a component listed in DEFAULT_CONF_DIRS or EXTRA_CONF_DIRS
# will load any dependent components (defined in the COMPONENT_DEPENDENCIES
# variable) immediately after the specified component.
#
# Format: space separated list of dirs
#
#export EXTRA_CONF_DIRS="/path/to/dir1 ./path/to/dir2 dir3 dir4"
#export EXTRA_CONF_DIRS="
# ./components/canarie-api
# ./components/geoserver
# ./components/finch
# ./components/raven
# ./components/hummingbird
# ./components/thredds
# ./components/portainer
# ./components/jupyterhub
# ./components/monitoring
# ./components/weaver
# ./components/scheduler
# ./optional-components/canarie-api-full-monitoring
# ./optional-components/emu
# ./optional-components/testthredds
# ./optional-components/generic_bird
# ./optional-components/all-public-access
# ./optional-components/secure-thredds
# ./optional-components/database-external-ports
# ./optional-components/wps-healthchecks
# ./optional-components/test-weaver
# ./optional-components/test-geoserver-secured-access
# /path/to/private-config-repo
#"
# Extra repos, other than the current repo, that the autodeploy should keep
# up-to-date. Any changes to these extra repos will also trigger autodeploy.
#
# Useful to save the instantiated version of this env.local config file and
# any custom docker-compose-extra.yml from the previous EXTRA_CONF_DIRS var.
#
# Note:
#
# * These extra repos must be git repos for the out-of-date detection that
#   triggers autodeploy to work. Do not add a regular (non-git) folder here;
#   out-of-date detection currently only works for git repos.
#
# * To preserve write permissions for your user, run once for this repo and
# once each time AUTODEPLOY_EXTRA_REPOS changes:
# deployment/fix-write-perm
#
# Format: space separated list of full path to dirs
#export AUTODEPLOY_EXTRA_REPOS="/path/to/dir1 /path/to/dir2 /path/to/dir3"
#export AUTODEPLOY_EXTRA_REPOS="/path/to/private-config-containing-env.local"
# For each git repo in AUTODEPLOY_EXTRA_REPOS that uses ssh to clone/fetch
# instead of https, provide its corresponding ssh deploy key in this dir.
#
# See the instructions in deployment/deploy.sh or
# https://developer.github.com/v3/guides/managing-deploy-keys/#deploy-keys for
# how to create deploy keys for your git repos.
#
# The autodeploy mechanism runs inside its own container so environment
# variables are not the same inside and outside the container. Do not use
# any environment vars, use their fully resolved values.
#
# Format of keys inside the dir: {repo-name-1}_deploy_key,
# {repo-name-2}_deploy_key, ...
#
# If the '{repo-name}_deploy_key' file is not found, it defaults to
# 'id_rsa_git_ssh_read_only'. So if multiple private repos share the same ssh
# deploy key, you can just name that shared key 'id_rsa_git_ssh_read_only' and
# create '{repo-name}_deploy_key' only for repos with a specific key.
#
# Example of keys inside the dir: dir1_deploy_key, dir2_deploy_key,
# private-config-containing-env.local_deploy_key,
# id_rsa_git_ssh_read_only
#
#export AUTODEPLOY_DEPLOY_KEY_ROOT_DIR="/path/to/ssh-deploy-keys-for-all-repos"
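#
# A minimal sketch of preparing that dir (paths are hypothetical): generate a
# passwordless read-only key, then register the '.pub' file as a deploy key
# on your git hosting service:
#
#   mkdir -p /path/to/ssh-deploy-keys-for-all-repos
#   ssh-keygen -t rsa -N "" \
#       -f /path/to/ssh-deploy-keys-for-all-repos/id_rsa_git_ssh_read_only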
# Frequency to trigger the various autodeploy tasks.
# See default.env for default.
#
# For all possible syntax, see implementation at
# https://github.com/Ouranosinc/docker-crontab/blob/3ac8cfa363b3f2ffdd0ead6089d355ff84521dc9/docker-entrypoint#L137-L184
#
# Ex:
# - daily at 5:07 AM: "7 5 * * *"
# - daily at midnight: "0 0 * * *" or "@daily"/"@midnight"
# - hourly: ""0 * * * *" or "@hourly"
# - every 2 hours: "*/120 * * * *" or "@every 2h"
# - every 5 minutes: "*/5 * * * *" or "@every 5m"
#
# "Platform" are all the git repos in AUTODEPLOY_EXTRA_REPOS.
#export AUTODEPLOY_PLATFORM_FREQUENCY="@every 5m"
#
# "Notebook" are all the tutorial notebooks on Jupyter.
#export AUTODEPLOY_NOTEBOOK_FREQUENCY="@every 5m"
# Add more jobs to ./components/scheduler/config.yml
#
# Potential usages: other deployments, backup jobs on the same machine
#
#export AUTODEPLOY_EXTRA_SCHEDULER_JOBS=""
# The scheduler runs as the root user, so new/updated files will be owned by root after the code is updated.
# If AUTODEPLOY_CODE_OWNERSHIP is set, the ownership of all files and directories in this repo will be set to that user
# after each autodeploy update.
# AUTODEPLOY_CODE_OWNERSHIP should contain uids instead of usernames since usernames within a docker container will
# not necessarily line up with those on the host system.
# AUTODEPLOY_CODE_OWNERSHIP should be set to a constant value (e.g. "1000" not "$(id -u)") since this will be evaluated
# within the autodeploy docker container as the root user.
#export AUTODEPLOY_CODE_OWNERSHIP="1000:1000"
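#
# To find the literal uid:gid pair to paste above, run this on the host
# ('myuser' is just an example username):
#
#   echo "$(id -u myuser):$(id -g myuser)"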
# Load pre-configured job to auto-renew LetsEncrypt SSL certificate if a
# LetsEncrypt SSL certificate has previously been requested.
#
# This job performs the renewal only, not the initial request. You must have
# performed the initial request using the script deployment/certbotwrapper.
#
# See the job for additional possible configurations. The "scheduler"
# component needs to be enabled for this pre-configured job to work.
#
# This job overrides the value of SSL_CERTIFICATE set earlier in this file, so
# make sure the job is sourced after the last definition of SSL_CERTIFICATE.
#
#if [ -f "/<absolute path>/components/scheduler/renew_letsencrypt_ssl_cert_extra_job.env" ]; then
# . /<absolute path>/components/scheduler/renew_letsencrypt_ssl_cert_extra_job.env
#fi
#
# Load pre-configured cronjob to automatically deploy Raven testdata to Thredds
# for Raven tutorial notebooks.
#
# See the job for additional possible configurations. The "scheduler"
# component needs to be enabled for this pre-configured job to work.
#
#if [ -f "$COMPOSE_DIR/components/scheduler/deploy_raven_testdata_to_thredds.env" \
# -a -f "$COMPOSE_DIR/components/scheduler/deploy_data_job.env" ]; then
# . $COMPOSE_DIR/components/scheduler/deploy_raven_testdata_to_thredds.env
# . $COMPOSE_DIR/components/scheduler/deploy_data_job.env
#fi
#
# Load pre-configured cronjob to automatically deploy Xclim testdata to Thredds
# for Finch and Xclim tutorial notebooks.
#
# See the job for additional possible configurations. The "scheduler"
# component needs to be enabled for this pre-configured job to work.
#
#if [ -f "$COMPOSE_DIR/components/scheduler/deploy_xclim_testdata_to_thredds.env" \
# -a -f "$COMPOSE_DIR/components/scheduler/deploy_data_job.env" ]; then
# . $COMPOSE_DIR/components/scheduler/deploy_xclim_testdata_to_thredds.env
# . $COMPOSE_DIR/components/scheduler/deploy_data_job.env
#fi
# Mount point on host machine for the scheduler to write data from log rotations
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export LOGROTATE_DATA_DIR='${DATA_PERSIST_ROOT}/logrotate'
# Content of "location /" in file config/proxy/conf.d/all-services.include.template
# Useful to have a custom homepage.
# Default:
#export PROXY_ROOT_LOCATION="return 302 https://\$host/magpie;"
# Note that the default homepage will become the jupyterhub login page if the jupyterhub component is enabled.
# If the jupyterhub component is not enabled, it is highly recommended to create a custom homepage since the magpie
# landing page is not the most user-friendly option.
# Sample (remember to add the /data/homepage volume mount to the proxy
# container, as sketched below).
# See PR https://github.com/bird-house/birdhouse-deploy-ouranos/pull/11.
#export PROXY_ROOT_LOCATION="alias /data/homepage/;"
# Public (on the internet) fully qualified domain name of this Pavics
# installation. This is optional and defaults to the internal PAVICS_FQDN if
# not set.
#export PAVICS_FQDN_PUBLIC="$PAVICS_FQDN"
# If ALLOW_UNSECURE_HTTP is enabled, port 80 will no longer redirect to 443
# and will have the same service definitions as 443.
#
# This is so we can use Pagekite's proper SSL certificate. Pagekite will
# hit port 80 instead of 443 and will be the one providing the SSL cert.
#
# Port 443 is still active with whatever SSL cert it was given before.
#
# Set to "True" to allow traffic on the unsecure port 80.
# This is for debugging only; do NOT set this on a production server.
#
#export ALLOW_UNSECURE_HTTP=""
# Jupyter single-user server images
#export DOCKER_NOTEBOOK_IMAGES="pavics/workflow-tests:210216 \
# pavics/crim-jupyter-eo:0.3.0 \
# pavics/crim-jupyter-nlp:0.4.0 \
# birdhouse/pavics-jupyter-base:mlflow-proxy"
# Name of the images displayed on the JupyterHub image selection page
# The name order must correspond to the order of the DOCKER_NOTEBOOK_IMAGES variable,
# and both variables should have the same number of entries.
# Note that the selection names are also used as directory names for the tutorial-notebooks directories mounted when
# starting the corresponding image. The name can use the '<name>' or the '<name>:<version>' format. The version will be
# excluded when mounting the corresponding directory.
#export JUPYTERHUB_IMAGE_SELECTION_NAMES="pavics \
# eo-crim:0.3.0 \
# nlp-crim \
# mlflow-crim"
# Allow JupyterHub users to select which notebook image to run.
# see https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html
#export ENABLE_JUPYTERHUB_MULTI_NOTEBOOKS="
#c.DockerSpawner.image_whitelist = {
# os.environ['JUPYTERHUB_IMAGE_SELECTION_NAMES'].split()[0]: os.environ['DOCKER_NOTEBOOK_IMAGES'].split()[0],
# os.environ['JUPYTERHUB_IMAGE_SELECTION_NAMES'].split()[1]: os.environ['DOCKER_NOTEBOOK_IMAGES'].split()[1],
# os.environ['JUPYTERHUB_IMAGE_SELECTION_NAMES'].split()[2]: os.environ['DOCKER_NOTEBOOK_IMAGES'].split()[2],
# os.environ['JUPYTERHUB_IMAGE_SELECTION_NAMES'].split()[3]: os.environ['DOCKER_NOTEBOOK_IMAGES'].split()[3],
# 'jupyter/scipy-notebook': 'jupyter/scipy-notebook',
# 'jupyter/r-notebook': 'jupyter/r-notebook',
# 'jupyter/tensorflow-notebook': 'jupyter/tensorflow-notebook',
# 'jupyter/datascience-notebook': 'jupyter/datascience-notebook',
# 'jupyter/pyspark-notebook': 'jupyter/pyspark-notebook',
# 'jupyter/all-spark-notebook': 'jupyter/all-spark-notebook',
#}
#"
# Load jobs to automatically deploy the custom notebooks from the specific images
#
# Ensure we always use the "latest" version of the "cronjob generation code"
# Path to a checked out repo of "pavics-jupyter-base" (https://github.com/bird-house/pavics-jupyter-base)
# which contains the config required for the cronjob generation
#CHECKOUT_PAVICS_JUPYTER_BASE="/path/to/checkout/pavics-jupyter-base"
#export AUTODEPLOY_EXTRA_REPOS="$AUTODEPLOY_EXTRA_REPOS $CHECKOUT_PAVICS_JUPYTER_BASE"
# Config for the generation of cronjobs, found on external repo
#DEPLOY_DATA_PAVICS_JUPYTER_ENV="$CHECKOUT_PAVICS_JUPYTER_BASE/scheduler-jobs/deploy_data_pavics_jupyter.env"
# Generates a cronjob for each image found in DOCKER_NOTEBOOK_IMAGES
#if [ -f "$DEPLOY_DATA_PAVICS_JUPYTER_ENV" ]; then
# . $DEPLOY_DATA_PAVICS_JUPYTER_ENV
#fi
# Activates mounting a tutorial-notebooks subfolder that has the same name as the spawned image on JupyterHub
# This variable is only useful if there is more than one image in DOCKER_NOTEBOOK_IMAGES
# and ENABLE_JUPYTERHUB_MULTI_NOTEBOOKS is set with a proper c.DockerSpawner.image_whitelist
# matching the images in DOCKER_NOTEBOOK_IMAGES and their corresponding JUPYTERHUB_IMAGE_SELECTION_NAMES.
#export MOUNT_IMAGE_SPECIFIC_NOTEBOOKS=true
# The parent folder where all the user notebooks will be stored.
# For example, a user named "bob" will have their data in $JUPYTERHUB_USER_DATA_DIR/bob
# and this folder will be mounted when they log into JupyterHub.
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export JUPYTERHUB_USER_DATA_DIR="$DATA_PERSIST_ROOT/jupyterhub_user_data"
# Path to the file containing the clientID for the google drive extension for jupyterlab
# This file will be mounted into JupyterLab instances.
# It should contain the following data: {"clientId":"<add_client_id_here>"}
# To setup a project and find the clientID, check the doc at:
# https://github.com/jupyterlab/jupyterlab-google-drive/blob/master/docs/setup.md
#export JUPYTER_GOOGLE_DRIVE_SETTINGS=
# URL to terms and conditions for logging into Jupyter.
# If this option is set, the login button will be disabled until the user
# checks the checkbox agreeing to the terms and conditions.
#export JUPYTER_LOGIN_TERMS_URL="https://host/path/to/terms"
# Path to custom README for welcoming and guiding new users to Jupyterhub.
# If this path is changed, users will have to restart their personal Jupyter
# server for the change to take effect.
#export JUPYTERHUB_README="/path/to/README.ipynb"
# Previous default value.
#export JUPYTERHUB_README="$JUPYTERHUB_USER_DATA_DIR/README.ipynb"
#if [ ! -f "$JUPYTERHUB_README" ]; then
# # Do not volume-mount non existing file on disk, will create false empty dir.
# # This would happen if the autodeploy of JUPYTERHUB_README has not had the
# # chance to run yet.
# export JUPYTERHUB_README=""
#fi
# Timeout (in seconds, default: 3 days) to shut down the user server when no kernels or terminals
# are running and there is no activity. If undefined or set to zero, the feature will not be enabled.
#export JUPYTER_IDLE_SERVER_CULL_TIMEOUT=259200
# Timeout (in seconds, default: 1 day) after which individual
# user kernels/terminals are considered idle and ready to be culled.
#export JUPYTER_IDLE_KERNEL_CULL_TIMEOUT=86400
# Interval (in seconds) on which to check for idle kernels exceeding the cull timeout value.
# Enabled only if 'JUPYTER_IDLE_KERNEL_CULL_TIMEOUT' is provided and greater than zero.
# If this value is not provided, equal to zero, or set higher than 'JUPYTER_IDLE_KERNEL_CULL_TIMEOUT',
# it will automatically be set to half of the timeout value to ensure that it can be effective.
#export JUPYTER_IDLE_KERNEL_CULL_INTERVAL=0
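#
# Worked example: with JUPYTER_IDLE_KERNEL_CULL_TIMEOUT=86400 (1 day) and the
# interval left unset or zero, the effective check interval becomes 43200
# seconds (half of the timeout), per the rule above.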
# The following variables can be used to configure additional authentication settings for jupyterhub
#
# 32 byte hex-encoded key used to encrypt a user's authentication state in the jupyterhub database.
# If set, jupyterhub will periodically check if the user still has permission to access jupyterhub (according to Magpie)
# This may be a semicolon-separated list of encryption keys. If there are multiple keys present, the first key is always
# used to persist any new auth_state.
# To generate a key the following command can be used: `openssl rand -hex 32`
# See for more details: https://jupyterhub.readthedocs.io/en/stable/reference/authenticators.html#authentication-state
#export JUPYTERHUB_CRYPT_KEY=
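#
# Generate the key once on the host and paste the literal value above, e.g.:
#
#   openssl rand -hex 32
#
# Avoid inline command substitution here; otherwise a new key would be
# generated every time this file is re-read, invalidating any persisted
# auth_state.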
#
# Jupyterhub will check if the currently logged in user still has permission to access jupyterhub (according to Magpie)
# if their authentication information is older than this value (in seconds). This value is only applied if
# JUPYTERHUB_CRYPT_KEY is set.
#export JUPYTERHUB_AUTHENTICATOR_REFRESH_AGE=60
# Allow adding new config or overriding existing config in
# config/jupyterhub/jupyterhub_config.py.template.
#
#export JUPYTERHUB_CONFIG_OVERRIDE="
#
# Sample below will allow for sharing notebooks between Jupyter users.
# Note all shares are public.
#
### public-read paths
#
## /data/jupyterhub_user_data/public-share/
#public_read_on_disk = join(jupyterhub_data_dir, 'public-share')
#
## /notebook_dir/public/
#public_read_in_container = join(notebook_dir, 'public')
#
#c.DockerSpawner.volumes[public_read_on_disk] = {
# 'bind': public_read_in_container,
# 'mode': 'ro',
#}
#
### public-share paths
#
## /data/jupyterhub_user_data/public-share/{username}-public
#public_share_on_disk = join(public_read_on_disk, '{username}-public')
#
## /notebook_dir/mypublic
#public_share_in_container = join(notebook_dir, 'mypublic')
#
#c.DockerSpawner.volumes[public_share_on_disk] = {
# 'bind': public_share_in_container,
# 'mode': 'rw',
#}
#
### create dir with proper permissions
#
#def custom_create_dir_hook(spawner):
# username = spawner.user.name
#
# perso_public_share_dir = public_share_on_disk.format(username=username)
#
# for dir_to_create in [public_read_on_disk, perso_public_share_dir]:
# if not os.path.exists(dir_to_create):
# os.mkdir(dir_to_create, 0o755)
#
# subprocess.call(['chown', '-R', '1000:1000', public_read_on_disk])
#
# # call original create_dir_hook() function
# create_dir_hook(spawner)
#
#c.Spawner.pre_spawn_hook = custom_create_dir_hook
#"
# Usernames that should be given admin access in jupyterhub
# By default, only the MAGPIE_ADMIN_USERNAME user is given admin access. Update this variable only if you wish
# to give additional users admin access by default.
# Note that you can also give users admin access through the jupyterhub UI.
#export JUPYTERHUB_ADMIN_USERS='{\"${MAGPIE_ADMIN_USERNAME}\", \"othername\"}' # python set syntax
# Extra PyWPS config for **all** WPS services (currently only Flyingpigeon, Finch and Raven are supported).
#export EXTRA_PYWPS_CONFIG="
#[logging]
#level = DEBUG
#"
# Thredds server customization
#
# Name of organization hosting the Thredds server
#export THREDDS_ORGANIZATION="Birdhouse"
#export THREDDS_DATASET_LOCATION_ON_CONTAINER='/pavics-ncml' # this default is for backward compatibility
#export THREDDS_SERVICE_DATA_LOCATION_ON_CONTAINER='/pavics-data' # this default is for backward compatibility
#export THREDDS_DATASET_LOCATION_ON_HOST='${DATA_PERSIST_ROOT}/ncml' # this default is for backward compatibility
#export THREDDS_SERVICE_DATA_LOCATION_ON_HOST='${DATA_PERSIST_ROOT}/datasets' # this default is for backward compatibility
#export THREDDS_DATASET_LOCATION_NAME='Datasets' # this default is for backward compatibility
#export THREDDS_SERVICE_DATA_LOCATION_NAME='Birdhouse' # this default is for backward compatibility
#export THREDDS_DATASET_URL_PATH='datasets' # this default is for backward compatibility
#export THREDDS_SERVICE_DATA_URL_PATH='birdhouse' # this default is for backward compatibility
# Additional catalogs for THREDDS. Add as many datasetScan XML blocks as needed to THREDDS_ADDITIONAL_CATALOG.
# Each block defines a new top-level catalog. See birdhouse/components/thredds/catalog.xml.template for more information.
export THREDDS_ADDITIONAL_CATALOG=""
#export THREDDS_ADDITIONAL_CATALOG="
# <datasetScan name='dataset_location_name' ID='dataset_url_path' path='dataset_url_path' location='dataset_location_on_container'>
#
# <metadata inherited='true'>
# <serviceName>all</serviceName>
# </metadata>
#
# <filter>
# <include wildcard='*.nc' />
# <include wildcard='*.ncml' />
# <include wildcard='*.txt' />
# <include wildcard='*.md' />
# <include wildcard='*.rst' />
# <include wildcard='*.csv' />
# </filter>
#
# </datasetScan>
#"
# Allow using Github as external AuthN/AuthZ provider with Magpie
# To setup Github login, go to <https://github.com/settings/developers> under the [OAuth Apps] section
# and create a new Magpie application with the following configuration:
#
# Homepage URL: https://${PAVICS_FQDN}
# Authorization callback URL: https://${PAVICS_FQDN}/magpie/providers/github/signin
#
# Then specify the obtained Github client ID/secret for this Magpie OAuth App with the following variables.
#
#export GITHUB_CLIENT_ID=####
#export GITHUB_CLIENT_SECRET=####
# Magpie DB name
#export MAGPIE_DB_NAME="magpiedb"
# Magpie user registration and approvals
# For more details, see:
# https://pavics-magpie.readthedocs.io/en/latest/configuration.html#user-registration-and-approval-configuration
# https://pavics-magpie.readthedocs.io/en/latest/authentication.html#user-registration
#export MAGPIE_USER_REGISTRATION_ENABLED=false
#export MAGPIE_USER_REGISTRATION_SUBMISSION_EMAIL_TEMPLATE=""
#export MAGPIE_USER_REGISTRATION_APPROVAL_ENABLED=false
#export MAGPIE_USER_REGISTRATION_APPROVAL_EMAIL_RECIPIENT=""
#export MAGPIE_USER_REGISTRATION_APPROVAL_EMAIL_TEMPLATE=""
#export MAGPIE_USER_REGISTRATION_APPROVED_EMAIL_TEMPLATE=""
#export MAGPIE_USER_REGISTRATION_DECLINED_EMAIL_TEMPLATE=""
#export MAGPIE_USER_REGISTRATION_NOTIFY_ENABLED=false
#export MAGPIE_USER_REGISTRATION_NOTIFY_EMAIL_RECIPIENT=""
#export MAGPIE_USER_REGISTRATION_NOTIFY_EMAIL_TEMPLATE=""
# Magpie user assignment to groups with terms & conditions
#export MAGPIE_GROUP_TERMS_SUBMISSION_EMAIL_TEMPLATE=""
#export MAGPIE_GROUP_TERMS_APPROVED_EMAIL_TEMPLATE=""
# Magpie smtp server configuration
# See https://pavics-magpie.readthedocs.io/en/latest/configuration.html#application-settings
#export MAGPIE_SMTP_FROM=""
#export MAGPIE_SMTP_HOST=""
#export MAGPIE_SMTP_PORT="465"
#export MAGPIE_SMTP_SSL=true
#export MAGPIE_SMTP_PASSWORD=""
# Set to 'false' if using self-signed SSL certificate
#export VERIFY_SSL="true"
# Jupyter public demo account with limited computing resources for security reasons
#export JUPYTER_DEMO_USER="demo"
# Changing any limits requires restarting the jupyter user server
#export JUPYTER_DEMO_USER_MEM_LIMIT="2G" # ex: 2G, 500M
# See config/jupyterhub/custom_templates/login.html.template
#export JUPYTER_LOGIN_BANNER_TOP_SECTION=""
#export JUPYTER_LOGIN_BANNER_BOTTOM_SECTION=""
# Make Raven use the local Geoserver instead of the default production one.
# See raven/default.env for more info.
#export RAVEN_GEO_URL="https://${PAVICS_FQDN}/geoserver/"
# Mount point on host machine to store mongodb server data
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export MONGODB_DATA_DIR='${DATA_PERSIST_ROOT}/mongodb_persist'
# Mount point on host machine for cowbird to store data from its mongodb server
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export COWBIRD_MONGODB_DATA_DIR='${DATA_PERSIST_ROOT}/mongodb_cowbird_persist'
# Mount point on host machine to store postgres server data
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export POSTGRES_DATA_DIR='${DATA_PERSIST_ROOT}/frontend_persist'
# Mount point on host machine for weaver to store data from its mongodb server
# (note: if using 'DATA_PERSIST_ROOT', it must be defined earlier, either in this file or from 'default.env')
#export WEAVER_MONGODB_DATA_DIR='${DATA_PERSIST_ROOT}/mongodb_weaver_persist'
# If "True", Weaver providers that are no longer working (not responding when deployed) and are not named in
# WEAVER_WPS_PROVIDERS will be unregistered. This is useful when deploying Weaver with fewer providers than a previous
# deployment.
#export WEAVER_UNREGISTER_DROPPED_PROVIDERS="True"
# If "True", requests to the geoserver endpoint will not be authorized through twitcher/magpie
# (note: this is NOT recommended but will slightly improve performance when accessing geoserver endpoints)
#export GEOSERVER_SKIP_AUTH=True
#############################################################################
# Monitoring components configs
#############################################################################
#
# Below are mandatory if the monitoring component is enabled:
#export GRAFANA_ADMIN_PASSWORD=changeme!
#export ALERTMANAGER_ADMIN_EMAIL_RECEIVER="[email protected],[email protected]"
#export SMTP_SERVER="smtp.example.com:25"
# Below are optional for the monitoring component.
#export ALERTMANAGER_EXTRA_GLOBAL=""
#export ALERTMANAGER_EXTRA_ROUTES=""
#export ALERTMANAGER_EXTRA_INHIBITION=""
#export ALERTMANAGER_EXTRA_RECEIVERS=""
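#
# A hypothetical extra receiver, written as a standard Alertmanager YAML
# 'receivers' fragment (the webhook URL and channel are placeholders; how the
# fragment is injected depends on the monitoring component's template):
#export ALERTMANAGER_EXTRA_RECEIVERS="
#- name: 'ops-slack'
#  slack_configs:
#    - api_url: 'https://hooks.slack.com/services/XXX/YYY/ZZZ'
#      channel: '#alerts'
#"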
#############################################################################
# Emu optional vars
#############################################################################
# To enable emu: add './optional-components/emu' to EXTRA_CONF_DIRS above.
# Emu WPS service image if that testing component is enabled
#export EMU_IMAGE="tlvu/emu:watchdog"
#############################################################################
# Deprecated vars (for components in the ./deprecated-components directory)
#############################################################################
export TOMCAT_NCWMS_PASSWORD="${__DEFAULT__TOMCAT_NCWMS_PASSWORD}"
export CATALOG_USERNAME="${__DEFAULT__CATALOG_USERNAME}"
export CATALOG_PASSWORD="${__DEFAULT__CATALOG_PASSWORD}"
export CATALOG_THREDDS_SERVICE="thredds"
export PHOENIX_PASSWORD="${__DEFAULT__PHOENIX_PASSWORD}"
export PHOENIX_PASSWORD_HASH="${__DEFAULT__PHOENIX_PASSWORD_HASH}"
#############################################################################
# Compose vars
# https://docs.docker.com/compose/environment-variables/envvars/
# https://docs.docker.com/compose/environment-variables/envvars-precedence/
# https://docs.docker.com/engine/reference/commandline/cli/#environment-variables
#############################################################################
# Remove orphan containers, useful when disabling components.
# Harmless when left enabled all the time.
# Not working at the time of this writing, see https://github.com/docker/compose/issues/11374.
# Use COMPOSE_UP_EXTRA_OPTS below as a work-around.
#export COMPOSE_REMOVE_ORPHANS=true
# Extra options for 'pavics-compose.sh up'.
# --remove-orphans useful when disabling components. Harmless when left enabled all the time.
#export COMPOSE_UP_EXTRA_OPTS="--remove-orphans"