services:
  proxy:
    build:
      context: ./docker/proxy
    ports:
      - "${HTTP_PORT:-80}:80"
      - "${HTTPS_PORT:-443}:443"
    volumes:
      - ./docker/certbot/certs:/etc/nginx/certs
      - ./docker/certbot/www:/var/www/certbot
    depends_on:
      varnish:
        condition: service_started
      certbot:
        condition: service_healthy
    environment:
      - SERVER_NAME=${SERVER_NAME:-gally.localhost}
      - BACKEND_UPSTREAM=varnish:80
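  # Request flow implied by the upstream settings in this file: the proxy
  # terminates TLS (certificates provisioned by the certbot service below)
  # and forwards to varnish, varnish caches and forwards to the router, and
  # the router dispatches to the php API and the pwa front-end.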
  certbot:
    build:
      context: ./docker/certbot
    volumes:
      - ./docker/certbot/certs:/etc/letsencrypt
    environment:
      - SERVER_NAME=${SERVER_NAME:-gally.localhost}
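  # Most values in this file are resolved with ${VAR:-default} at compose time,
  # so they can be overridden from the shell or from a .env file next to this
  # compose.yml. A minimal illustrative .env (hypothetical values, only
  # variables already referenced in this file):
  #
  #   SERVER_NAME=gally.example.com
  #   HTTP_PORT=8080
  #   HTTPS_PORT=8443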
  varnish:
    build:
      context: ./docker/varnish
    depends_on:
      - router
    environment:
      - BACKEND_HOST=router
      - BACKEND_PORT=80
      - PHP_UPSTREAM=php
      - VARNISH_SIZE=512M
    labels:
      - traefik.enable=true
      # Serve gally over http
      - traefik.http.routers.gally-http.rule=Host(`${SERVER_NAME:-gally.localhost}`)
      - traefik.http.routers.gally-http.entrypoints=http
      # Serve gally over https
      - traefik.http.routers.gally-https.rule=Host(`${SERVER_NAME:-gally.localhost}`)
      - traefik.http.routers.gally-https.entrypoints=https
      - traefik.http.routers.gally-https.tls=true
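  # The labels above only take effect when an external Traefik instance routes
  # to this stack. A hypothetical extra label, assuming that Traefik instance is
  # configured with a certificate resolver named `letsencrypt` (not defined in
  # this file), would let Traefik issue the certificate itself:
  # - traefik.http.routers.gally-https.tls.certresolver=letsencrypt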
  router:
    build:
      context: ./docker/router
    volumes:
      - php_static_files:/app/public:ro
    depends_on:
      - php
      - pwa
    environment:
      - SERVER_NAME=${SERVER_NAME:-gally.localhost}
      - API_ROUTE_PREFIX=${API_ROUTE_PREFIX:-api}
      - PWA_UPSTREAM=${PWA_UPSTREAM:-pwa:3000}
      - API_UPSTREAM=${API_UPSTREAM:-php:9000}
      - EXAMPLE_UPSTREAM=${PWA_UPSTREAM:-pwa:3000}
  php:
    build:
      context: ./docker/php
      target: gally_php_prod
      additional_contexts:
        api_src: ./api
      args:
        COMPOSER_AUTH: ${COMPOSER_AUTH:-}
    volumes:
      - php_static_files:/app/public:rw
      - jwt_keys:/app/config/jwt
    depends_on:
      - database
      - search
      - redis
    environment:
      - APP_ENV=${APP_ENV:-prod}
      - APP_SECRET=${APP_SECRET:-!ChangeMe!}
      - SERVER_NAME=${SERVER_NAME:-gally.localhost}, php:80
      - API_ROUTE_PREFIX=${API_ROUTE_PREFIX:-api}
      - TRUSTED_PROXIES=${TRUSTED_PROXIES:-127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16}
      - TRUSTED_HOSTS=${TRUSTED_HOSTS:-^${SERVER_NAME:-gally.localhost}|localhost|php$$}
      - CORS_ALLOW_ORIGIN=^https?://${SERVER_NAME:-gally.localhost}$$
      - GALLY_CATALOG_MEDIA_URL=${GALLY_CATALOG_MEDIA_URL:-https://${SERVER_NAME:-gally.localhost}/media/catalog/product/}
      - DATABASE_URL=postgresql://${POSTGRES_USER:-app}:${POSTGRES_PASSWORD:-!ChangeMe!}@database:5432/${POSTGRES_DB:-app}?serverVersion=${POSTGRES_VERSION:-16}&charset=${POSTGRES_CHARSET:-utf8}
      - ELASTICSEARCH_URL=https://${SEARCH_USER:-admin}:${SEARCH_PASSWORD:-!ChangeMe0!}@${SEARCH_HOST:-search}:9200/
      - ELASTICSEARCH_SSL_VERIFICATION=false
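  # With the defaults above, DATABASE_URL resolves to
  # postgresql://app:!ChangeMe!@database:5432/app?serverVersion=16&charset=utf8
  # and ELASTICSEARCH_URL to https://admin:!ChangeMe0!@search:9200/.
  # Note that `$$` is how compose escapes a literal `$`, so TRUSTED_HOSTS and
  # CORS_ALLOW_ORIGIN end with a real `$` regex anchor inside the container.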
  pwa:
    build:
      context: ./docker/front
      target: gally_pwa_prod
      additional_contexts:
        front_src: ./front
      args:
        - NEXT_PUBLIC_ENTRYPOINT=${SERVER_NAME:-gally.localhost}
        - NEXT_PUBLIC_API_URL=https://${SERVER_NAME:-gally.localhost}/${API_ROUTE_PREFIX:-api}
        - NEXT_PUBLIC_API_ROUTE_PREFIX=${API_ROUTE_PREFIX:-api}
        - REACT_APP_API_URL=https://${SERVER_NAME:-gally.localhost}/${API_ROUTE_PREFIX:-api}
    environment:
      - NEXT_PUBLIC_ENTRYPOINT=${SERVER_NAME:-gally.localhost}
      - NEXT_PUBLIC_API_URL=https://${SERVER_NAME:-gally.localhost}/${API_ROUTE_PREFIX:-api}
      - NEXT_PUBLIC_API_ROUTE_PREFIX=${API_ROUTE_PREFIX:-api}
      - REACT_APP_API_URL=https://${SERVER_NAME:-gally.localhost}/${API_ROUTE_PREFIX:-api}
    healthcheck:
      test: test $$(curl --connect-timeout 2 -s -o /dev/null -w '%{http_code}' http://localhost:3000) -eq 200
      interval: 10s
      timeout: 5s
      retries: 20
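  # In the healthcheck above, `$$(...)` reaches the container shell as a plain
  # `$(...)` command substitution, so the check passes only when the pwa server
  # listening on port 3000 answers with HTTP 200.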
###> doctrine/doctrine-bundle ###
  database:
    image: postgres:${POSTGRES_VERSION:-16}-alpine
    environment:
      - POSTGRES_DB=${POSTGRES_DB:-app}
      # You should definitely change the password in production
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-!ChangeMe!}
      - POSTGRES_USER=${POSTGRES_USER:-app}
    volumes:
      - db_data:/var/lib/postgresql/data
      # You may use a bind-mounted host directory instead, so that it is harder to accidentally remove the volume and lose all your data!
      # - ./api/docker/db/data:/var/lib/postgresql/data
###< doctrine/doctrine-bundle ###
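  # Database data lives in the `db_data` named volume declared at the bottom of
  # this file; it survives `docker compose down`, but `docker compose down -v`
  # (or removing the volume) deletes it.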
  redis:
    image: docker.io/bitnami/redis:6.2
    environment:
      # ALLOW_EMPTY_PASSWORD is recommended only for development.
      - ALLOW_EMPTY_PASSWORD=yes
      # - REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL
    volumes:
      - 'redis_data:/bitnami/redis/data'
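  # For anything beyond local development the instance should not stay
  # passwordless; the bitnami/redis image reads REDIS_PASSWORD, so an
  # illustrative hardening would be to add under environment:
  # - REDIS_PASSWORD=${REDIS_PASSWORD:-!ChangeMe!}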
  search:
    build:
      context: docker/search/
      target: gally_opensearch2
    environment:
      - cluster.name=os-docker-cluster # Search cluster name
      - node.name=opensearch-node-data # Name the node that will run in this container
      - discovery.seed_hosts=search # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node-data # Nodes eligible to serve as cluster manager
      - OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g # Set min and max JVM heap sizes to at least 50% of system RAM
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - cluster.routing.allocation.disk.threshold_enabled=false # Avoid the index going read-only because of low disk space availability
      - plugins.ml_commons.allow_registering_model_via_url=true
      - plugins.ml_commons.native_memory_threshold=100 # Prevent memory issues after multiple deploys (https://github.com/opensearch-project/ml-commons/issues/2308)
      - plugins.ml_commons.jvm_heap_memory_threshold=100 # Prevent memory issues after multiple deploys (https://github.com/opensearch-project/ml-commons/issues/2308)
      - plugins.security.audit.type=debug # https://github.com/opensearch-project/security/issues/3130
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-!ChangeMe0!}
    volumes:
      - os2_data:/usr/share/opensearch/data:rw
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - 9200
      - 9600
    healthcheck:
      test: test $$(curl -uadmin:$${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-!ChangeMe0!} -k --write-out %{http_code} --fail --silent --output /dev/null "https://localhost:9200/_cluster/health?wait_for_status=green&timeout=5s") -eq 200
      interval: 10s
      timeout: 5s
      retries: 20
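  # Once the container reports healthy, the same endpoint the healthcheck polls
  # can be queried manually for debugging (admin password as configured above):
  # docker compose exec search curl -k -u admin:<OPENSEARCH_ADMIN_PASSWORD> "https://localhost:9200/_cluster/health?pretty"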
  search-ml:
    build:
      context: docker/search/
      target: gally_opensearch2
    environment:
      - cluster.name=os-docker-cluster # Search cluster name
      - node.name=opensearch-node-ml # Name the node that will run in this container
      - discovery.seed_hosts=search # Nodes to look for when discovering the cluster
      - cluster.initial_cluster_manager_nodes=opensearch-node-data # Nodes eligible to serve as cluster manager
      - OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g # Set min and max JVM heap sizes to at least 50% of system RAM
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping
      - node.roles=ml # Define this node as an ML node
      - cluster.routing.allocation.disk.threshold_enabled=false # Avoid the index going read-only because of low disk space availability
      - plugins.ml_commons.allow_registering_model_via_url=true
      - plugins.ml_commons.native_memory_threshold=100 # Prevent memory issues after multiple deploys (https://github.com/opensearch-project/ml-commons/issues/2308)
      - plugins.ml_commons.jvm_heap_memory_threshold=100 # Prevent memory issues after multiple deploys (https://github.com/opensearch-project/ml-commons/issues/2308)
      - plugins.security.audit.type=debug # https://github.com/opensearch-project/security/issues/3130
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-!ChangeMe0!}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - os2_ml_data:/usr/share/opensearch/data:rw
    healthcheck:
      test: test $$(curl -uadmin:$${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-!ChangeMe0!} -k --write-out %{http_code} --fail --silent --output /dev/null "https://localhost:9200/_cluster/health?wait_for_status=green&timeout=5s") -eq 200
      interval: 10s
      timeout: 5s
      retries: 20
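  # Together, `search` (data node and cluster manager, per
  # cluster.initial_cluster_manager_nodes) and `search-ml` (node.roles=ml only)
  # form a single two-node OpenSearch cluster named os-docker-cluster; the ml
  # node joins it through discovery.seed_hosts=search.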
volumes:
  php_static_files:
  jwt_keys:
###> doctrine/doctrine-bundle ###
  db_data:
###< doctrine/doctrine-bundle ###
  os2_data:
  os2_ml_data:
  redis_data:
    driver: local
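# A typical way to bring the whole stack up with this file (standard compose
# commands, nothing project-specific assumed):
#   docker compose up -d --build
#   docker compose ps            # wait until certbot, pwa and search report healthy
#   docker compose logs -f php   # follow the API logs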