-
-
Notifications
You must be signed in to change notification settings - Fork 57
/
Copy pathdocker-compose.yml
200 lines (183 loc) · 5.22 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
# Shared bind mounts / volumes for all robotoff containers (extension field,
# ignored by compose itself; pulled in via the *robotoff-base-volumes alias).
x-robotoff-base-volumes: &robotoff-base-volumes
  - ./data:/opt/robotoff/data
  - ./cache:/opt/robotoff/cache
  - ./datasets:/opt/robotoff/datasets
  - ./models:/opt/robotoff/models
  # named volume mounted on /tmp to avoid bloating the container layer
  - robotoff_tmp:/tmp
# Common service skeleton merged into api / update-listener / scheduler
# via `<<: *robotoff-base`.
x-robotoff-base: &robotoff-base
  restart: ${RESTART_POLICY}
  # this image is built with target=runtime, so it won't have the dev dependencies
  image: ghcr.io/openfoodfacts/robotoff:${TAG}
  volumes: *robotoff-base-volumes
  networks:
    - default
    - common_net
# Environment shared by all robotoff services; keys with no value are
# passed through from the host / .env file.
x-robotoff-base-env: &robotoff-base-env
  LOG_LEVEL:
  ROBOTOFF_INSTANCE:
  ROBOTOFF_TLD:
  ROBOTOFF_SCHEME:
  STATIC_DOMAIN:
  GUNICORN_PRELOAD_APP:
  GUNICORN_NUM_WORKERS:
  ROBOTOFF_UPDATED_PRODUCT_WAIT:
  REDIS_HOST:
  REDIS_UPDATE_HOST:
  REDIS_UPDATE_PORT:
  REDIS_STREAM_NAME:
  POSTGRES_HOST:
  POSTGRES_DB:
  POSTGRES_USER:
  POSTGRES_PASSWORD:
  MONGO_URI:
  OFF_USER:
  OFF_PASSWORD:
  INFLUXDB_HOST:
  INFLUXDB_PORT:
  INFLUXDB_BUCKET:
  INFLUXDB_AUTH_TOKEN:
  SLACK_TOKEN:
  SENTRY_DSN:
  ELASTIC_HOST:
  ELASTIC_PASSWORD:
  ELASTIC_USER:
  TRITON_URI:
  FASTTEXT_HOST:
  FASTTEXT_PORT:
  ENABLE_MONGODB_ACCESS:
  IN_DOCKER_CONTAINER:
  IMAGE_MODERATION_SERVICE_URL:
  CROP_ALLOWED_DOMAINS:
  # quoted: env values are strings; avoids YAML int coercion
  NUM_RQ_WORKERS: "4" # Update worker service command accordingly if you change this settings
  # Used by Google SDK to know where to find the credentials
  GOOGLE_APPLICATION_CREDENTIALS: /opt/robotoff/credentials/google/credentials.json
  GOOGLE_CREDENTIALS: # JSON credentials pasted as environment variable
  BATCH_JOB_KEY: # Secure Batch job import with a token key
# Skeleton for the rq worker services (worker_1..worker_4); merged in
# via `<<: *robotoff-worker`.
x-robotoff-worker-base: &robotoff-worker
  restart: ${RESTART_POLICY}
  image: ghcr.io/openfoodfacts/robotoff:${TAG}
  volumes: *robotoff-base-volumes
  environment: *robotoff-base-env
  depends_on:
    - postgres
    - redis
  mem_limit: 8g
  networks:
    - default
    - common_net
services:
  # HTTP API (falcon + gunicorn)
  api:
    <<: *robotoff-base
    environment: *robotoff-base-env
    mem_limit: 6g
    ports:
      - "${ROBOTOFF_EXPOSE:-5500}:5500"
    depends_on:
      - postgres
      - redis
      - elasticsearch

  # Listens to the Redis stream of product updates
  update-listener:
    <<: *robotoff-base
    environment: *robotoff-base-env
    mem_limit: 1g
    depends_on:
      - redis
    command: python -m robotoff run-update-listener

  worker_1:
    <<: *robotoff-worker
    container_name: ${COMPOSE_PROJECT_NAME:-robotoff}_worker_1
    # Each worker listens to a single high priority queue and to the low priority queue.
    # As the low priority queue comes last, it will only be processed if there are no high
    # priority tasks.
    command: python -m robotoff run-worker robotoff-high-1 robotoff-low

  worker_2:
    <<: *robotoff-worker
    container_name: ${COMPOSE_PROJECT_NAME:-robotoff}_worker_2
    command: python -m robotoff run-worker robotoff-high-2 robotoff-low

  worker_3:
    <<: *robotoff-worker
    container_name: ${COMPOSE_PROJECT_NAME:-robotoff}_worker_3
    command: python -m robotoff run-worker robotoff-high-3 robotoff-low

  worker_4:
    <<: *robotoff-worker
    container_name: ${COMPOSE_PROJECT_NAME:-robotoff}_worker_4
    command: python -m robotoff run-worker robotoff-high-4 robotoff-low

  # Periodic task runner
  scheduler:
    <<: *robotoff-base
    environment: *robotoff-base-env
    command: python -m robotoff run-scheduler
    mem_limit: 6g

  postgres:
    restart: ${RESTART_POLICY}
    image: postgres:16.3-alpine
    environment:
      - POSTGRES_USER
      - POSTGRES_PASSWORD
      - POSTGRES_DB
    volumes:
      - postgres-data:/var/lib/postgresql/data
      - backup:/opt/robotoff-backups
      - ./scripts/backup_postgres.sh:/opt/backup_postgres.sh
    command: postgres -c shared_buffers=${ROBOTOFF_POSTGRES_SHARED_BUFFERS} -c work_mem=${ROBOTOFF_POSTGRES_WORK_MEM}
    mem_limit: 20g
    shm_size: 1g
    ports:
      - "${POSTGRES_EXPOSE:-127.0.0.1:5501}:5432"

  redis:
    restart: ${RESTART_POLICY}
    image: redis:7.0.5-alpine
    volumes:
      - redis-data:/data
    environment:
      # quoted: keep the whole argument string a single YAML scalar
      REDIS_ARGS: "--save 60 1000 --appendonly yes"
    mem_limit: 4g
    ports:
      - "${REDIS_EXPOSE:-127.0.0.1:5502}:6379"

  elasticsearch:
    restart: ${RESTART_POLICY}
    image: elasticsearch:8.5.3
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - ELASTIC_PASSWORD
      - xpack.security.http.ssl.enabled=false
    ulimits:
      # memlock unlimited so bootstrap.memory_lock=true can pin the heap
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 262144
        hard: 262144
    mem_limit: 15g
    volumes:
      - es-data:/usr/share/elasticsearch/data
      - es-logs:/usr/share/elasticsearch/logs
      - elasticsearch_tmp:/tmp
volumes:
  postgres-data:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_postgres-data
  es-data:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_es-data
  es-logs:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_es-logs
  redis-data:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_redis-data
  backup:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_backup
  # Volume mount on /tmp on API and workers, to prevent
  # large docker layer overlay
  robotoff_tmp:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_tmp
  elasticsearch_tmp:
    name: ${COMPOSE_PROJECT_NAME:-robotoff}_elasticsearch_tmp
networks:
  default:
  # this is the network shared with product opener, it's only used in staging (and locally)
  common_net:
    name: ${COMMON_NET_NAME}
    external: true