-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.test.yml
163 lines (154 loc) · 3.96 KB
/
docker-compose.test.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
# Test stack for SyncMaster: backend + worker plus the data stores the
# integration tests run against. Services are gated behind Compose profiles
# (backend, worker, s3, oracle, hdfs, hive, all) so CI can start only what
# a given test suite needs.
services:
  # Application database shared by backend and worker.
  db:
    image: postgres
    restart: unless-stopped
    env_file: .env.docker
    ports:
      - "5432:5432"
    volumes:
      - postgres_test_data:/var/lib/postgresql/data
    healthcheck:
      test: pg_isready
      start_period: 5s
      interval: 30s
      timeout: 5s
      retries: 3

  # Message broker for Celery task dispatch between backend and worker.
  rabbitmq:
    image: rabbitmq
    restart: unless-stopped
    ports:
      - "5672:5672"
    volumes:
      - rabbitmq_test_data:/var/lib/rabbitmq
    healthcheck:
      test: rabbitmq-diagnostics -q ping
      start_period: 5s
      interval: 30s
      timeout: 5s
      retries: 3

  # API server under test; source and test dirs are bind-mounted so the
  # container runs the working tree directly.
  backend:
    image: mtsrus/syncmaster-backend:${BACKEND_IMAGE_TAG:-test}
    restart: unless-stopped
    build:
      dockerfile: docker/Dockerfile.backend
      context: .
      target: test
    env_file: .env.docker
    ports:
      - "8000:8000"
    volumes:
      - ./syncmaster:/app/syncmaster
      - ./cached_jars:/root/.ivy2  # persists downloaded Spark/Ivy jars between runs
      - ./reports:/app/reports
      - ./tests:/app/tests
      - ./pyproject.toml:/app/pyproject.toml
    depends_on:
      db:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    profiles: [backend, all]

  # Celery worker consuming the dedicated test queue.
  worker:
    image: mtsrus/syncmaster-worker:${WORKER_IMAGE_TAG:-test}
    restart: unless-stopped
    build:
      dockerfile: docker/Dockerfile.worker
      context: .
      target: test
    command: --loglevel=info -Q test_queue
    env_file: .env.docker
    volumes:
      - ./syncmaster:/app/syncmaster
      - ./cached_jars:/root/.ivy2  # persists downloaded Spark/Ivy jars between runs
      - ./reports:/app/reports
      - ./tests:/app/tests
      - ./pyproject.toml:/app/pyproject.toml
    depends_on:
      db:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    profiles: [worker, s3, oracle, hdfs, hive, all]

  # Extra Postgres instance used as a transfer source/target in tests
  # (separate from the application db above).
  test-postgres:
    image: postgres
    restart: unless-stopped
    ports:
      - "5433:5432"
    env_file: .env.docker
    healthcheck:
      test: pg_isready
      start_period: 5s
      interval: 30s
      timeout: 5s
      retries: 3
    profiles: [s3, oracle, hdfs, hive, all]

  # MinIO standing in for S3.
  test-s3:
    image: bitnami/minio:latest
    container_name: test-s3
    restart: unless-stopped
    env_file: .env.docker
    ports:
      - "9010:9000"
      - "9011:9001"
    healthcheck:
      test: curl -f http://localhost:9000/minio/health/live
      start_period: 5s
      interval: 30s
      timeout: 5s
      retries: 3
    profiles: [s3, all]

  # Oracle XE target for Oracle-specific tests. Test-only credentials.
  test-oracle:
    image: gvenzl/oracle-xe:slim-faststart
    restart: unless-stopped
    ports:
      - "1522:1521"
    environment:
      TZ: UTC
      ORACLE_PASSWORD: changeme
      ORACLE_DATABASE: syncmaster
      APP_USER: syncmaster
      APP_USER_PASSWORD: changeme
    profiles: [oracle, all]

  # Postgres backing the Hive Metastore.
  metastore-hive:
    image: postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: metastore
      POSTGRES_USER: test_hive
      POSTGRES_PASSWORD: test_hive
    ports:
      - "5440:5432"
    healthcheck:
      test: pg_isready
      start_period: 5s
      interval: 30s
      timeout: 5s
      retries: 3
    profiles: [hive, hdfs, all]

  # Hadoop + Hive image providing HDFS and the Hive Metastore for tests.
  test-hive:
    image: mtsrus/hadoop:hadoop2.7.3-hive2.3.9
    restart: unless-stopped
    hostname: test-hive
    depends_on:
      metastore-hive:
        condition: service_healthy
    ports:
      - "9820:9820" # HDFS IPC
      - "9870:9870" # HDFS WebHDFS
      - "8088:8088" # Yarn UI
      - "8042:8042" # NodeManager UI
      - "10000:10000" # Hive server
      - "10002:10002" # Hive server Admin UI
      - "19888:19888" # MapReduce JobServer History UI
      - "9083:9083" # Hive Metastore server
      - "9864:9864" # Datanode UI
    environment:
      # We leave only the metastore server; we don't need Hive itself, so we
      # don't waste resources on it. Value is intentionally the string 'false'.
      WITH_HIVE_SERVER: 'false'
      HIVE_METASTORE_DB_URL: jdbc:postgresql://metastore-hive:5432/metastore
      HIVE_METASTORE_DB_DRIVER: org.postgresql.Driver
      HIVE_METASTORE_DB_USER: test_hive
      HIVE_METASTORE_DB_PASSWORD: test_hive
    profiles: [hive, hdfs, all]

# Named volumes so db/broker state survives container recreation.
volumes:
  postgres_test_data:
  rabbitmq_test_data: