docker-compose.yaml
version: "3.7"

services:
  # This service runs the postgres DB used by dagster for run storage, schedule storage,
  # and event log storage.
  dagster_postgresql:
    image: postgres:11
    container_name: dagster_postgresql
    environment:
      POSTGRES_USER: "postgres_user"
      POSTGRES_PASSWORD: "postgres_password"
      POSTGRES_DB: "postgres_db"
    networks:
      - dagster_network
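  # For reference, the Dagster instance is pointed at this database through the storage
  # block of its dagster.yaml. That file is not shown in this view, so the block below is
  # a hedged sketch of the standard postgres storage layout, not this repo's exact config:
  #
  #   storage:
  #     postgres:
  #       postgres_db:
  #         username:
  #           env: DAGSTER_POSTGRES_USER
  #         password:
  #           env: DAGSTER_POSTGRES_PASSWORD
  #         hostname: dagster_postgresql
  #         db_name:
  #           env: DAGSTER_POSTGRES_DB
  #         port: 5432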
  # This service runs the gRPC server that loads your user code; it is used by both
  # dagster-webserver and dagster-daemon. By setting DAGSTER_CURRENT_IMAGE to its own
  # image, we tell the run launcher to use this same image when launching runs in a new
  # container as well. Multiple containers like this can be deployed separately - each
  # just needs to run on its own port and have its own entry in the workspace.yaml file
  # that's loaded by the webserver (a sketch of such an entry is commented below this
  # service).
  clash_royale_etl_project:
    build:
      context: ./dagster
      dockerfile: Dockerfile
    container_name: clash_royale_etl_project
    # image: data_lake_orchestration_image
    image: clash_royale_etl_project
    restart: always
    environment:
      DAGSTER_POSTGRES_USER: "postgres_user"
      DAGSTER_POSTGRES_PASSWORD: "postgres_password"
      DAGSTER_POSTGRES_DB: "postgres_db"
      DAGSTER_CURRENT_IMAGE: "clash_royale_etl_project"
      # Set the ENVIRONMENT variable; pass it to Docker with
      # `ENVIRONMENT=dev docker-compose up --build` (see also the note at the end of this file).
      ENVIRONMENT: ${ENVIRONMENT:-dev} # defaults to dev
    networks:
      - dagster_network
    volumes:
      - ./dagster:/opt/dagster/app
      - .dbt:/root/.dbt
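  # A minimal sketch of the workspace.yaml entry the webserver would load for this
  # container; the port is an assumption and must match whatever port the gRPC server
  # inside ./dagster/Dockerfile listens on:
  #
  #   load_from:
  #     - grpc_server:
  #         host: clash_royale_etl_project
  #         port: 4000
  #         location_name: "clash_royale_etl_project"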
  # This service runs dagster-webserver, which loads your user code from the user code
  # container. Since our instance uses the QueuedRunCoordinator, any runs submitted from
  # the webserver will be put on a queue and later dequeued and launched by dagster-daemon.
  dagster_webserver:
    build:
      context: .
      dockerfile: Dockerfile
    entrypoint:
      - dagster-webserver
      - -h
      - "0.0.0.0"
      - -p
      - "3000"
      - -w
      - workspace.yaml
    container_name: dagster_webserver
    expose:
      - "3000"
    ports:
      - "3000:3000"
    environment:
      DAGSTER_POSTGRES_USER: "postgres_user"
      DAGSTER_POSTGRES_PASSWORD: "postgres_password"
      DAGSTER_POSTGRES_DB: "postgres_db"
      DAGSTER_GRPC_TIMEOUT_SECONDS: "300"
    volumes: # Make docker client accessible so we can terminate containers from the webserver
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp/io_manager_storage:/tmp/io_manager_storage
    networks:
      - dagster_network
    depends_on:
      - dagster_postgresql
      - clash_royale_etl_project
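  # The QueuedRunCoordinator mentioned above is configured in the instance's dagster.yaml,
  # not in this file. A hedged sketch of that block (the concurrency limit is illustrative,
  # not taken from this repo):
  #
  #   run_coordinator:
  #     module: dagster.core.run_coordinator
  #     class: QueuedRunCoordinator
  #     config:
  #       max_concurrent_runs: 10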
  # This service runs the dagster-daemon process, which is responsible for taking runs
  # off of the queue and launching them, as well as creating runs from schedules or sensors.
  dagster_daemon:
    build:
      context: .
      dockerfile: Dockerfile
    entrypoint:
      - dagster-daemon
      - run
    container_name: dagster_docker_daemon
    restart: on-failure
    environment:
      DAGSTER_POSTGRES_USER: "postgres_user"
      DAGSTER_POSTGRES_PASSWORD: "postgres_password"
      DAGSTER_POSTGRES_DB: "postgres_db"
    volumes: # Make docker client accessible so we can launch containers using host docker
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp/io_manager_storage:/tmp/io_manager_storage
    networks:
      - dagster_network
    depends_on:
      - dagster_postgresql
      - clash_royale_etl_project
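  # For the daemon to launch each dequeued run in its own container (using the image set
  # via DAGSTER_CURRENT_IMAGE above), the instance's dagster.yaml would also configure a
  # run launcher. A hedged sketch, assuming the DockerRunLauncher from dagster-docker:
  #
  #   run_launcher:
  #     module: dagster_docker
  #     class: DockerRunLauncher
  #     config:
  #       env_vars:
  #         - DAGSTER_POSTGRES_USER
  #         - DAGSTER_POSTGRES_PASSWORD
  #         - DAGSTER_POSTGRES_DB
  #       network: dagster_network
  #       container_kwargs:
  #         auto_remove: true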
networks:
  dagster_network:
    driver: bridge
    name: dagster_network
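# Usage note: ENVIRONMENT can also be supplied through a .env file placed next to this
# docker-compose.yaml (docker compose reads it automatically); the value below is just an
# example, not a setting from this repo:
#
#   ENVIRONMENT=prod
#
# then bring the stack up with:
#
#   docker compose up --build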