docker-compose.yml
# This docker-compose file starts up the api, chat client, and a docker postgres container
version: '3.8'
services:
  api:
    container_name: llm-gateway-api
    build: ./backend/
    command: bash -c 'cd ./backend; uvicorn src.app:app --host 0.0.0.0'
    volumes:
      - .:/app
    ports:
      - 8000:8000
    environment:
      # General Config
      - DATABASE_URL=postgresql://fastapi_traefik:fastapi_traefik@db:5432/fastapi_traefik
      - LLM_API_CONTENT_TYPE=application/json
      # OpenAI Config / SageMaker URL
      - LLM_ENDPOINT=https://api.openai.com/v1/chat/completions # LLM endpoint you want to proxy to
      - LLM_IMG_ENDPOINT=https://api.openai.com/v1/images/generations # Another LLM endpoint you want to proxy to
      - LLM_API_AUTHORIZATION=[LLM TOKEN/API KEY HERE. E.g. Bearer sk-asda...]
      # Submissions Monitoring Config
      - ENABLE_SUBMISSIONS_API=false # Set to true if an API is enabled to send logs to eSentire
      - SUBMISSIONS_API_URL=[SUBMISSIONS URL PROVIDED IF CONFIGURED] # The URL provided by eSentire if LLM log submission is enabled
      # LLM Service Options
      - LLM_TYPE=[BEDROCK, OPEN_AI, SAGEMAKER] # Pass a list of the LLM services to use. Supported services in this repo: [OPEN_AI, SAGEMAKER, BEDROCK]
      # Bedrock Config
      - BEDROCK_ASSUMED_ROLE=arn:aws:iam::{accountID}:role/{role name} # ARN of an AWS IAM role to assume for calling the Bedrock service
      - AWS_REGION=us-east-1 # Optional name of the AWS region in which the Bedrock service is deployed (e.g. "us-east-1")
      - MODEL_ID=ai21.j2-mid-v1 # Keep this model to follow the Bedrock tutorial: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids-arns.html
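      # Note: DATABASE_URL above follows postgresql://<user>:<password>@<host>:<port>/<database>;
      # the host "db" is the postgres service defined below, reachable over the shared llm-demo
      # network, and the credentials must match that service's POSTGRES_* settings.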
    networks:
      - llm-demo
    depends_on:
      db:
        condition: service_healthy
  db:
    container_name: llm-gateway-db
    image: postgres:15-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    expose:
      - 5432
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER=fastapi_traefik
      - POSTGRES_PASSWORD=fastapi_traefik
      - POSTGRES_DB=fastapi_traefik
    networks:
      - llm-demo
    healthcheck: # This health check is necessary so that we can accurately determine that the service is ready before starting the api
      test: ["CMD-SHELL", "pg_isready -U fastapi_traefik"]
      interval: 10s
      timeout: 5s
      retries: 5
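    # pg_isready exits 0 once the server accepts connections, so with the settings above compose
    # probes every 10s and allows up to 5 failures before marking the db unhealthy.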
  frontend:
    container_name: llm-gateway-chat
    build:
      context: ./frontend/
      args:
        - LLM_TYPE=BEDROCK # Set this to one of OPEN_AI, SAGEMAKER, or BEDROCK to configure which backend API the frontend code uses to serve the chat client
        - BEDROCK_MODEL_ID=ai21.j2-mid-v1 # Keep this model to follow the Bedrock tutorial
    environment:
      - LLM_GATEWAY_URL=http://llm-gateway-api:8000 # Replace this with your internal LLM Gateway proxy URL. The default points to the backend api docker container
    networks:
      - llm-demo
    ports:
      - 3000:3000
    depends_on:
      - api
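    # Note: LLM_TYPE and BEDROCK_MODEL_ID above are build args, baked into the image at build time;
    # LLM_GATEWAY_URL is a runtime environment variable and can be changed without rebuilding.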
volumes:
  postgres_data:
networks:
  llm-demo:
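# Usage (assuming Docker Compose v2; with v1 use `docker-compose` instead):
#   docker compose up --build
# Once the db healthcheck passes, the api is available at http://localhost:8000
# and the chat client at http://localhost:3000.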