forked from NVIDIA-AI-Blueprints/vulnerability-analysis
-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yml
executable file
·132 lines (121 loc) · 4.66 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
# SPDX-FileCopyrightText: Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set project name environment variable to avoid container collision in shared environment
name: ${DOCKER_COMPOSE_PROJECT_NAME:-morpheus_vuln_analysis}

services:
  # Main pipeline container: runs the vulnerability-analysis workflow and
  # reaches every external API through the nginx-cache reverse proxy below.
  morpheus-vuln-analysis:
    image: nvcr.io/nvidia/morpheus/morpheus-vuln-analysis:24.10
    build:
      context: ./
      dockerfile: ./Dockerfile
      target: runtime
    ports:
      - "8000:8000"
      - "26466:26466"
    working_dir: /workspace_examples
    deploy:
      resources:
        reservations:
          devices:
            # Reserve a single NVIDIA GPU for this container.
            - driver: nvidia
              count: 1
              capabilities: [ gpu ]
    networks:
      - app_network
    environment:
      - TERM=${TERM:-}
      - HF_HUB_CACHE=/workspace_examples/.cache/huggingface
      - XDG_CACHE_HOME=/workspace_examples/.cache/am_cache # Allow logs to be written back to host
      # Required API Keys — compose aborts with the quoted message when unset
      - NVD_API_KEY=${NVD_API_KEY:?"NVD_API_KEY is required"}
      - NVIDIA_API_KEY=${NVIDIA_API_KEY:?"NVIDIA_API_KEY is required"}
      - SERPAPI_API_KEY=${SERPAPI_API_KEY:?"SERPAPI_API_KEY is required"}
      # Optional API Keys
      # NOTE(review): Compose interpolation takes defaults literally, so
      # `${VAR:-""}` defaults to the two-character string `""`, not an empty
      # value — confirm downstream consumers treat that as "unset".
      - GHSA_API_KEY=${GHSA_API_KEY:-""}
      - NGC_API_KEY=${NGC_API_KEY:-""}
      - NGC_ORG_ID=${NGC_ORG_ID:-""}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-""}
      # Base URLs for API endpoints; all default to routes served by nginx-cache
      - CVE_DETAILS_BASE_URL=${CVE_DETAILS_BASE_URL:-http://nginx-cache/cve-details}
      - CWE_DETAILS_BASE_URL=${CWE_DETAILS_BASE_URL:-http://nginx-cache/cwe-details}
      # Fixed copy/paste bug: the override variable previously read
      # CWE_DETAILS_BASE_URL, so DEPSDEV_BASE_URL could never be overridden
      # independently (and a CWE override silently redirected deps.dev traffic).
      - DEPSDEV_BASE_URL=${DEPSDEV_BASE_URL:-http://nginx-cache/depsdev}
      - FIRST_BASE_URL=${FIRST_BASE_URL:-http://nginx-cache/first}
      - GHSA_BASE_URL=${GHSA_BASE_URL:-http://nginx-cache/ghsa}
      - NGC_API_BASE=${NGC_API_BASE:-http://nginx-cache/nemo/v1}
      - NIM_EMBED_BASE_URL=${NIM_EMBED_BASE_URL:-http://nginx-cache/nim_embed/v1}
      - NVD_BASE_URL=${NVD_BASE_URL:-http://nginx-cache/nvd}
      - NVIDIA_API_BASE=${NVIDIA_API_BASE:-http://nginx-cache/nim_llm/v1}
      - OPENAI_API_BASE=${OPENAI_API_BASE:-http://nginx-cache/openai/v1} # Used by `langchain` for embedding generation
      - OPENAI_BASE_URL=${OPENAI_BASE_URL:-http://nginx-cache/openai/v1} # Used by `openai` for LLM inference
      - RHSA_BASE_URL=${RHSA_BASE_URL:-http://nginx-cache/rhsa}
      - SERPAPI_BASE_URL=${SERPAPI_BASE_URL:-http://nginx-cache/serpapi}
      - UBUNTU_BASE_URL=${UBUNTU_BASE_URL:-http://nginx-cache/ubuntu}
    volumes:
      # Mount the repository so host edits are visible inside the container.
      - ./:/workspace_examples
    cap_add:
      # Allow the container to adjust process nice levels / scheduling priority.
      - sys_nice
    depends_on:
      - nginx-cache
    restart: always

  # Caching reverse proxy fronting the external APIs used by the pipeline;
  # routes are templated from ./nginx/templates via the environment below.
  nginx-cache:
    image: nginx
    volumes:
      - ./nginx/nginx_cache.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/logs:/var/log/nginx
      - ./nginx/templates:/etc/nginx/templates:ro
      - service-cache:/server_cache_intel:rw
      - llm-cache:/server_cache_llm:rw
    ports:
      # Set custom ports in environment variables to avoid port collision
      - "${NGINX_HOST_HTTP_PORT:-8080}:80"
    environment:
      # API Keys
      - GHSA_API_KEY=${GHSA_API_KEY:-""}
      - NGC_API_KEY=${NGC_API_KEY:-""}
      - NGC_ORG_ID=${NGC_ORG_ID:-""}
      - NVD_API_KEY=${NVD_API_KEY:-""}
      - NVIDIA_API_KEY=${NVIDIA_API_KEY:-""}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-""}
      # Route Variables
      - NGINX_UPSTREAM_NVAI=${NGINX_UPSTREAM_NVAI:-https://api.nvcf.nvidia.com}
      - NGINX_UPSTREAM_NIM_LLM=${NGINX_UPSTREAM_NIM_LLM:-https://integrate.api.nvidia.com}
      - NGINX_UPSTREAM_NIM_EMBED=${NGINX_UPSTREAM_NIM_EMBED:-https://integrate.api.nvidia.com}
    networks:
      - app_network
    restart: always

  # Optional TLS terminator in front of nginx-cache; only started when the
  # `ssl` profile is enabled (e.g. `docker compose --profile ssl up`).
  nginx-ssl:
    image: nginx
    volumes:
      - ./nginx/nginx_ssl.conf:/etc/nginx/nginx.conf:ro
      # NOTE(review): key.pem / cert.pem must exist on the host before the
      # ssl profile is started, or the mounts fail — TODO confirm docs cover this.
      - ./nginx/key.pem:/etc/nginx/ssl/key.pem
      - ./nginx/cert.pem:/etc/nginx/ssl/cert.pem
    ports:
      # Set custom ports in environment variables to avoid port collision
      - "${NGINX_HOST_HTTPS_PORT:-443}:443"
    networks:
      - app_network
    depends_on:
      - nginx-cache
    profiles:
      - ssl
    restart: always

networks:
  app_network:
    driver: bridge

volumes:
  # Named volumes backing the nginx proxy caches so cached API/LLM responses
  # survive container restarts.
  service-cache:
    driver: local
  llm-cache:
    driver: local