This commit is contained in:
Sean C 2025-02-23 22:36:50 -05:00
parent 8347f0b0f3
commit c6d93786cb

View File

@ -1,16 +1,20 @@
---
# Docker Compose stack: Ollama LLM server + Open WebUI front-end.
# Reconstructed from a diff view: the duplicate `environment:` key under
# `ollama` is merged (the newer values win), and the removed `ollama-webui`
# service is dropped — it conflicted with `open-webui` on host port 3000.
version: '3.8'

services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    pull_policy: always
    tty: true
    restart: unless-stopped
    volumes:
      # Persist downloaded models across container restarts.
      - ./ollama:/root/.ollama
    environment:
      # Keep loaded models in memory for 60 minutes after last use.
      - OLLAMA_KEEP_ALIVE=60m
      # Serve up to 4 requests per model concurrently.
      - OLLAMA_NUM_PARALLEL=4
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal (base-60) integer parsing.
      - "11434:11434"
    healthcheck:
      # Probes GPU visibility, not the Ollama API itself — the container
      # is marked unhealthy if the NVIDIA driver becomes unavailable.
      test: ["CMD-SHELL", "nvidia-smi > /dev/null"]
      interval: 10s
      timeout: 30s
      retries: 3
    deploy:
      # NOTE(review): the resources/reservations/devices nesting was elided
      # between diff hunks and is reconstructed here per the standard Compose
      # GPU syntax — confirm against the full original file.
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    restart: unless-stopped  # Restart automatically unless explicitly stopped.
    depends_on:
      # Start ordering only (no health gating): bring ollama up first.
      - ollama
    ports:
      # Host 3000 -> container 8080; quoted for safe YAML scalar typing.
      - "3000:8080"
    environment:
      # Open WebUI reaches Ollama over the Compose network by service name.
      - OLLAMA_BASE_URL=http://ollama:11434
    volumes:
      # Persist WebUI state (users, chats, settings).
      - ./open-webui:/app/backend/data