Compare commits

..

4 Commits
main ... nvidia

Author SHA1 Message Date
Sean C
c6d93786cb updated 2025-02-23 22:36:50 -05:00
imp4ct
8347f0b0f3 Added parallelism 2024-12-06 14:38:54 -05:00
imp4ct
94a68b523e made local data folders 2024-12-06 13:42:30 -05:00
imp4ct
b730c68303 nvidia gpu compat 2024-12-06 13:09:54 -05:00

View File

@ -1,29 +1,37 @@
---
# Docker Compose stack: Ollama (NVIDIA GPU) + Open WebUI front-end.
# NOTE(review): this span was a garbled side-by-side diff scrape (old and new
# column text fused on each line); the file below reconstructs the NEW column.
version: '3.8'  # NOTE(review): obsolete under Compose v2 — safe to remove

services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    pull_policy: always
    tty: true
    restart: unless-stopped
    volumes:
      # Local bind mount for model storage (changed from $HOME/ollama/ollama).
      - ./ollama:/root/.ollama
    ports:
      # Quoted: unquoted HOST:CONTAINER digit pairs hit YAML 1.1's
      # sexagesimal-integer parsing trap in some parsers.
      - "11434:11434"
    environment:
      - OLLAMA_KEEP_ALIVE=60m   # keep loaded models resident for 60 minutes
      - OLLAMA_NUM_PARALLEL=4   # serve up to 4 requests in parallel
    healthcheck:
      # Marks the container unhealthy when the NVIDIA driver is not visible
      # inside it (i.e. GPU passthrough is broken).
      test: ["CMD-SHELL", "nvidia-smi > /dev/null"]
      interval: 10s
      timeout: 30s
      retries: 3
    deploy:
      resources:
        reservations:
          devices:
            # Requires the NVIDIA Container Toolkit on the host.
            - driver: nvidia
              count: all
              capabilities: [gpu]

  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    restart: unless-stopped
    # Re-added (the diff dropped it): start the API container before the UI.
    depends_on:
      - ollama
    volumes:
      - ./open-webui:/app/backend/data
    ports:
      - "3000:8080"
    environment:
      # Compose service names resolve on the default network, so the UI
      # reaches the Ollama API at http://ollama:11434.
      - OLLAMA_BASE_URL=http://ollama:11434