Setup: Add "open-webui" service to compose.yaml config examples

Signed-off-by: Michael Mayer <michael@photoprism.app>
Michael Mayer
2025-09-12 16:13:17 +02:00
parent 11c7d9f7af
commit cd81094d25
6 changed files with 89 additions and 33 deletions

View File

@@ -154,7 +154,7 @@ services:
count: "all"
## Ollama Large-Language Model Runner
## Run "ollama pull [name]:[version]" to download a vision model
## run "ollama pull [name]:[version]" to download a vision model
## listed at <https://ollama.com/search?c=vision>, for example:
## docker compose exec ollama ollama pull gemma3:latest
ollama:

View File

@@ -179,7 +179,7 @@ services:
## Web UI: https://qdrant.localssl.dev/dashboard
qdrant:
image: qdrant/qdrant:latest
profiles: ["all", "qdrant"]
profiles: [ "all", "qdrant" ]
links:
- "traefik:localssl.dev"
- "traefik:app.localssl.dev"
@@ -203,7 +203,7 @@ services:
- "./storage/services/qdrant:/qdrant/storage"
## Ollama Large-Language Model Runner
## Run "ollama pull [name]:[version]" to download a vision model
## run "ollama pull [name]:[version]" to download a vision model
## listed at <https://ollama.com/search?c=vision>, for example:
## docker compose exec ollama ollama pull gemma3:latest
ollama:
@@ -212,7 +212,7 @@ services:
stop_grace_period: 10s
## Only starts this service if the "all", "ollama", or "vision" profile is specified:
## docker compose --profile ollama up -d
profiles: ["all", "ollama", "vision"]
profiles: [ "all", "ollama", "vision" ]
## Insecurely exposes the Ollama service on port 11434
## without authentication (for private networks only):
# ports:
@@ -257,15 +257,16 @@ services:
# capabilities: [ gpu ]
# count: "all"
-## Open WebUI, an extensible and user-friendly AI platform:
-## https://github.com/open-webui/open-webui
+## Open WebUI, a Web Interface for Ollama
+## see https://github.com/open-webui/open-webui
open-webui:
image: ghcr.io/open-webui/open-webui:main
restart: unless-stopped
-stop_grace_period: 10s
+stop_grace_period: 5s
## Only starts this service if the "all", "ollama", "open-webui", or "vision" profile is specified:
## docker compose --profile ollama up -d
profiles: [ "all", "ollama", "open-webui", "vision" ]
+## Exposes Open WebUI at http://localhost:8080 (use https://chat.localssl.dev/ to access it through Traefik):
ports:
- "127.0.0.1:8080:8080"
labels:
@@ -277,7 +278,7 @@ services:
- "traefik.http.routers.open-webui.tls=true"
environment:
WEBUI_URL: "https://chat.localssl.dev"
# WEBUI_SECRET_KEY: "AiBo5eeY3aeJami3ro7ahtohh6Xoh4fed8aid4feighaiYoa"
# WEBUI_SECRET_KEY: ""
OLLAMA_BASE_URL: "http://ollama:11434"
ANONYMIZED_TELEMETRY: "false" # disable Chroma telemetry
HF_HUB_DISABLE_TELEMETRY: "1" # disable Hugging Face telemetry
@@ -286,12 +287,12 @@ services:
- "./storage/services/open-webui:/app/backend/data"
## PhotoPrism® Computer Vision API
-## See: https://github.com/photoprism/photoprism-vision
+## see https://github.com/photoprism/photoprism-vision
photoprism-vision:
image: photoprism/vision:latest
## Only starts this service if the "all" or "vision" profile is specified:
## docker compose --profile vision up -d
profiles: ["all", "vision"]
profiles: [ "all", "vision" ]
stop_grace_period: 15s
working_dir: "/app"
links:
@@ -414,7 +415,7 @@ services:
## ./photoprism client add --id=cs5cpu17n6gj2qo5 --secret=xcCbOrw6I0vcoXzhnOmXhjpVSyFq0l0e -s metrics -n Prometheus -e 60 -t 1
prometheus:
image: prom/prometheus:latest
profiles: ["all", "auth", "prometheus"]
profiles: [ "all", "auth", "prometheus" ]
labels:
- "traefik.enable=true"
- "traefik.http.services.prometheus.loadbalancer.server.port=9090"

View File

@@ -114,7 +114,7 @@ services:
# PHOTOPRISM_GID: 1000
# PHOTOPRISM_UMASK: 0000
## Shared devices for video hardware transcoding (optional):
-## See: https://www.raspberrypi.com/documentation/accessories/camera.html#driver-differences-when-using-libcamera-or-the-legacy-stack
+## see https://www.raspberrypi.com/documentation/accessories/camera.html#driver-differences-when-using-libcamera-or-the-legacy-stack
# devices:
# - "/dev/dri:/dev/dri" # Required Intel QSV or VAAPI hardware transcoding
# - "/dev/video11:/dev/video11" # Video4Linux Video Encode Device (h264_v4l2m2m)
@@ -153,16 +153,16 @@ services:
MARIADB_ROOT_PASSWORD: "insecure"
## Ollama Large-Language Model Runner (optional)
## Run "ollama pull [name]:[version]" to download a vision model
## run "ollama pull [name]:[version]" to download a vision model
## listed at <https://ollama.com/search?c=vision>, for example:
## docker compose exec ollama ollama pull gemma3:latest
ollama:
image: ollama/ollama:latest
restart: unless-stopped
stop_grace_period: 15s
-## Only starts this service if the "ollama" profile is specified:
+## Only starts this service if the "all", "ollama", or "vision" profile is specified:
## docker compose --profile ollama up -d
profiles: ["ollama"]
profiles: ["all", "ollama", "vision"]
## Insecurely exposes the Ollama service on port 11434
## without authentication (for private networks only):
# ports:
@@ -186,19 +186,30 @@ services:
OLLAMA_NEW_ENGINE: "true" # enables the new Ollama engine
# OLLAMA_DEBUG: "true" # shows additional debug information
# OLLAMA_INTEL_GPU: "true" # enables experimental Intel GPU detection
-## NVIDIA GPU Hardware Acceleration (see https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html):
-# NVIDIA_VISIBLE_DEVICES: "all"
-# NVIDIA_DRIVER_CAPABILITIES: "compute,utility"
volumes:
- "./ollama:/root/.ollama"
+## NVIDIA GPU Hardware Acceleration (see https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html):
+# deploy:
+# resources:
+# reservations:
+# devices:
+# - driver: "nvidia"
+# capabilities: [ gpu ]
+# count: "all"
+## Open WebUI, a Web Interface for Ollama (optional)
+## see https://github.com/open-webui/open-webui
+open-webui:
+image: ghcr.io/open-webui/open-webui:main
+restart: unless-stopped
+stop_grace_period: 5s
+## Only starts this service if the "all", "ollama", "open-webui", or "vision" profile is specified:
+## docker compose --profile ollama up -d
+profiles: [ "all", "ollama", "open-webui", "vision" ]
+## Exposes Open WebUI at http://localhost:8080 (use an HTTPS reverse proxy for remote access):
+ports:
+- "127.0.0.1:8080:8080"
+environment:
+WEBUI_URL: "http://localhost:8080"
+# WEBUI_SECRET_KEY: ""
+OLLAMA_BASE_URL: "http://ollama:11434"
+ANONYMIZED_TELEMETRY: "false" # disable Chroma telemetry
+HF_HUB_DISABLE_TELEMETRY: "1" # disable Hugging Face telemetry
+# HUGGING_FACE_HUB_TOKEN: "" # see https://huggingface.co/docs/hub/en/security-tokens
+volumes:
+- "./open-webui:/app/backend/data"
## Watchtower upgrades services automatically (optional)
## see https://docs.photoprism.app/getting-started/updates/#watchtower
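The WEBUI_SECRET_KEY placeholder is now empty instead of shipping a hardcoded example value. If you do set it, any sufficiently long random string should work; one way to generate one (an illustration, not a method required by Open WebUI):

openssl rand -base64 36

If the variable is left commented out, Open WebUI can generate a key on first start and keep it in the mounted data directory, so sessions survive restarts.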

View File

@@ -103,7 +103,7 @@ services:
# PHOTOPRISM_GID: 1000
# PHOTOPRISM_UMASK: 0000
## Shared devices for video hardware transcoding (optional):
-## See: https://www.raspberrypi.com/documentation/accessories/camera.html#driver-differences-when-using-libcamera-or-the-legacy-stack
+## see https://www.raspberrypi.com/documentation/accessories/camera.html#driver-differences-when-using-libcamera-or-the-legacy-stack
# devices:
# - "/dev/video11:/dev/video11" # Video4Linux Video Encode Device (h264_v4l2m2m)
working_dir: "/photoprism" # do not change or remove

View File

@@ -158,16 +158,16 @@ services:
# MARIADB_REPLICATION_PASSWORD: ""
## Ollama Large-Language Model Runner (optional)
## Run "ollama pull [name]:[version]" to download a vision model
## run "ollama pull [name]:[version]" to download a vision model
## listed at <https://ollama.com/search?c=vision>, for example:
## docker compose exec ollama ollama pull gemma3:latest
ollama:
image: ollama/ollama:latest
restart: unless-stopped
stop_grace_period: 15s
-## Only starts this service if the "ollama" profile is specified:
+## Only starts this service if the "all", "ollama", or "vision" profile is specified:
## docker compose --profile ollama up -d
profiles: ["ollama"]
profiles: ["all", "ollama", "vision"]
## Insecurely exposes the Ollama service on port 11434
## without authentication (for private networks only):
# ports:
@@ -205,6 +205,28 @@ services:
# capabilities: [ gpu ]
# count: "all"
+## Open WebUI, a Web Interface for Ollama (optional)
+## see https://github.com/open-webui/open-webui
+open-webui:
+image: ghcr.io/open-webui/open-webui:main
+restart: unless-stopped
+stop_grace_period: 5s
+## Only starts this service if the "all", "ollama", "open-webui", or "vision" profile is specified:
+## docker compose --profile ollama up -d
+profiles: [ "all", "ollama", "open-webui", "vision" ]
+## Exposes Open WebUI at http://localhost:8080 (use an HTTPS reverse proxy for remote access):
+ports:
+- "127.0.0.1:8080:8080"
+environment:
+WEBUI_URL: "http://localhost:8080"
+# WEBUI_SECRET_KEY: ""
+OLLAMA_BASE_URL: "http://ollama:11434"
+ANONYMIZED_TELEMETRY: "false" # disable Chroma telemetry
+HF_HUB_DISABLE_TELEMETRY: "1" # disable Hugging Face telemetry
+# HUGGING_FACE_HUB_TOKEN: "" # see https://huggingface.co/docs/hub/en/security-tokens
+volumes:
+- "./open-webui:/app/backend/data"
## Watchtower upgrades services automatically (optional)
## see https://docs.photoprism.app/getting-started/updates/#watchtower
## activate via "COMPOSE_PROFILES=update docker compose up -d"
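As with the Watchtower activation shown above, profiles can also be combined through the COMPOSE_PROFILES variable instead of repeating --profile flags, for example:

COMPOSE_PROFILES=ollama,update docker compose up -d

This starts Ollama and Open WebUI (both carry the "ollama" profile) together with Watchtower.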

View File

@@ -158,16 +158,16 @@ services:
# MARIADB_REPLICATION_PASSWORD: ""
## Ollama Large-Language Model Runner (optional)
## Run "ollama pull [name]:[version]" to download a vision model
## run "ollama pull [name]:[version]" to download a vision model
## listed at <https://ollama.com/search?c=vision>, for example:
## docker compose exec ollama ollama pull gemma3:latest
ollama:
image: ollama/ollama:latest
restart: unless-stopped
stop_grace_period: 15s
-## Only starts this service if the "ollama" profile is specified:
+## Only starts this service if the "all", "ollama", or "vision" profile is specified:
## docker compose --profile ollama up -d
profiles: ["ollama"]
profiles: ["all", "ollama", "vision"]
## Insecurely exposes the Ollama service on port 11434
## without authentication (for private networks only):
# ports:
@@ -205,6 +205,28 @@ services:
capabilities: [ gpu ]
count: "all"
+## Open WebUI, a Web Interface for Ollama (optional)
+## see https://github.com/open-webui/open-webui
+open-webui:
+image: ghcr.io/open-webui/open-webui:main
+restart: unless-stopped
+stop_grace_period: 5s
+## Only starts this service if the "all", "ollama", "open-webui", or "vision" profile is specified:
+## docker compose --profile ollama up -d
+profiles: [ "all", "ollama", "open-webui", "vision" ]
+## Exposes Open WebUI at http://localhost:8080 (use an HTTPS reverse proxy for remote access):
+ports:
+- "127.0.0.1:8080:8080"
+environment:
+WEBUI_URL: "http://localhost:8080"
+# WEBUI_SECRET_KEY: ""
+OLLAMA_BASE_URL: "http://ollama:11434"
+ANONYMIZED_TELEMETRY: "false" # disable Chroma telemetry
+HF_HUB_DISABLE_TELEMETRY: "1" # disable Hugging Face telemetry
+# HUGGING_FACE_HUB_TOKEN: "" # see https://huggingface.co/docs/hub/en/security-tokens
+volumes:
+- "./open-webui:/app/backend/data"
## Watchtower upgrades services automatically (optional)
## see https://docs.photoprism.app/getting-started/updates/#watchtower
## activate via "COMPOSE_PROFILES=update docker compose up -d"
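After pulling a vision model as described above, the Ollama CLI inside the container can confirm the result; on this GPU-enabled variant, a quick check that models are present and loaded onto the GPU (assuming the service name "ollama" used throughout this file):

docker compose exec ollama ollama list
docker compose exec ollama ollama ps

ollama list prints the downloaded models, while ollama ps shows which models are currently loaded and whether they run on the CPU or GPU.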