# docker-compose.yaml — InvokeAI (Stable Diffusion) behind a Tailscale sidecar
---
services:
  # Tailscale sidecar: joins the tailnet and serves the InvokeAI UI via
  # the serve config. Runs inside the invoke-ai container's network
  # namespace, so "localhost" in serve.json reaches the UI directly.
  tailscale-invoke-ai:
    image: ghcr.io/tailscale/tailscale:v1.96.5
    container_name: tailscale-invoke-ai
    cap_add:
      # Required for tailscaled to configure routes and the TUN device.
      - net_admin
      - sys_module
    environment:
      - TS_STATE_DIR=/var/lib/tailscale
      - TS_ENABLE_METRICS=true
      - TS_HOSTNAME=invoke-ai-pd05wd
      - TS_SERVE_CONFIG=/config/serve.json
    # NOTE(review): .ts-env presumably supplies TS_AUTHKEY — kept out of
    # the compose file so the auth key never lands in VCS; confirm.
    env_file:
      - .ts-env
    # Share the InvokeAI service's network namespace (sidecar pattern).
    network_mode: service:invoke-ai
    restart: always
    volumes:
      # Named volume persists node identity/keys across recreation,
      # avoiding re-auth on every container restart.
      - tailscale:/var/lib/tailscale
      - ./serve.json:/config/serve.json
    devices:
      - /dev/net/tun:/dev/net/tun

  # InvokeAI pinned by immutable digest to a specific main-cuda build.
  invoke-ai:
    image: ghcr.io/invoke-ai/invokeai:main-cuda@sha256:6d4c2edc3013ce7001008707722620bc7306325c4de59620ac470aa63e80448b
    container_name: stable-diffusion
    environment:
      # NOTE(review): "~" expands inside the container, so the root is
      # <container home>/invokeai — confirm this resolves to /invokeai,
      # otherwise the ./invokeai bind mount below is never used and all
      # models/outputs stay in the container layer.
      - INVOKEAI_ROOT=~/invokeai
      - INVOKEAI_DEVICE=cuda
    restart: always
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all NVIDIA GPUs on the host for this service.
            - driver: nvidia
              capabilities: ["gpu"]
              count: all
    volumes:
      - ./invokeai:/invokeai

volumes:
  # Tailscale state (node key, machine identity); see TS_STATE_DIR above.
  tailscale: