version: '3.8'
services:
  ollama-intel-gpu:
    container_name: ollama-intel-gpu
    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
    restart: unless-stopped
    ports:
      - "11434:11434"
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - ./ollama:/root/.ollama
    environment:
      - no_proxy=localhost,127.0.0.1
      - DEVICE=Arc
      - OLLAMA_HOST=0.0.0.0:11434
    entrypoint: /bin/bash /root/.ollama/start.sh
    #command: /bin/bash source ipex-llm-init --gpu --device $DEVICE && chmod +X /llm/scripts/start-ollama.sh && bash /llm/scripts/start-ollama.sh
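    # The entrypoint above expects ./ollama/start.sh on the host (mounted into
    # the container as /root/.ollama/start.sh). A minimal sketch of that script,
    # assuming the ipex-llm-init and /llm/scripts/start-ollama.sh paths taken
    # from the commented-out default command above:
    #
    #   #!/bin/bash
    #   # Prepare the IPEX-LLM runtime for the Intel GPU selected via $DEVICE,
    #   # then launch the Ollama build bundled with the image.
    #   source ipex-llm-init --gpu --device $DEVICE
    #   chmod +x /llm/scripts/start-ollama.sh
    #   bash /llm/scripts/start-ollama.sh
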
  # Alternative: stock Ollama image on an NVIDIA GPU, kept for reference.
  #ollama:
  #  image: ollama/ollama
  #  container_name: ollama
  #  ports:
  #    - "11434:11434"
  #  volumes:
  #    - ./ollama:/root/.ollama
  #  restart: unless-stopped
  #  deploy:
  #    resources:
  #      reservations:
  #        devices:
  #          - capabilities: ["gpu"]
  #  # For older versions of Docker, you might need to use the environment method:
  #  # environment:
  #  #   - NVIDIA_VISIBLE_DEVICES=all
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    ports:
      - "8282:8080"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./open-webui:/app/backend/data
    restart: unless-stopped
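
# Usage sketch, assuming Docker Compose v2 and that ./ollama/start.sh exists:
#   docker compose up -d
# Open WebUI is then served on http://localhost:8282. The extra_hosts entry
# maps host.docker.internal to the host gateway so the UI can reach Ollama on
# port 11434; if it is not detected automatically, point OLLAMA_BASE_URL (or
# the connection setting in the Open WebUI admin panel) at that address.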