Directory Structure:
└── ./
    └── LLV
        ├── .dockerignore
        ├── Caddyfile
        ├── Dockerfile
        └── start_services.sh
---
File: /LLV/.dockerignore
---
# Ignore everything
*
# Re-include only what we actually need in the image
!Dockerfile
!Caddyfile
!start_services.sh
---
File: /LLV/Caddyfile
---
{
    # We terminate inside Tailscale; no public HTTPS here.
    auto_https off
}

:80 {
    encode gzip zstd

    # ── ComfyUI UI under /comfyui (prefix stripped before proxy) ───────────────
    handle_path /comfyui/* {
        reverse_proxy 127.0.0.1:8188
    }

    # ── ComfyUI APIs & WebSocket (no prefix strip) ─────────────────────────────
    handle /api/* {
        reverse_proxy 127.0.0.1:8188
    }
    # ComfyUI uses a websocket at /ws for live updates (match /ws itself, not just subpaths).
    handle /ws* {
        reverse_proxy 127.0.0.1:8188
    }
    # ── Static model folders proxied to ComfyUI (matches your old config) ──────
    handle /loras/* {
        reverse_proxy 127.0.0.1:8188
    }
    handle /loras_static/* {
        reverse_proxy 127.0.0.1:8188
    }
    handle /checkpoints_static/* {
        reverse_proxy 127.0.0.1:8188
    }

    # Optional: direct static file browse of the loras directory, independent of
    # ComfyUI. Visit /_loras/… if you want a raw listing/download.
    handle_path /_loras/* {
        root * /workspace/ComfyUI/models/loras
        file_server browse
    }

    # ── JupyterLab under /jupyter/ (Jupyter is started with base_url=/jupyter/) ─
    handle /jupyter/* {
        reverse_proxy 127.0.0.1:8888
    }

    # Default: send anything else to ComfyUI root (useful for top-level assets)
    handle /* {
        reverse_proxy 127.0.0.1:8188
    }
}
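
# Rough sketch of how requests map with this config. The hostname "comfy-pod" is an
# assumption; it is whatever --hostname the start script passes to 'tailscale up'.
#
#   curl http://comfy-pod/comfyui/                 # UI: /comfyui prefix stripped, proxied to :8188
#   curl http://comfy-pod/api/...                  # ComfyUI API: path forwarded unchanged to :8188
#   curl http://comfy-pod/_loras/                  # raw, browsable listing of the loras directory
#   curl "http://comfy-pod/jupyter/?token=runpod"  # JupyterLab (only when ENABLE_JUPYTER=true)
#   curl http://comfy-pod/anything-else            # falls through to ComfyUI root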
---
File: /LLV/Dockerfile
---
# syntax=docker/dockerfile:1.7

###############################################################################
# BUILD STAGE #################################################################
###############################################################################
ARG CUDA_VERSION=12.6.0
ARG UBUNTU_VERSION=22.04
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION} AS builder

# --- base tooling -----------------------------------------------------------
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        python3 python3-venv python3-distutils python3-pip \
        build-essential git curl ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# --- Python venv & Core ML stack ---------------------------------------------
ARG TORCH_CUDA=cu126
ARG TORCH_VER=2.6.0
ARG TVISION_VER=0.21.0+${TORCH_CUDA}
ARG TAUDIO_VER=2.6.0+${TORCH_CUDA}
ARG XFORMERS_VER=0.0.29.post2
ENV VENV_PATH=/opt/venv

RUN python3 -m venv ${VENV_PATH} && \
    . ${VENV_PATH}/bin/activate && \
    pip install --upgrade --no-cache-dir pip setuptools wheel && \
    pip install --extra-index-url https://download.pytorch.org/whl/${TORCH_CUDA} \
        torch==${TORCH_VER}+${TORCH_CUDA} \
        torchvision==${TVISION_VER} \
        torchaudio==${TAUDIO_VER} \
        xformers==${XFORMERS_VER} \
        onnx==1.16.2 onnxruntime-gpu==1.22.0 numpy==1.26.4 && \
    curl -L -o /tmp/sageattention.whl \
        https://huggingface.co/MonsterMMORPG/SECourses_Premium_Flash_Attention/resolve/main/sageattention-2.1.1-cp310-cp310-linux_x86_64.whl && \
    mv /tmp/sageattention.whl /tmp/sageattention-2.1.1-py3-none-any.whl && \
    pip install /tmp/sageattention-2.1.1-py3-none-any.whl && rm /tmp/sageattention-2.1.1-py3-none-any.whl && \
    find ${VENV_PATH} -name '__pycache__' -type d -exec rm -rf {} + && \
    pip cache purge
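
# Note on the wheel rename above: the SageAttention wheel is published with a
# cp310 / linux_x86_64 filename tag, and renaming it to py3-none-any just makes
# pip accept the file without a tag check; the binary inside still targets
# CPython 3.10 on x86_64 Linux, which matches this Ubuntu 22.04 base image.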

###############################################################################
# RUNTIME STAGE ###############################################################
###############################################################################
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION}

# Install runtime system dependencies
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        python3 python3-dev git curl ca-certificates gpg ffmpeg tini && \
    \
    # Add repositories and install Caddy & Tailscale
    curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg && \
    curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list > /dev/null && \
    curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/jammy.noarmor.gpg | gpg --dearmor -o /usr/share/keyrings/tailscale-archive-keyring.gpg && \
    curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/jammy.tailscale-keyring.list | tee /etc/apt/sources.list.d/tailscale.list > /dev/null && \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends caddy tailscale && \
    rm -rf /var/lib/apt/lists/*

# Copy the pre-built Python virtual environment
ENV VENV_PATH=/opt/venv
COPY --from=builder ${VENV_PATH} ${VENV_PATH}

# Copy configuration and startup script
WORKDIR /workspace
COPY Caddyfile /etc/caddy/Caddyfile
COPY start_services.sh /usr/local/bin/start_services.sh
RUN chmod +x /usr/local/bin/start_services.sh

# Set environment variables for the container runtime.
# TORCH_INDUCTOR_FORCE_DISABLE_FP8 disables FP8 compilation for Ampere compatibility.
ENV PATH="${VENV_PATH}/bin:${PATH}" \
    PYTHONUNBUFFERED=1 \
    TORCH_INDUCTOR_FORCE_DISABLE_FP8="1"

# Use tini as the entrypoint to properly manage processes
ENTRYPOINT ["/usr/bin/tini", "-s", "--", "/usr/local/bin/start_services.sh"]
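
# Example build/run (a sketch only; the image tag, volume path, and env values are
# assumptions to adapt, and on RunPod the /workspace volume and GPU are provided by
# the platform rather than by 'docker run'):
#
#   docker build -t llv .
#   docker run --rm --gpus all \
#     -v /path/to/workspace:/workspace \
#     -e TAILSCALE_AUTHKEY=tskey-auth-... \
#     -e ENABLE_JUPYTER=true \
#     llv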
---
File: /LLV/start_services.sh
---
#!/usr/bin/env bash
set -euo pipefail

VENV_COMFY=${VENV_COMFY:-/opt/venv}
COMFY_DIR="/workspace/ComfyUI"
COMFY_LAUNCH_ARGS=${COMFY_LAUNCH_ARGS:-"--listen 0.0.0.0 --port 8188 --disable-auto-launch"}
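# COMFY_LAUNCH_ARGS is kept as a plain string and expanded unquoted at launch time
# so it word-splits into separate CLI flags for main.py.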
- echo "[services] Starting Tailscale..."
- TS_STATE_DIR="/workspace/tailscale"
- TS_STATE_FILE="${TS_STATE_DIR}/tailscaled.state"
- TS_SOCKET_FILE="/var/run/tailscale/tailscaled.sock"
- mkdir -p "${TS_STATE_DIR}"
- mkdir -p "$(dirname "${TS_SOCKET_FILE}")"
tailscaled \
    --state="${TS_STATE_FILE}" \
    --socket="${TS_SOCKET_FILE}" \
    --tun=userspace-networking &
TAILSCALED_PID=$!
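
# Give tailscaled a moment to create its control socket before calling 'tailscale up'.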
sleep 4

TS_UP_ARGS=("--hostname=${RUNPOD_POD_HOSTNAME:-comfy-pod}" "--accept-dns=false")
if [[ -n "${TAILSCALE_AUTHKEY:-}" ]]; then
    TS_UP_ARGS+=("--auth-key=${TAILSCALE_AUTHKEY}")
fi
if tailscale --socket="${TS_SOCKET_FILE}" up "${TS_UP_ARGS[@]}"; then
    echo "[services] Tailscale started successfully."
else
    echo "[services] Tailscale 'up' command failed or already up. Continuing..."
fi

echo "[services] Setting up ComfyUI..."
source "${VENV_COMFY}/bin/activate"
export TORCH_INDUCTOR_FORCE_DISABLE_FP8="1"
echo "[services] Forcing TORCH_INDUCTOR_FORCE_DISABLE_FP8=${TORCH_INDUCTOR_FORCE_DISABLE_FP8}"

COMFY_PID=""
install_node_deps() {
    local NODES_DIR_PATH="$1"
    local ACTION_DESCRIPTION="$2"
    if [ -d "${NODES_DIR_PATH}" ]; then
        echo "[services] Checking custom node dependencies in ${NODES_DIR_PATH} (${ACTION_DESCRIPTION})..."
        while IFS= read -r -d '' NODE_DIR; do
            if [ -f "${NODE_DIR}/requirements.txt" ]; then
                NODE_NAME=$(basename "${NODE_DIR}")
                echo "[services] Installing deps for node: ${NODE_NAME}"
                ( cd "${NODE_DIR}" && "${VENV_COMFY}/bin/pip" install -r requirements.txt ) || \
                    echo "[services] Warning: pip install failed for ${NODE_NAME}"
            fi
        done < <(find "${NODES_DIR_PATH}" -mindepth 1 -maxdepth 1 -type d -print0)
    else
        echo "[services] Custom nodes dir not found at ${NODES_DIR_PATH}."
    fi
}
- if [ -f "${COMFY_DIR}/main.py" ]; then
- echo "[services] Found existing ComfyUI in ${COMFY_DIR}."
- cd "${COMFY_DIR}"
- if [ -f "requirements.txt" ]; then
- echo "[services] Ensuring ComfyUI requirements are present..."
- "${VENV_COMFY}/bin/pip" install --no-cache-dir -r requirements.txt || \
- echo "[services] Warning: ComfyUI requirements install failed."
- fi
- install_node_deps "${COMFY_DIR}/custom_nodes" "Existing"
- echo "[services] Launching ComfyUI: ${COMFY_LAUNCH_ARGS}"
- TORCH_INDUCTOR_FORCE_DISABLE_FP8="1" python main.py ${COMFY_LAUNCH_ARGS} &
- COMFY_PID=$!
- echo "[services] ComfyUI PID: ${COMFY_PID}"
- else
- echo "[services] ComfyUI not found; cloning into ${COMFY_DIR}..."
- mkdir -p "$(dirname "${COMFY_DIR}")"
- if git clone https://github.com/comfyanonymous/ComfyUI.git "${COMFY_DIR}"; then
- echo "[services] Clone complete."
- cd "${COMFY_DIR}"
- if [ -f "requirements.txt" ]; then
- echo "[services] Installing ComfyUI requirements..."
- "${VENV_COMFY}/bin/pip" install --no-cache-dir -r requirements.txt || \
- echo "[services] Warning: ComfyUI requirements install failed."
- fi
- install_node_deps "${COMFY_DIR}/custom_nodes" "New clone"
- echo "[services] Launching ComfyUI: ${COMFY_LAUNCH_ARGS}"
- python main.py ${COMFY_LAUNCH_ARGS} &
- COMFY_PID=$!
- echo "[services] ComfyUI PID: ${COMFY_PID}"
- else
- echo "[services] Failed to clone ComfyUI. Please place it at ${COMFY_DIR}."
- fi
- fi
- deactivate
- echo "[services] ComfyUI setup complete."
- JUPYTER_PID=""
- if [[ "${ENABLE_JUPYTER:-false}" == "true" ]]; then
- echo "[services] Starting JupyterLab..."
- source "${VENV_COMFY}/bin/activate"
- if ! command -v jupyter-lab &> /dev/null; then
- echo "[services] Warning: ENABLE_JUPYTER is true, but jupyterlab is not installed."
- echo "[services] Please rebuild your container with '--build-arg WITH_JUPYTER=true'"
    else
        JUPYTER_TOKEN='runpod'
        echo "[services] JupyterLab available at: /jupyter/?token=${JUPYTER_TOKEN}"
        jupyter-lab --ip=127.0.0.1 --port=8888 --no-browser \
            --ServerApp.base_url=/jupyter \
            --ServerApp.token="${JUPYTER_TOKEN}" --ServerApp.password='' \
            --notebook-dir=/workspace --allow-root &
        JUPYTER_PID=$!
        echo "[services] JupyterLab PID: ${JUPYTER_PID}"
    fi
    deactivate
else
    echo "[services] JupyterLab disabled."
fi
- echo "[services] Starting Caddy..."
- caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
- CADDY_PID=$!
- echo "[services] Caddy PID: ${CADDY_PID}"
PIDS_TO_KILL=()
[[ -n "${TAILSCALED_PID}" ]] && PIDS_TO_KILL+=("${TAILSCALED_PID}")
[[ -n "${COMFY_PID}" ]] && PIDS_TO_KILL+=("${COMFY_PID}")
[[ -n "${JUPYTER_PID}" ]] && PIDS_TO_KILL+=("${JUPYTER_PID}")
[[ -n "${CADDY_PID}" ]] && PIDS_TO_KILL+=("${CADDY_PID}")

PIDS_TO_WAIT=()
[[ -n "${COMFY_PID}" ]] && PIDS_TO_WAIT+=("${COMFY_PID}")
[[ -n "${JUPYTER_PID}" ]] && PIDS_TO_WAIT+=("${JUPYTER_PID}")
if [[ ${#PIDS_TO_WAIT[@]} -eq 0 && -n "${CADDY_PID}" ]]; then
    PIDS_TO_WAIT+=("${CADDY_PID}")
fi

cleanup() {
    echo "[services] Terminating services..."
    if [[ ${#PIDS_TO_KILL[@]} -gt 0 ]]; then
        kill -SIGTERM "${PIDS_TO_KILL[@]}" 2>/dev/null || true
        sleep 5
        for pid in "${PIDS_TO_KILL[@]}"; do
            if kill -0 "$pid" 2>/dev/null; then
                echo "[services] PID $pid still alive; sending SIGKILL."
                kill -SIGKILL "$pid" 2>/dev/null || true
            fi
        done
    fi
    if [[ -n "${TAILSCALED_PID}" ]] && kill -0 "${TAILSCALED_PID}" 2>/dev/null; then
        tailscale --socket="${TS_SOCKET_FILE}" logout || true
    fi
    echo "[services] Shutdown complete."
    exit 0
}
trap cleanup SIGTERM SIGINT
- echo "[services] Startup complete. Waiting on: ${PIDS_TO_WAIT[*]:-none}"
- if [[ ${#PIDS_TO_WAIT[@]} -gt 0 ]]; then
- wait -n "${PIDS_TO_WAIT[@]}"
- echo "[services] A primary process exited; shutting down..."
- cleanup
else
    if [[ -n "${CADDY_PID}" && -n "${TAILSCALED_PID}" ]]; then
        wait "${CADDY_PID}" "${TAILSCALED_PID}"
    elif [[ -n "${CADDY_PID}" ]]; then
        wait "${CADDY_PID}"
    elif [[ -n "${TAILSCALED_PID}" ]]; then
        wait "${TAILSCALED_PID}"
    else
        echo "[services] No services are running; exiting."
    fi
fi