Directory Structure:

└── ./
    └── LLV
        ├── .dockerignore
        ├── Caddyfile
        ├── Dockerfile
        └── start_services.sh

---
File: /LLV/.dockerignore
---

# Ignore everything
*

# Re-include only what we actually need in the image
!Dockerfile
!Caddyfile
!start_services.sh

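This whitelist-style .dockerignore keeps everything out of the build context except the three re-included files. If you want to confirm what actually reaches the builder, one option is a throwaway stage that copies the context and lists it; this is only a sketch, and /tmp/ctx-check.Dockerfile is a placeholder path:

# List the effective build context (run from the directory containing LLV).
cat > /tmp/ctx-check.Dockerfile <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx -type f
EOF
docker build --no-cache --progress=plain -f /tmp/ctx-check.Dockerfile ./LLV
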
---
File: /LLV/Caddyfile
---

{
    # We terminate inside Tailscale; no public HTTPS here.
    auto_https off
}

:80 {
    encode gzip zstd

    # ── ComfyUI UI under /comfyui (prefix stripped before proxy) ───────────────
    handle_path /comfyui/* {
        reverse_proxy 127.0.0.1:8188
    }

    # ── ComfyUI APIs & WebSocket (no prefix strip) ─────────────────────────────
    handle /api/* {
        reverse_proxy 127.0.0.1:8188
    }
    # ComfyUI uses a websocket at /ws for live updates.
    handle /ws/* {
        reverse_proxy 127.0.0.1:8188
    }

    # ── Static model folders proxied to ComfyUI (matches your old config) ──────
    handle /loras/* {
        reverse_proxy 127.0.0.1:8188
    }
    handle /loras_static/* {
        reverse_proxy 127.0.0.1:8188
    }
    handle /checkpoints_static/* {
        reverse_proxy 127.0.0.1:8188
    }

    # Optional: direct static file browse of the loras directory, independent of
    # ComfyUI. Visit /_loras/… if you want a raw listing/download.
    handle_path /_loras/* {
        root * /workspace/ComfyUI/models/loras
        file_server browse
    }

    # ── JupyterLab under /jupyter/ (Jupyter is started with base_url=/jupyter/) ─
    handle /jupyter/* {
        reverse_proxy 127.0.0.1:8888
    }

    # Default: send anything else to ComfyUI root (useful for top-level assets)
    handle /* {
        reverse_proxy 127.0.0.1:8188
    }
}

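A quick way to sanity-check the routing once the container is reachable over Tailscale. This is only a sketch: `comfy-pod` is a placeholder for the pod's tailnet hostname (the default set via RUNPOD_POD_HOSTNAME in start_services.sh below), and the token matches the hard-coded JUPYTER_TOKEN there:

# Smoke-test the reverse proxy from another machine on the tailnet.
curl -I http://comfy-pod/comfyui/                  # ComfyUI UI (prefix stripped before proxy)
curl -I http://comfy-pod/_loras/                   # Caddy's own file_server listing of the loras dir
curl -I "http://comfy-pod/jupyter/?token=runpod"   # JupyterLab, only if ENABLE_JUPYTER=true
curl -I http://comfy-pod/                          # default catch-all, lands on ComfyUI root
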
---
File: /LLV/Dockerfile
---

# syntax=docker/dockerfile:1.7
###############################################################################
# BUILD STAGE #################################################################
###############################################################################
ARG CUDA_VERSION=12.6.0
ARG UBUNTU_VERSION=22.04
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION} AS builder

# --- base tooling -----------------------------------------------------------
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        python3 python3-venv python3-distutils python3-pip \
        build-essential git curl ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# --- Python venv & core ML stack ---------------------------------------------
ARG TORCH_CUDA=cu126
ARG TORCH_VER=2.6.0
ARG TVISION_VER=0.21.0+${TORCH_CUDA}
ARG TAUDIO_VER=2.6.0+${TORCH_CUDA}
ARG XFORMERS_VER=0.0.29.post2

ENV VENV_PATH=/opt/venv
RUN python3 -m venv ${VENV_PATH} && \
    . ${VENV_PATH}/bin/activate && \
    pip install --upgrade --no-cache-dir pip setuptools wheel && \
    pip install --extra-index-url https://download.pytorch.org/whl/${TORCH_CUDA} \
        torch==${TORCH_VER}+${TORCH_CUDA} \
        torchvision==${TVISION_VER} \
        torchaudio==${TAUDIO_VER} \
        xformers==${XFORMERS_VER} \
        onnx==1.16.2 onnxruntime-gpu==1.22.0 numpy==1.26.4 && \
    curl -L -o /tmp/sageattention.whl \
        https://huggingface.co/MonsterMMORPG/SECourses_Premium_Flash_Attention/resolve/main/sageattention-2.1.1-cp310-cp310-linux_x86_64.whl && \
    # Rename to a universal wheel filename so pip installs it without platform-tag checks
    mv /tmp/sageattention.whl /tmp/sageattention-2.1.1-py3-none-any.whl && \
    pip install /tmp/sageattention-2.1.1-py3-none-any.whl && rm /tmp/sageattention-2.1.1-py3-none-any.whl && \
    find ${VENV_PATH} -name '__pycache__' -type d -exec rm -rf {} + && \
    pip cache purge

###############################################################################
# RUNTIME STAGE ###############################################################
###############################################################################
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION}

# Install runtime system dependencies
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        python3 python3-dev git curl ca-certificates gpg ffmpeg tini && \
    \
    # Add repositories and install Caddy & Tailscale
    curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg && \
    curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-stable.list > /dev/null && \
    curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/jammy.noarmor.gpg | gpg --dearmor -o /usr/share/keyrings/tailscale-archive-keyring.gpg && \
    curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/jammy.tailscale-keyring.list | tee /etc/apt/sources.list.d/tailscale.list > /dev/null && \
    apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends caddy tailscale && \
    rm -rf /var/lib/apt/lists/*

# Copy the pre-built Python virtual environment
ENV VENV_PATH=/opt/venv
COPY --from=builder ${VENV_PATH} ${VENV_PATH}

# Copy configuration and startup script
WORKDIR /workspace
COPY Caddyfile /etc/caddy/Caddyfile
COPY start_services.sh /usr/local/bin/start_services.sh
RUN chmod +x /usr/local/bin/start_services.sh

# Set environment variables for the container runtime
ENV PATH="${VENV_PATH}/bin:${PATH}" \
    PYTHONUNBUFFERED=1 \
    # Disable FP8 compilation (Inductor) for Ampere compatibility
    TORCH_INDUCTOR_FORCE_DISABLE_FP8="1"

# Use tini as the entrypoint to properly manage processes
ENTRYPOINT ["/usr/bin/tini", "-s", "--", "/usr/local/bin/start_services.sh"]

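A minimal build-and-run sketch. The image tag llv-comfy and the host paths are placeholders; the only details taken from the files above are the /workspace mount (where start_services.sh keeps ComfyUI, models, and Tailscale state) and the TAILSCALE_AUTHKEY / ENABLE_JUPYTER variables the script reads:

# Build from the directory containing the LLV folder.
docker build -t llv-comfy ./LLV

# Run with GPU access and a persistent workspace volume.
docker run --rm --gpus all \
  -v /path/to/workspace:/workspace \
  -e TAILSCALE_AUTHKEY=tskey-auth-xxxxxxxx \
  -e ENABLE_JUPYTER=true \
  llv-comfy
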
---
File: /LLV/start_services.sh
---

#!/usr/bin/env bash
set -euo pipefail

VENV_COMFY=${VENV_COMFY:-/opt/venv}
COMFY_DIR="/workspace/ComfyUI"
COMFY_LAUNCH_ARGS=${COMFY_LAUNCH_ARGS:-"--listen 0.0.0.0 --port 8188 --disable-auto-launch"}

echo "[services] Starting Tailscale..."
TS_STATE_DIR="/workspace/tailscale"
TS_STATE_FILE="${TS_STATE_DIR}/tailscaled.state"
TS_SOCKET_FILE="/var/run/tailscale/tailscaled.sock"

mkdir -p "${TS_STATE_DIR}"
mkdir -p "$(dirname "${TS_SOCKET_FILE}")"

tailscaled \
    --state="${TS_STATE_FILE}" \
    --socket="${TS_SOCKET_FILE}" \
    --tun=userspace-networking &
TAILSCALED_PID=$!
sleep 4

TS_UP_ARGS=("--hostname=${RUNPOD_POD_HOSTNAME:-comfy-pod}" "--accept-dns=false")
if [[ -n "${TAILSCALE_AUTHKEY:-}" ]]; then
    TS_UP_ARGS+=("--auth-key=${TAILSCALE_AUTHKEY}")
fi

if tailscale --socket="${TS_SOCKET_FILE}" up "${TS_UP_ARGS[@]}"; then
    echo "[services] Tailscale started successfully."
else
    echo "[services] Tailscale 'up' command failed or already up. Continuing..."
fi

echo "[services] Setting up ComfyUI..."
source "${VENV_COMFY}/bin/activate"

export TORCH_INDUCTOR_FORCE_DISABLE_FP8="1"
echo "[services] Forcing TORCH_INDUCTOR_FORCE_DISABLE_FP8=${TORCH_INDUCTOR_FORCE_DISABLE_FP8}"

COMFY_PID=""

install_node_deps() {
    local NODES_DIR_PATH="$1"
    local ACTION_DESCRIPTION="$2"

    if [ -d "${NODES_DIR_PATH}" ]; then
        echo "[services] Checking custom node dependencies in ${NODES_DIR_PATH} (${ACTION_DESCRIPTION})..."
        while IFS= read -r -d '' NODE_DIR; do
            if [ -f "${NODE_DIR}/requirements.txt" ]; then
                NODE_NAME=$(basename "${NODE_DIR}")
                echo "[services] Installing deps for node: ${NODE_NAME}"
                ( cd "${NODE_DIR}" && "${VENV_COMFY}/bin/pip" install -r requirements.txt ) || \
                    echo "[services] Warning: pip install failed for ${NODE_NAME}"
            fi
        done < <(find "${NODES_DIR_PATH}" -mindepth 1 -maxdepth 1 -type d -print0)
    else
        echo "[services] Custom nodes dir not found at ${NODES_DIR_PATH}."
    fi
}

if [ -f "${COMFY_DIR}/main.py" ]; then
    echo "[services] Found existing ComfyUI in ${COMFY_DIR}."
    cd "${COMFY_DIR}"

    if [ -f "requirements.txt" ]; then
        echo "[services] Ensuring ComfyUI requirements are present..."
        "${VENV_COMFY}/bin/pip" install --no-cache-dir -r requirements.txt || \
            echo "[services] Warning: ComfyUI requirements install failed."
    fi

    install_node_deps "${COMFY_DIR}/custom_nodes" "Existing"

    echo "[services] Launching ComfyUI: ${COMFY_LAUNCH_ARGS}"
    TORCH_INDUCTOR_FORCE_DISABLE_FP8="1" python main.py ${COMFY_LAUNCH_ARGS} &
    COMFY_PID=$!
    echo "[services] ComfyUI PID: ${COMFY_PID}"
else
    echo "[services] ComfyUI not found; cloning into ${COMFY_DIR}..."
    mkdir -p "$(dirname "${COMFY_DIR}")"
    if git clone https://github.com/comfyanonymous/ComfyUI.git "${COMFY_DIR}"; then
        echo "[services] Clone complete."
        cd "${COMFY_DIR}"
        if [ -f "requirements.txt" ]; then
            echo "[services] Installing ComfyUI requirements..."
            "${VENV_COMFY}/bin/pip" install --no-cache-dir -r requirements.txt || \
                echo "[services] Warning: ComfyUI requirements install failed."
        fi

        install_node_deps "${COMFY_DIR}/custom_nodes" "New clone"

        echo "[services] Launching ComfyUI: ${COMFY_LAUNCH_ARGS}"
        python main.py ${COMFY_LAUNCH_ARGS} &
        COMFY_PID=$!
        echo "[services] ComfyUI PID: ${COMFY_PID}"
    else
        echo "[services] Failed to clone ComfyUI. Please place it at ${COMFY_DIR}."
    fi
fi

deactivate
echo "[services] ComfyUI setup complete."

JUPYTER_PID=""
if [[ "${ENABLE_JUPYTER:-false}" == "true" ]]; then
    echo "[services] Starting JupyterLab..."
    source "${VENV_COMFY}/bin/activate"
    if ! command -v jupyter-lab &> /dev/null; then
        echo "[services] Warning: ENABLE_JUPYTER is true, but jupyterlab is not installed in ${VENV_COMFY}."
        echo "[services] Install it with 'pip install jupyterlab' or bake it into the image."
    else
        JUPYTER_TOKEN='runpod'
        echo "[services] JupyterLab available at: /jupyter/?token=${JUPYTER_TOKEN}"
        jupyter-lab --ip=127.0.0.1 --port=8888 --no-browser \
            --ServerApp.base_url=/jupyter \
            --ServerApp.token="${JUPYTER_TOKEN}" --ServerApp.password='' \
            --notebook-dir=/workspace --allow-root &
        JUPYTER_PID=$!
        echo "[services] JupyterLab PID: ${JUPYTER_PID}"
    fi
    deactivate
else
    echo "[services] JupyterLab disabled."
fi

echo "[services] Starting Caddy..."
caddy run --config /etc/caddy/Caddyfile --adapter caddyfile &
CADDY_PID=$!
echo "[services] Caddy PID: ${CADDY_PID}"

PIDS_TO_KILL=()
[[ -n "${TAILSCALED_PID}" ]] && PIDS_TO_KILL+=("${TAILSCALED_PID}")
[[ -n "${COMFY_PID}" ]] && PIDS_TO_KILL+=("${COMFY_PID}")
[[ -n "${JUPYTER_PID}" ]] && PIDS_TO_KILL+=("${JUPYTER_PID}")
[[ -n "${CADDY_PID}" ]] && PIDS_TO_KILL+=("${CADDY_PID}")

PIDS_TO_WAIT=()
[[ -n "${COMFY_PID}" ]] && PIDS_TO_WAIT+=("${COMFY_PID}")
[[ -n "${JUPYTER_PID}" ]] && PIDS_TO_WAIT+=("${JUPYTER_PID}")
if [[ ${#PIDS_TO_WAIT[@]} -eq 0 && -n "${CADDY_PID}" ]]; then
    PIDS_TO_WAIT+=("${CADDY_PID}")
fi

cleanup() {
    echo "[services] Terminating services..."
    if [[ ${#PIDS_TO_KILL[@]} -gt 0 ]]; then
        kill -SIGTERM "${PIDS_TO_KILL[@]}" 2>/dev/null || true
        sleep 5
        for pid in "${PIDS_TO_KILL[@]}"; do
            if kill -0 "$pid" 2>/dev/null; then
                echo "[services] PID $pid still alive; sending SIGKILL."
                kill -SIGKILL "$pid" 2>/dev/null || true
            fi
        done
    fi
    if [[ -n "${TAILSCALED_PID}" ]] && kill -0 "${TAILSCALED_PID}" 2>/dev/null; then
        tailscale --socket="${TS_SOCKET_FILE}" logout || true
    fi
    echo "[services] Shutdown complete."
    exit 0
}

trap cleanup SIGTERM SIGINT

echo "[services] Startup complete. Waiting on: ${PIDS_TO_WAIT[*]:-none}"
if [[ ${#PIDS_TO_WAIT[@]} -gt 0 ]]; then
    # '|| true' keeps set -e from aborting before cleanup when a waited process exits non-zero.
    wait -n "${PIDS_TO_WAIT[@]}" || true
    echo "[services] A primary process exited; shutting down..."
    cleanup
else
    if [[ -n "${CADDY_PID}" && -n "${TAILSCALED_PID}" ]]; then
        wait "${CADDY_PID}" "${TAILSCALED_PID}"
    elif [[ -n "${CADDY_PID}" ]]; then
        wait "${CADDY_PID}"
    elif [[ -n "${TAILSCALED_PID}" ]]; then
        wait "${TAILSCALED_PID}"
    else
        echo "[services] No services are running; exiting."
    fi
fi

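The script is configured entirely through environment variables. A summary of the knobs it reads, with defaults taken from the script itself (values on the right are examples, not requirements):

TAILSCALE_AUTHKEY=tskey-auth-xxxxxxxx   # optional; forwarded to 'tailscale up --auth-key=...'
RUNPOD_POD_HOSTNAME=comfy-pod           # tailnet hostname (default: comfy-pod)
ENABLE_JUPYTER=true                     # start JupyterLab on 127.0.0.1:8888 behind /jupyter/ (default: false)
VENV_COMFY=/opt/venv                    # venv used for ComfyUI and custom-node deps (default: /opt/venv)
COMFY_LAUNCH_ARGS="--listen 0.0.0.0 --port 8188 --disable-auto-launch"   # ComfyUI CLI flags (this is the default)
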