Skip to content

Instantly share code, notes, and snippets.

@yakovkhalinsky
Created April 20, 2025 11:39
Show Gist options
  • Save yakovkhalinsky/6cebc48a2ee6294064968609abb12cb0 to your computer and use it in GitHub Desktop.
Save yakovkhalinsky/6cebc48a2ee6294064968609abb12cb0 to your computer and use it in GitHub Desktop.
Docker LLM AI Setup
# Base image: CUDA 12.8.1 runtime on Ubuntu 22.04
# NOTE: ubuntu22.04 ships Python 3.10 via python3-pip (the original comment
# claiming "Python 3.11 and CUDA 12.5" did not match the image tag below).
FROM nvidia/cuda:12.8.1-runtime-ubuntu22.04

# Suppress interactive prompts from apt during the build
ENV DEBIAN_FRONTEND=noninteractive

# Install system dependencies:
#   git             - needed to clone the custom-node repositories below
#   python3-pip     - pulls in the distro Python 3 and pip
#   libgl1-mesa-glx - OpenGL runtime needed by OpenCV-based image nodes
# --no-install-recommends keeps the layer small; the apt list cleanup must be
# in the same RUN so the cache never lands in a committed layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    python3-pip \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Copy the pre-cloned ComfyUI source tree into the image
COPY ./comfyui/comfyui-src /app

# Install Python dependencies (--no-cache-dir avoids baking pip's wheel cache
# into the image; single RUN keeps the upgrade + install in one layer)
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Clone and install ComfyUI Manager (shallow clone: history is not needed)
RUN git clone --depth 1 https://github.com/ltdrdata/ComfyUI-Manager.git /app/custom_nodes/ComfyUI-Manager && \
    pip install --no-cache-dir -r /app/custom_nodes/ComfyUI-Manager/requirements.txt

# Clone and install GGUF (quantized-model) support for ComfyUI
RUN git clone --depth 1 https://github.com/city96/ComfyUI-GGUF.git /app/custom_nodes/ComfyUI-GGUF && \
    pip install --no-cache-dir --upgrade gguf

# Expose the port used by ComfyUI
EXPOSE 8188

# Bind to 0.0.0.0 so the server is reachable from outside the container
CMD ["python3", "main.py", "--listen", "0.0.0.0"]
services:
  # Ollama LLM API server (GPU-accelerated).
  # NOTE(review): both `runtime: nvidia` and the `deploy.resources` GPU
  # reservation are kept from the original — they are redundant on modern
  # Compose, but which one takes effect depends on the installed Docker/
  # nvidia-container-toolkit versions; confirm before removing either.
  ollama:
    image: ollama/ollama
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
    ports:
      - "11434:11434"
    volumes:
      # Persist downloaded models on the host between container recreations
      - "./ollama:/root/.ollama"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: always  # added for consistency: every other service restarts

  # Browser front-end; reaches services on the host via host.docker.internal
  open-webui:
    image: ghcr.io/open-webui/open-webui:latest
    ports:
      - "8080:8080"
    volumes:
      - ./open-webui:/app/backend/data
    extra_hosts:
      - "host.docker.internal:host-gateway"
    restart: always

  # SearXNG metasearch engine
  searxng:
    image: searxng/searxng:latest
    ports:
      - "8088:8080"  # host 8088 -> container 8080 (8080 is taken by open-webui)
    volumes:
      - ./searxng/settings.yml:/etc/searxng/settings.yml
    restart: always

  # ComfyUI image-generation server, built from the Dockerfile in ./comfyui
  comfyui:
    build:
      context: .
      dockerfile: comfyui/Dockerfile
    runtime: nvidia
    environment:
      # Bare entry: value is passed through from the host environment
      - NVIDIA_VISIBLE_DEVICES
    ports:
      - "8188:8188"
    volumes:
      # Keep large model files on the host rather than inside the image
      - ./comfyui/models:/app/models
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: always

  # Uncomment to verify GPU visibility from inside a container:
  # test:
  #   image: nvidia/cuda:12.8.1-base-ubuntu24.04
  #   command: nvidia-smi
  #   deploy:
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: all
  #             capabilities: [gpu]
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment