unified open-webui and tools setup script
#!/bin/bash
# Exit immediately if a command exits with a non-zero status.
set -e
# --- WSL Docker Credential Helper Fix ---
# In WSL, Docker can sometimes fail trying to find the Windows credential helper.
# This creates a config file to disable it, fixing the "docker-credential-desktop.exe not found" error.
echo "ℹ️ Applying WSL Docker credential helper fix..."
mkdir -p ~/.docker
echo '{"credsStore": ""}' > ~/.docker/config.json
echo "✅ Docker credential helper fix applied."
# --- Docker Compose Command Detection ---
if docker compose version &> /dev/null; then
    DOCKER_COMPOSE_CMD="docker compose"
    echo "✅ Found 'docker compose' (V2). Using it."
elif docker-compose version &> /dev/null; then
    DOCKER_COMPOSE_CMD="docker-compose"
    echo "✅ Found 'docker-compose' (V1). Using it."
else
    echo "❌ Neither 'docker compose' nor 'docker-compose' found. Please install Docker Compose."
    exit 1
fi
# --- Argument Parsing for Update Flag ---
UPDATE_FLAG=false
if [[ "$1" == "--update" || "$1" == "-u" ]]; then
    UPDATE_FLAG=true
    echo "✅ Update flag detected. The script will check for and pull the latest container images."
fi
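# Usage examples (assuming the script is saved as setup.sh; note that a plain run
# removes existing data volumes, while --update preserves them -- see the teardown
# step near the end of this script):
#   ./setup.sh            # clean install, wipes data volumes
#   ./setup.sh --update   # pull latest images, keep data volumes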
# --- Generate Secure Random Credentials ---
echo "⚙️ Generating secure, random credentials..."
LITELLM_MASTER_KEY=$(openssl rand -hex 32)
DB_USER="user_$(openssl rand -hex 4)"
DB_PASS=$(openssl rand -hex 16)
# --- GPU Prerequisite Check (Informational) ---
if ! command -v nvidia-smi &> /dev/null; then
    echo "--------------------------------- WARNING ----------------------------------"
    echo "NVIDIA drivers ('nvidia-smi') not found. The GPU containers (LocalAI, vLLM)"
    echo "will fail to start without proper NVIDIA drivers and the NVIDIA Container Toolkit."
    echo "Press Ctrl+C to abort or Enter to continue at your own risk."
    echo "-----------------------------------------------------------------------------"
    read -p ""
fi
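# Optional smoke test for the NVIDIA Container Toolkit (a minimal sketch; the CUDA
# image tag is an assumption -- any recent nvidia/cuda base tag works):
#   docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi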
# --- Interactive Port Configuration ---
echo "-----------------------------------------------------"
echo "Please specify the HOST ports for the services."
echo "Press Enter to accept the default shown in brackets."
echo "-----------------------------------------------------"
| read -e -p "Enter port for Open WebUI [8088]: " -i "8088" WEBUI_PORT | |
| echo "-> Open WebUI Port set to: ${WEBUI_PORT}" | |
| read -e -p "Enter port for LiteLLM Proxy [4000]: " -i "4000" LITELLM_PORT | |
| echo "-> LiteLLM Port set to: ${LITELLM_PORT}" | |
| read -e -p "Enter port for vLLM API [8000]: " -i "8000" VLLM_PORT | |
| echo "-> vLLM Port set to: ${VLLM_PORT}" | |
| read -e -p "Enter port for LocalAI API [8082]: " -i "8082" LOCALAI_PORT | |
| echo "-> LocalAI Port set to: ${LOCALAI_PORT}" | |
| read -e -p "Enter port for SearXNG [8081]: " -i "8081" SEARXNG_PORT | |
| echo "-> SearXNG Port set to: ${SEARXNG_PORT}" | |
| read -e -p "Enter port for Ollama API [11434]: " -i "11434" OLLAMA_PORT | |
| echo "-> Ollama Port set to: ${OLLAMA_PORT}" | |
| read -e -p "Enter port for Qdrant REST API [6333]: " -i "6333" QDRANT_REST_PORT | |
| echo "-> Qdrant REST Port set to: ${QDRANT_REST_PORT}" | |
| read -e -p "Enter port for Qdrant gRPC API [6334]: " -i "6334" QDRANT_GRPC_PORT | |
| echo "-> Qdrant gRPC Port set to: ${QDRANT_GRPC_PORT}" | |
| read -e -p "Enter port for Tika API (on host) [9998]: " -i "9998" TIKA_PORT | |
| echo "-> Tika Port set to: ${TIKA_PORT}" | |
# --- Create .env File for Docker Compose ---
echo ""
echo "Creating .env file for service configuration..."
cat <<EOF > .env
# This file is auto-generated by the setup script.
# Service Ports
WEBUI_PORT=${WEBUI_PORT}
LITELLM_PORT=${LITELLM_PORT}
VLLM_PORT=${VLLM_PORT}
LOCALAI_PORT=${LOCALAI_PORT}
SEARXNG_PORT=${SEARXNG_PORT}
OLLAMA_PORT=${OLLAMA_PORT}
QDRANT_REST_PORT=${QDRANT_REST_PORT}
QDRANT_GRPC_PORT=${QDRANT_GRPC_PORT}
TIKA_PORT=${TIKA_PORT}
# Database Credentials
DB_USER=${DB_USER}
DB_PASS=${DB_PASS}
EOF
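# Docker Compose picks up this .env file automatically from the project directory;
# to preview the fully resolved configuration later, you can run:
#   ${DOCKER_COMPOSE_CMD} config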
# --- Pre-configuration for SearXNG ---
echo "Creating SearXNG configuration directory..."
mkdir -p searxng
SECRET_KEY=$(openssl rand -hex 32)
echo "Generating SearXNG settings.yml with a new secret key..."
# Note: The base_url here is for externally facing links generated by SearXNG.
cat <<EOF > searxng/settings.yml
use_default_settings: true
server:
  secret_key: "${SECRET_KEY}"
  base_url: "http://localhost:${SEARXNG_PORT}"
search:
  # Open WebUI queries SearXNG's JSON API, which the default settings do not
  # expose; enable it alongside the normal HTML output.
  formats:
    - html
    - json
redis:
  host: 'redis'
  port: 6379
  db: 0
EOF
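# Quick check once the stack is up (illustrative query; relies on the 'json'
# format enabled above):
#   curl "http://localhost:${SEARXNG_PORT}/search?q=test&format=json"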
# --- Pre-configuration for LiteLLM ---
echo "Generating LiteLLM configuration..."
# Note: api_base URLs use the Docker service name and INTERNAL port for communication.
cat <<EOF > litellm_config.yaml
# LiteLLM Configuration - Auto-generated by setup script
model_list:
  - model_name: llama3-from-ollama
    litellm_params:
      model: ollama/llama3
      api_base: http://ollama:11434
  - model_name: opt-125m-from-vllm
    litellm_params:
      model: openai/facebook/opt-125m
      # The openai/ provider targets OpenAI-compatible servers at their /v1 path.
      api_base: http://vllm:8000/v1
  - model_name: localai-embedding-model
    litellm_params:
      model: text-embedding-ada-002
      api_base: http://localai:8080/v1
      api_key: "not-needed"
router_settings:
  allow_fallbacks: true
litellm_settings:
  set_verbose: false
  cache: true
  cache_params:
    type: redis
    host: "redis"
    port: 6379
general_settings:
  master_key: "${LITELLM_MASTER_KEY}"
EOF
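# Once running, the proxy speaks the OpenAI API; an illustrative call (uses the
# model name from the config above and the master key printed at the end):
#   curl http://localhost:${LITELLM_PORT}/v1/chat/completions \
#     -H "Authorization: Bearer <LITELLM_MASTER_KEY>" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama3-from-ollama", "messages": [{"role": "user", "content": "Hello"}]}'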
# --- Install Custom Tika on Host ---
echo ""
echo "--- Installing Custom Apache Tika on Host ---"
echo "Downloading Tika installation script from Gist..."
curl -o tika-install.sh https://gist.githubusercontent.com/sycomix/51a1ca264abe77673636b7201401dbd6/raw/376ae996987c5301877907d65245298d56f815f8/tika-install.sh
chmod +x tika-install.sh
echo "WARNING: The Tika installer script is about to run."
echo "It may require sudo privileges to install dependencies and will compile from source."
read -p "Press Enter to continue..."
./tika-install.sh
echo "✅ Custom Tika installation process complete."
| echo "Configuration complete. Creating docker-compose.yml..." | |
| # --- Create the docker-compose.yml file --- | |
| # Note: The top-level 'version' tag has been removed as it is obsolete in modern Docker Compose. | |
| cat <<EOF > docker-compose.yml | |
| services: | |
| # --- High-Performance Inference Engines --- | |
| vllm: | |
| image: vllm/vllm-openai:latest | |
| container_name: vllm | |
| ports: | |
| - "\${VLLM_PORT}:8000" | |
| volumes: | |
| - huggingface_cache:/root/.cache/huggingface | |
| command: ["--host", "0.0.0.0", "--model", "facebook/opt-125m"] | |
| deploy: &gpu_deploy | |
| resources: | |
| reservations: | |
| devices: | |
| - driver: nvidia | |
| count: all | |
| capabilities: [gpu] | |
| networks: | |
| - webui-network | |
| restart: unless-stopped | |
| # --- General Purpose / Multi-Model Engines --- | |
| ollama: | |
| image: ollama/ollama | |
| container_name: ollama | |
| ports: | |
| - "\${OLLAMA_PORT}:11434" | |
| volumes: | |
| - ollama_data:/root/.ollama | |
| deploy: *gpu_deploy | |
| networks: | |
| - webui-network | |
| restart: unless-stopped | |
| localai: | |
| image: localai/localai:latest-aio-gpu-nvidia-cuda-12 | |
| container_name: localai | |
| ports: | |
| - "\${LOCALAI_PORT}:8080" | |
| volumes: | |
| - localai_data:/build/models | |
| deploy: *gpu_deploy | |
| networks: | |
| - webui-network | |
| restart: unless-stopped | |
| # --- Central Management and UI --- | |
| litellm: | |
| image: ghcr.io/berriai/litellm:main-stable | |
| container_name: litellm | |
| ports: | |
| - "\${LITELLM_PORT}:4000" | |
| environment: | |
| # Connect to postgres using the service name 'postgres' and internal port 5432 | |
| - DATABASE_URL=postgresql://\${DB_USER}:\${DB_PASS}@postgres:5432/litellm_db | |
| # Force prisma to use the correct binary engine to avoid "wolfi" warnings | |
| - PRISMA_CLI_BINARY_TARGETS=debian-openssl-3.0.x | |
| volumes: | |
| - ./litellm_config.yaml:/app/config.yaml:ro | |
| command: ["--config", "/app/config.yaml", "--host", "0.0.0.0"] | |
| depends_on: | |
| postgres: | |
| condition: service_healthy | |
| networks: | |
| - webui-network | |
| restart: unless-stopped | |
| open-webui: | |
| image: ghcr.io/open-webui/open-webui:dev | |
| container_name: open-webui | |
| ports: | |
| - "\${WEBUI_PORT}:8080" | |
| environment: | |
| - VECTOR_DB=qdrant | |
| # Connect to qdrant using the service name 'qdrant' and internal port 6333 | |
| - QDRANT_URI=http://qdrant:6333 | |
      - ENABLE_WEB_SEARCH=true
      - WEB_SEARCH_ENGINE=searxng
      # Connect to searxng using the service name 'searxng' and internal port 8080.
      # Open WebUI takes a query-URL template with a <query> placeholder.
      - SEARXNG_QUERY_URL=http://searxng:8080/search?q=<query>
      # Enable Tika for document parsing on the HOST (Open WebUI variable names:
      # CONTENT_EXTRACTION_ENGINE and TIKA_SERVER_URL).
      - CONTENT_EXTRACTION_ENGINE=tika
      - TIKA_SERVER_URL=http://host.docker.internal:\${TIKA_PORT}
    extra_hosts:
      # Lets host.docker.internal resolve on native Linux/WSL Docker engines;
      # Docker Desktop provides this mapping itself.
      - "host.docker.internal:host-gateway"
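    # Optional wiring (an assumption, not generated by default): to have Open WebUI
    # list the LiteLLM models automatically, add under 'environment:' above:
    #   - OPENAI_API_BASE_URL=http://litellm:4000/v1
    #   - OPENAI_API_KEY=<your LITELLM_MASTER_KEY>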
    volumes:
      - open_webui_data:/app/backend/data
    depends_on:
      - litellm
    networks:
      - webui-network
    restart: unless-stopped

  # --- Supporting Services ---
  postgres:
    image: postgres:16
    container_name: postgres
    environment:
      POSTGRES_USER: \${DB_USER}
      POSTGRES_PASSWORD: \${DB_PASS}
      POSTGRES_DB: litellm_db
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - webui-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U \${DB_USER} -d litellm_db"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped

  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
    ports:
      - "\${QDRANT_REST_PORT}:6333"
      - "\${QDRANT_GRPC_PORT}:6334"
    volumes:
      - qdrant_data:/qdrant/storage
    networks:
      - webui-network
    restart: unless-stopped

  redis:
    image: redis:7
    container_name: redis
    volumes:
      - redis_data:/data
    networks:
      - webui-network
    restart: unless-stopped

  searxng:
    image: docker.io/searxng/searxng:latest
    container_name: searxng
    ports:
      # The host port is configurable, but the internal container port is 8080.
      - "\${SEARXNG_PORT}:8080"
    volumes:
      - ./searxng:/etc/searxng:rw
    depends_on:
      - redis
    networks:
      - webui-network
    restart: unless-stopped

volumes:
  huggingface_cache:
  ollama_data:
  localai_data:
  open_webui_data:
  qdrant_data:
  redis_data:
  postgres_data:

networks:
  webui-network:
    driver: bridge
EOF
# --- Verify Settings and Start Services ---
echo ""
echo "-----------------------------------------------------"
echo "✅ The following settings have been written to the .env file:"
echo "-----------------------------------------------------"
cat .env
echo "-----------------------------------------------------"
read -p "Press Enter to continue and start the containers..."
# --- Check for updates if flag is set ---
if [ "$UPDATE_FLAG" = true ]; then
    echo ""
    echo "🔄 Pulling the latest versions of all container images. This may take a moment..."
    ${DOCKER_COMPOSE_CMD} pull
    echo "✅ Image update check complete."
fi
| echo "" | |
| # Conditionally remove volumes based on the update flag | |
| if [ "$UPDATE_FLAG" = true ]; then | |
| echo "Stopping any old containers before updating (preserving data volumes)..." | |
| ${DOCKER_COMPOSE_CMD} down | |
| else | |
| echo "Stopping any old containers and removing data volumes for a clean start..." | |
| ${DOCKER_COMPOSE_CMD} down --volumes | |
| fi | |
| echo "Starting all services in detached mode..." | |
| ${DOCKER_COMPOSE_CMD} up -d | |
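# To verify the deployment, check container status and tail a service's logs:
#   ${DOCKER_COMPOSE_CMD} ps
#   ${DOCKER_COMPOSE_CMD} logs -f open-webui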
| echo "" | |
| echo "--------------------------------------------------------------------------" | |
| echo "β Deployment Complete!" | |
| echo "" | |
| echo "Access Services at: (These URLs use the host ports you configured)" | |
| echo "- Open WebUI: http://localhost:${WEBUI_PORT}" | |
| echo "- LiteLLM UI/API: http://localhost:${LITELLM_PORT}" | |
| echo "- vLLM API: http://localhost:${VLLM_PORT}" | |
| echo "- LocalAI API: http://localhost:${LOCALAI_PORT}" | |
| echo "- Ollama API: http://localhost:${OLLAMA_PORT}" | |
| echo "- Qdrant REST API: http://localhost:${QDRANT_REST_PORT}" | |
| echo "- Tika API (on host): http://localhost:${TIKA_PORT}" | |
| echo "- SearXNG: http://localhost:${SEARXNG_PORT}" | |
| echo "" | |
| echo "------------------------- IMPORTANT INFORMATION --------------------------" | |
| echo "π SAVE THESE RANDOMLY GENERATED SECRETS! π" | |
| echo "--------------------------------------------------------------------------" | |
| echo "π LiteLLM Master Key: ${LITELLM_MASTER_KEY}" | |
| echo "π€ Database User: ${DB_USER}" | |
| echo "π Database Password: ${DB_PASS}" | |
| echo "--------------------------------------------------------------------------" | |
| echo "" | |
| echo "--------------------------- IMPORTANT NEXT STEPS -------------------------" | |
| echo "1. CHOOSE A MODEL FOR vLLM" | |
| echo " - Open the 'docker-compose.yml' file." | |
| echo " - In the 'vllm' service, change 'facebook/opt-125m' to your desired model." | |
| echo " - Apply changes with: ${DOCKER_COMPOSE_CMD} up -d --force-recreate" | |
| echo "" | |
| echo "2. CONFIGURE MODELS IN LITELLM" | |
| echo " - Open the 'litellm_config.yaml' file to add/edit models." | |
| echo " - Restart LiteLLM to apply changes: ${DOCKER_COMPOSE_CMD} restart litellm" |