Last active
January 30, 2024 10:57
-
-
Save sko00o/7b77a06ce881e57f7df63bebfa077a94 to your computer and use it in GitHub Desktop.
Install AUTOMATIC1111 (a1111) stable-diffusion-webui on WSL2
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env bash
# Install AUTOMATIC1111 stable-diffusion-webui (pinned at v1.4.1) on WSL2 with an NVIDIA GPU.
# Based on: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs#windows-11-wsl2-instructions
# NOTE: downloads several GB (Anaconda, CUDA toolkit, model repos) — run with a stable connection.
set -euo pipefail

# --- Install conda (skipped if a conda is already on PATH) ---
readonly ANACONDA_INSTALLER="Anaconda3-2022.05-Linux-x86_64.sh"
if ! command -v conda >/dev/null; then
  wget "https://repo.anaconda.com/archive/${ANACONDA_INSTALLER}"
  chmod +x "$ANACONDA_INSTALLER"
  "./${ANACONDA_INSTALLER}"
fi

# --- Clone webui repo at a pinned release tag ---
git clone -b v1.4.1 https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
cd stable-diffusion-webui || exit 1

# --- Create and activate the conda env ---
conda env create -f environment-wsl2.yaml
# 'conda activate' only works in a script after loading the shell hook.
eval "$(conda shell.bash hook)"
conda activate automatic

# --- Clone repositories for Stable Diffusion and (optionally) CodeFormer/BLIP ---
mkdir -p repositories
git clone https://github.com/CompVis/stable-diffusion.git repositories/stable-diffusion-stability-ai
git clone https://github.com/CompVis/taming-transformers.git repositories/taming-transformers
git clone https://github.com/sczhou/CodeFormer.git repositories/CodeFormer
git clone https://github.com/salesforce/BLIP.git repositories/BLIP

# --- Install requirements of Stable Diffusion ---
pip install transformers==4.19.2 diffusers invisible-watermark --prefer-binary
# k-diffusion sampler library
pip install git+https://github.com/crowsonkb/k-diffusion.git --prefer-binary
# (optional) GFPGAN — face restoration
pip install git+https://github.com/TencentARC/GFPGAN.git --prefer-binary
# (optional) CodeFormer requirements — face restoration
pip install -r repositories/CodeFormer/requirements.txt --prefer-binary
# requirements of the web ui itself
pip install -r requirements.txt --prefer-binary

# --- Install CUDA toolkit, see https://docs.nvidia.com/cuda/wsl-user-guide/index.html ---
readonly CUDA_RUNFILE="cuda_12.2.0_535.54.03_linux.run"
wget "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/${CUDA_RUNFILE}"
sudo sh "$CUDA_RUNFILE"
# Sanity-check that WSL2 GPU passthrough is working.
/usr/lib/wsl/lib/nvidia-smi

# --- Install PyTorch stack built against CUDA 11.8 ---
pip install torch torchvision torchaudio torchtext torchdata \
  --extra-index-url https://download.pytorch.org/whl/cu118 -U

# --- Launch web server; it installs some remaining deps on first run ---
python3 launch.py --listen --xformers --enable-insecure-extension-access --gradio-queue --port 10000
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment