[py]Animagine XL V3.1 Test
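
A small benchmark of Animagine XL 3.1 with diffusers. Flags select the acceleration path: --dml runs on DirectML, --zluda runs a CUDA build of PyTorch on AMD GPUs through ZLUDA, and --lcm fuses an LCM-LoRA with a tiny VAE to cut inference to 6 steps. Each run saves a PNG and appends its timing to test/times.txt.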
model = "cagliostrolab/animagine-xl-3.1"
vae = "madebyollin/taesdxl"
lcm_lora = "../lcm-lora/lcm-animagine-3.safetensors"
prompt = "1girl, original, eating pizza"
negative_prompt = "nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument("--dml", action="store_true")
argparser.add_argument("--lcm", action="store_true")
argparser.add_argument("--zluda", action="store_true")
args = argparser.parse_args()
if args.zluda:
    import zluda  # see zluda.py below; must be imported before torch touches CUDA
import diffusers, torch

# Defaults: CPU, float32, 10 steps, guidance scale 5
device, suffix, dtype, steps, gsc = None, "", torch.float32, 10, 5
if args.dml:
    import torch_directml
    device = torch_directml.device()
    suffix = "-dml"
    dtype = torch.float16
    print("DirectML enabled")
elif args.zluda:
    device = torch.cuda.current_device()
    suffix = "-zluda"
    dtype = torch.float16
    print("ZLUDA enabled")
pipe = diffusers.DiffusionPipeline.from_pretrained(
    model,
    torch_dtype=dtype,
    use_safetensors=True,
)
if args.lcm:
    # Swap in the tiny VAE, fuse the LCM-LoRA, and switch to the LCM scheduler
    pipe.vae = diffusers.AutoencoderTiny.from_pretrained(vae, torch_dtype=dtype)
    pipe.load_lora_weights(lcm_lora, torch_dtype=dtype, adapter_name="lcm")
    pipe.fuse_lora()
    pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
    steps, gsc = 6, 1  # LCM wants few steps and a guidance scale near 1
    suffix = f"-lcm{suffix}"
    print("LCM enabled")
if device is not None:
    pipe = pipe.to(device)
from datetime import datetime

# Generate one image per seed and log wall-clock timings
seeds = [12345]
for seed in seeds:
    start = datetime.now()
    fn = f"{seed}-{steps}{suffix}"
    print(f"[start] {fn}: {start}")
    torch.manual_seed(seed)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=1024,
        height=1024,
        num_inference_steps=steps,
        guidance_scale=gsc,
    )
    result.images[0].save(f"test/{fn}.png")
    end = datetime.now()
    dur = end - start
    print(f"[end  ] {fn}: {end} ({dur})")
    with open("test/times.txt", "a", encoding="utf-8") as f:
        f.write(f"{start}, {end}, {dur}: {fn}\n")
# zluda.py
# copy `rocm.py` and `zluda_installer.py` from
# https://github.com/vladmandic/automatic/tree/master/modules
import os
from modules import zluda_installer

# Download ZLUDA on first run, then place and load its libraries
zluda_path = zluda_installer.get_path()
if not os.path.exists(zluda_path):
    zluda_installer.install(zluda_path)
zluda_installer.make_copy(zluda_path)
zluda_installer.load(zluda_path)
# cf. https://github.com/vladmandic/automatic/blob/master/modules/zluda.py
import torch

do_nothing = lambda _: None  # pylint: disable=unnecessary-lambda-assignment

# ZLUDA cannot translate cuDNN or the flash/memory-efficient SDP kernels,
# so force the math backend and stub out the toggles so later code cannot
# re-enable the unsupported ones.
torch.backends.cudnn.enabled = False
torch.backends.cuda.enable_flash_sdp(False)
torch.backends.cuda.enable_flash_sdp = do_nothing
torch.backends.cuda.enable_math_sdp(True)
torch.backends.cuda.enable_math_sdp = do_nothing
torch.backends.cuda.enable_mem_efficient_sdp(False)
torch.backends.cuda.enable_mem_efficient_sdp = do_nothing
if hasattr(torch.backends.cuda, "enable_cudnn_sdp"):
    torch.backends.cuda.enable_cudnn_sdp(False)
    torch.backends.cuda.enable_cudnn_sdp = do_nothing
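
Without this patch, PyTorch's scaled-dot-product attention would try the flash or memory-efficient kernels, which ZLUDA does not support; pinning the math backend keeps the diffusers attention path working at some cost in speed. Replacing the enable_* functions with a no-op guards against libraries that toggle the backends themselves.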