Skip to content

Instantly share code, notes, and snippets.

@thinkyhead
Last active January 20, 2025 03:54
Show Gist options
  • Save thinkyhead/6d0a63f67c1bab6a7eb7359c60183b22 to your computer and use it in GitHub Desktop.
Imagine with ComfyUI in iTerm2, Ghostty, etc.

ComfyUI Terminal Inline Image

Invoke ComfyUI in a terminal program like Ghostty (Kitty Terminal Graphics Protocol) or iTerm2 (Inline Images Protocol) and display the resulting image inline.

Usage

usage: imagine-v1.py [-h] [--comfyui COMFYUI] [--workflow WORKFLOW] [--model MODEL] [--seed SEED]
                     [--steps STEPS] [--sampler SAMPLER] [--scheduler SCHEDULER]
                     [--negative NEGATIVE] [--ratio RATIO] [--size SIZE] [--width WIDTH]
                     [--height HEIGHT] [--scale SCALE]
                     [prompt]

Generate images using ComfyUI. Enquote the prompt or escape all quotes. Without a prompt run
interactively.

positional arguments:
  prompt                Prompt text for image generation.

options:
  -h, --help            show this help message and exit

workflow:
  --comfyui COMFYUI     ComfyUI base URL
  --workflow WORKFLOW   Choose a workflow template.
  --model MODEL         Override the model in the workflow.

generation:
  --seed SEED           Override the seed for the workflow.
  --steps STEPS         Override the number of steps in the workflow.
  --sampler SAMPLER     Override the sampler for the workflow. (e.g., ddim, euler, dpmpp_2m...)
  --scheduler SCHEDULER
                        Override the scheduler for the workflow. (e.g., normal, karras,
                        sgm_uniform, simple...)
  --negative NEGATIVE   Provide a negative prompt. Take care to enquote.

dimensions:
  --ratio RATIO         Set a w:h ratio, applying to the native pixel count. (e.g., 16:9)
  --size SIZE           Override both image width and height in the workflow.
  --width WIDTH         Override image width in the workflow.
  --height HEIGHT       Override image height in the workflow.
  --scale SCALE         Override image scaling (if any) in the workflow.

'imagine' Shortcut

Add this to your .zshrc or .bashrc file:

# imagine ... shortcut for easy image generation
imagine() {
  if [[ -z $IMAGINE_INIT ]]; then
    IMAGINE_INIT=1
    #eval "$(conda shell.zsh hook)"
    #conda activate imagine
  fi
  /path/to/imagine-v1.py $@
}

Then you can generate an image on the command line like so...

> imagine a kung fu teddy bear with a samurai sword fighting a magic dragon

Modify the commented-out `conda activate` line to match your particular Python environment.

{
"fieldmap": {
"prompt": [ "3", "text" ],
"model": [ "1", "unet_name" ],
"width": [ "9", "width" ],
"height": [ "9", "height" ],
"sampler": [ "7", "sampler_name" ],
"scheduler": [ "8", "scheduler" ],
"steps": [ "8", "steps" ],
"seed": [ "4", "noise_seed" ],
"filename_prefix": [ "13", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"unet_name": "flux1-dev-fp8.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader"
},
"2": {
"inputs": {
"clip_name1": "t5xxl_fp16.safetensors",
"clip_name2": "clip_l.safetensors",
"type": "flux"
},
"class_type": "DualCLIPLoader"
},
"3": {
"inputs": {
"text": "prompt",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"4": {
"inputs": {
"noise_seed": -1
},
"class_type": "RandomNoise"
},
"5": {
"inputs": {
"guidance": 3.5,
"conditioning": [ "3", 0 ]
},
"class_type": "FluxGuidance"
},
"6": {
"inputs": {
"model": [ "1", 0 ],
"conditioning": [ "5", 0 ]
},
"class_type": "BasicGuider"
},
"7": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "KSamplerSelect"
},
"8": {
"inputs": {
"scheduler": "simple",
"steps": 20,
"denoise": 1,
"model": [ "1", 0 ]
},
"class_type": "BasicScheduler"
},
"9": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage"
},
"10": {
"inputs": {
"noise": [ "4", 0 ],
"guider": [ "6", 0 ],
"sampler": [ "7", 0 ],
"sigmas": [ "8", 0 ],
"latent_image": [ "9", 0 ]
},
"class_type": "SamplerCustomAdvanced"
},
"11": {
"inputs": {
"vae_name": "FLUX1/ae.sft"
},
"class_type": "VAELoader"
},
"12": {
"inputs": {
"samples": [ "10", 0 ],
"vae": [ "11", 0 ]
},
"class_type": "VAEDecode"
},
"13": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "12", 0 ]
},
"class_type": "SaveImage"
}
}
}
{
"fieldmap": {
"prompt": [ "3", "text" ],
"model": [ "1", "unet_name" ],
"width": [ "9", "width" ],
"height": [ "9", "height" ],
"sampler": [ "4", "sampler_name" ],
"scheduler": [ "5", "scheduler" ],
"steps": [ "5", "steps" ],
"seed": [ "6", "noise_seed" ],
"filename_prefix": [ "13", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"unet_name": "FLUX1/flux1-schnell-fp8.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader"
},
"2": {
"inputs": {
"clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
"clip_name2": "clip_l.safetensors",
"type": "flux"
},
"class_type": "DualCLIPLoader"
},
"3": {
"inputs": {
"text": "prompt",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"4": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "KSamplerSelect"
},
"5": {
"inputs": {
"scheduler": "simple",
"steps": 4,
"denoise": 1,
"model": [ "1", 0 ]
},
"class_type": "BasicScheduler"
},
"6": {
"inputs": {
"noise_seed": -1
},
"class_type": "RandomNoise"
},
"7": {
"inputs": {
"guidance": 3.5,
"conditioning": [ "3", 0 ]
},
"class_type": "FluxGuidance"
},
"8": {
"inputs": {
"model": [ "1", 0 ],
"conditioning": [ "7", 0 ]
},
"class_type": "BasicGuider"
},
"9": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage"
},
"10": {
"inputs": {
"noise": [ "6", 0 ],
"guider": [ "8", 0 ],
"sampler": [ "4", 0 ],
"sigmas": [ "5", 0 ],
"latent_image": [ "9", 0 ]
},
"class_type": "SamplerCustomAdvanced"
},
"11": {
"inputs": {
"vae_name": "ae.sft"
},
"class_type": "VAELoader"
},
"12": {
"inputs": {
"samples": [ "10", 0 ],
"vae": [ "11", 0 ]
},
"class_type": "VAEDecode"
},
"13": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "12", 0 ]
},
"class_type": "SaveImage"
}
}
}
#!/usr/bin/env python3
'''
Terminal Diffusion - A simple image generator to demonstrate ComfyUI invocation
and inline graphics in terminals like Ghostty and iTerm2 that support it.
Author: @thinkyhead
Date: 2025-01-15
'''
import argparse
import asyncio
import base64
import json
import os
import random
import re
import urllib.parse
import urllib.request

import websocket

from requests import get
# API Configuration
COMFYUI_BASE_URL = "http://127.0.0.1:8188"
# A unique identifier. We may need a serial number too.
CLIENT_ID = "imagine-v1"
HTTP_HEADERS = { "User-Agent": "Mozilla/5.0" }
# Local image folder, if known
COMFYUI_IMAGE_FOLDER = None
#HOME=os.environ.get('HOME')
#COMFYUI_IMAGE_FOLDER = f"{HOME}/Projects/StableDiffusion/ComfyUI/output/comfyui"
DEFAULT_WORKFLOW = "sd15-lcm"
#DEFAULT_WORKFLOW = "flux-dev"
#DEFAULT_WORKFLOW = "s35-large"
OUTPUT_PREFIX = "imagine"
HERE=os.path.dirname(os.path.abspath(__file__))
verbose = False
def log(message):
if verbose:
print(message)
spinner = ["🌄", "🌃", "🌅", "🌌", "🌇", "🌉", "🌆"]
def print_spinner():
print(spinner.pop(0) + ' hold on...', end="\r", flush=True)
spinner.append(spinner[0])
# Populate a workflow with our custom parameters
def populate_workflow(desc, args):
# CLI arguments plus other configuration...
workflow_inputs = args.__dict__
workflow_inputs['filename_prefix'] = OUTPUT_PREFIX
# Merge the workflow with the arguments
workflow = desc['workflow']
fieldmap = desc['fieldmap']
# Apply the ratio using the workflow width * height as the native pixel count
if args.ratio:
r = args.ratio.split("/") if ("/" in args.ratio) else args.ratio.split(":")
rw, rh = int(r[0]), int(r[1])
ratio = rw / rh
fw, fh = fieldmap['width'], fieldmap['height']
nw, nh = workflow[fw[0]]['inputs'][fw[1]], workflow[fh[0]]['inputs'][fh[1]]
pixels = nw * nh
# Keep close to the same number of pixels
ow = int((((pixels * ratio) ** 0.5) // 64) * 64)
oh = int(((ow / ratio) // 64) * 64)
# Update the workflow
workflow[fw[0]]['inputs'][fw[1]], workflow[fh[0]]['inputs'][fh[1]] = ow, oh
# Merge the workflow with the arguments
log(f"Workflow:")
for key, map in fieldmap.items():
nr, fld = map
if key in workflow_inputs:
if workflow_inputs[key] != None:
workflow[nr]["inputs"][fld] = workflow_inputs[key]
log(f" {key}: {workflow[nr]['inputs'][fld]}")
# Load a workflow from a JSON file and populate it with the given prompt
def load_and_populate_workflow(args):
workflow_file = f"{args.workflow}.json"
log(f"load_and_populate_workflow {workflow_file}")
try:
workflow_path = f"{HERE}/{workflow_file}"
with open(workflow_path, 'r') as file:
desc = json.load(file)
populate_workflow(desc, args)
return desc['workflow']
except FileNotFoundError:
print(f"The file {workflow_file} was not found.")
return None
except json.JSONDecodeError:
print(f"The file {workflow_file} contains invalid JSON.")
return None
# Enqueue a workflow for image generation
def queue_workflow(workflow):
p = { 'prompt': workflow, 'client_id': CLIENT_ID }
data = json.dumps(p).encode("utf-8")
log(f"queue_workflow data: {data}")
try:
req = urllib.request.Request(
f"{COMFYUI_BASE_URL}/prompt", data=data, headers=HTTP_HEADERS
)
response = urllib.request.urlopen(req).read()
return json.loads(response)
except Exception as e:
print(f"Error while queuing prompt: {e}")
raise e
# The URL for the given image filename, subfolder, and type (such as thumbnail)
def get_image_url(filename, subfolder, folder_type):
data = { 'filename': filename, 'subfolder': subfolder, 'type': folder_type }
url_values = urllib.parse.urlencode(data)
return f"{COMFYUI_BASE_URL}/view?{url_values}"
# Get the history of a prompt id
def get_history(prompt_id):
log("get_history")
req = urllib.request.Request(
f"{COMFYUI_BASE_URL}/history/{prompt_id}", headers=HTTP_HEADERS
)
with urllib.request.urlopen(req) as response:
return json.loads(response.read())
# Run a workflow, wait for it to complete, return the output images
def run_workflow(ws, workflow):
prompt_id = queue_workflow(workflow)['prompt_id']
output_images = []
while True:
print_spinner()
out = ws.recv()
if isinstance(out, str):
message = json.loads(out)
if message["type"] == "executing":
data = message["data"]
if data["node"] is None and data["prompt_id"] == prompt_id:
break # Execution is done
else:
continue # previews are binary data
history = get_history(prompt_id)[prompt_id]
for o in history['outputs']:
for node_id in history["outputs"]:
node_output = history["outputs"][node_id]
if "images" in node_output:
for image in node_output["images"]:
url = get_image_url(
image["filename"], image["subfolder"], image["type"]
)
output_images.append({"url": url})
return { 'data': output_images }
# Send a workflow to the ComfyUI server, await the result,
# and return the image results from the run_workflow thread.
async def send_workflow_sync(workflow):
try:
# Connect to the WebSocket server
try:
ws_url = COMFYUI_BASE_URL.replace("http", "ws")
ws = websocket.WebSocket()
ws.connect(f"{ws_url}/ws?clientId={CLIENT_ID}")
log("Connected to ComfyUI WebSocket.")
except Exception as e:
print(f"Failed to connect to WebSocket server: {e}")
return None
# Send the workflow
try:
log("Sending workflow to WebSocket server.")
log(f"Workflow: {workflow}")
images = await asyncio.to_thread(run_workflow, ws, workflow)
except Exception as e:
print(f"Error while receiving images: {e}")
images = None
ws.close()
return images
except Exception as e:
print(f"Error communicating with WebSocket server: {e}")
return None
# Inline Images Protocol
def detect_inline_images_protocol():
return os.environ.get("TERM_PROGRAM") == "iTerm.app"
# Kitty Graphics Display Protocol
def detect_kitty_graphics_protocol():
return os.environ.get("TERM_PROGRAM") == "ghostty"
# Display the image data in the terminal using available protocols
def display_image_data_in_terminal(img_data):
img_b64 = base64.b64encode(img_data).decode('utf-8')
if detect_inline_images_protocol():
# Based on print_image() above...
print(f"\033]1337;File=inline=1;size={len(img_data)};name={OUTPUT_PREFIX}.png;preserveAspectRatio=1;type=image/png:{img_b64}\a")
elif detect_kitty_graphics_protocol():
print(f"\033_Gf=100,a=T,t=d;{img_b64}\033\\") # Kitty display protocol
else:
log("No inline image support.")
def display_image_file_in_terminal(image_path):
with open(image_path, "rb") as img_file:
display_image_data_in_terminal(img_file.read())
# Fetch the image at the given URL for display. Delete the image after display.
def fetch_and_display_image(url):
response = get(url)
if response.status_code == 200:
display_image_data_in_terminal(response.content)
else:
print(f"Failed to fetch image from URL {url}")
# Run the workflow with the given prompt and other parameters
# For simplicity assume only one image will be generated.
# With minor changes this could handle workflow batches of any size.
async def generate_image(args):
log("generate_image")
# Load and populate the workflow
workflow = load_and_populate_workflow(args)
print_spinner()
result = await send_workflow_sync(workflow)
if result:
log(f"Image result: {result}")
# Result: {'data': [{'url': 'http://127.0.0.1:8188/view?filename=kitty_00006_.png&subfolder=&type=output'}]}
url = result['data'][0]['url']
# If COMFYUI_IMAGE_FOLDER is provided, we can directly display the image
if COMFYUI_IMAGE_FOLDER != None:
image_name = url.split("filename=")[1].split("&")[0]
display_image_file_in_terminal(f"{COMFYUI_IMAGE_FOLDER}/{image_name}")
else:
fetch_and_display_image(url)
else:
print("Failed to generate image.")
async def imagine(args):
if args.prompt:
await generate_image(args)
else:
# Otherwise run interactively
while True:
prompt = input("Imagine... > ").strip()
if not prompt: print("Bye!") ; break
await generate_image({'prompt': prompt})
def get_arguments():
# Define parser arguments for Usage and parsing
# 'comfyui', 'workflow', and 'seed' are set to defaults if not provided
parser = argparse.ArgumentParser(description="Generate images using ComfyUI. Enquote the prompt or escape all quotes. Without a prompt run interactively.")
wgroup = parser.add_argument_group("workflow")
wgroup.add_argument('--comfyui', help="ComfyUI base URL", default=os.environ.get('COMFYUI_BASE_URL', "http://127.0.0.1:8188"))
wgroup.add_argument('--workflow', help="Choose a workflow template.", default=DEFAULT_WORKFLOW)
wgroup.add_argument("--model", help="Override the model in the workflow.")
ggroup = parser.add_argument_group("generation")
ggroup.add_argument("--seed", help="Override the seed for the workflow.", default=random.randint(0, 18446744073709551614), type=int)
ggroup.add_argument("--steps", help="Override the number of steps in the workflow.", type=int)
ggroup.add_argument("--sampler", help="Override the sampler for the workflow. (e.g., ddim, euler, dpmpp_2m...)")
ggroup.add_argument("--scheduler", help="Override the scheduler for the workflow. (e.g., normal, karras, sgm_uniform, simple...)")
ggroup.add_argument("--negative", help="Provide a negative prompt. Take care to enquote.")
sgroup = parser.add_argument_group("dimensions")
sgroup.add_argument("--ratio", help="Set a w:h ratio, applying to the native pixel count. (e.g., 16:9)")
sgroup.add_argument("--size", help="Override both image width and height in the workflow.", type=int)
sgroup.add_argument("--width", help="Override image width in the workflow.", type=int)
sgroup.add_argument("--height", help="Override image height in the workflow.", type=int)
sgroup.add_argument("--scale", help="Override image scaling (if any) in the workflow.", type=float)
parser.add_argument("prompt", nargs="?", help="Prompt text for image generation.")
# Parse known arguments, forgiving extra and unknown arguments
parsed_args = parser.parse_known_args()
args = parsed_args[0]
global COMFYUI_BASE_URL
args.comfyui = args.comfyui.strip("/")
COMFYUI_BASE_URL = args.comfyui
# Ratio should not be combined with width, height, or size
if args.ratio:
if args.width or args.height or args.size:
parser.error("--ratio cannot be combined with --width, --height, or --size.")
elif not re.match(r"\d+:\d+", args.ratio) and not re.match(r"\d+/\d+", args.ratio):
parser.error("--ratio must be in the form w:h or w/h.")
if args.size:
print(f"Size: {args.size}")
if args.size < 512:
parser.error("--size must be at least 512 pixels.")
elif args.size % 64 != 0:
parser.error("--size must be a multiple of 64 pixels.")
else:
if not args.width: args.width = args.size
if not args.height: args.height = args.size
if args.width:
if args.width % 64 != 0:
parser.error("--width must be a multiple of 64 pixels.")
elif args.width < 256:
parser.error("--width must be at least 256 pixels.")
elif args.height and args.width * args.height < 512 * 512:
parser.error("--width * --height must be at least 256K pixels.")
if args.height:
if args.height % 64 != 0:
parser.error("--height must be a multiple of 64 pixels.")
elif args.height < 256:
parser.error("--height must be at least 256 pixels.")
if args.prompt:
args.prompt = args.prompt.strip()
more = parsed_args[1]
if len(more):
args.prompt += " " + " ".join(more)
if more[0].startswith("--"):
unk = more[0].split("=")[0]
parser.error(f"Unknown argument {unk}")
return args
def main():
# Randomize for this session
random.seed(int(os.urandom(10).hex(), 16))
# Get arguments
args = get_arguments()
# We need asynchronous I/O
asyncio.run(imagine(args))
if __name__ == "__main__":
main()
{
"fieldmap": {
"prompt": [ "2", "text" ],
"negative": [ "3", "text" ],
"model": [ "1", "ckpt_name" ],
"width": [ "4", "width" ],
"height": [ "4", "height" ],
"sampler": [ "5", "sampler_name" ],
"scheduler": [ "5", "scheduler" ],
"steps": [ "5", "steps" ],
"seed": [ "5", "seed" ],
"filename_prefix": [ "9", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"ckpt_name": "SD1.5/v1-5-pruned-emaonly.ckpt"
},
"class_type": "CheckpointLoaderSimple",
"_meta": { "title": "Load Checkpoint" }
},
"2": {
"inputs": {
"text": "prompt",
"clip": [ "1", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "Positive Prompt" }
},
"3": {
"inputs": {
"text": "signature, watermark",
"clip": [ "1", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "Negative Prompt" }
},
"4": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": { "title": "Empty Latent Image" }
},
"5": {
"inputs": {
"seed": 0,
"steps": 30,
"cfg": 7.5,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 1,
"model": [ "1", 0 ],
"positive": [ "2", 0 ],
"negative": [ "3", 0 ],
"latent_image": [ "4", 0 ]
},
"class_type": "KSampler",
"_meta": { "title": "KSampler" }
},
"6": {
"inputs": {
"samples": [ "5", 0 ],
"vae": [ "1", 2 ]
},
"class_type": "VAEDecode",
"_meta": { "title": "VAE Decode" }
},
"7": {
"inputs": {
"model_name": "RealESRGAN_x2plus.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": { "title": "Load Upscale Model" }
},
"8": {
"inputs": {
"upscale_model": [ "7", 0 ],
"image": [ "6", 0 ]
},
"class_type": "ImageUpscaleWithModel",
"_meta": { "title": "Upscale Image (using Model)" }
},
"9": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "8", 0 ]
},
"class_type": "SaveImage",
"_meta": { "title": "Save Image" }
}
}
}
{
"fieldmap": {
"prompt": [ "5", "text" ],
"negative": [ "6", "text" ],
"model": [ "2", "ckpt_name" ],
"width": [ "4", "width" ],
"height": [ "4", "height" ],
"scale": [ "8", "scale_by" ],
"sampler": [ "1", "sampler_name" ],
"scheduler": [ "1", "scheduler" ],
"steps": [ "1", "steps" ],
"seed": [ "1", "seed" ],
"filename_prefix": [ "3", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"seed": 0,
"steps": 8,
"cfg": 1.2,
"sampler_name": "lcm",
"scheduler": "normal",
"denoise": 1,
"model": [ "3", 0 ],
"positive": [ "5", 0 ],
"negative": [ "6", 0 ],
"latent_image": [ "4", 0 ]
},
"class_type": "KSampler",
"_meta": { "title": "KSampler" }
},
"2": {
"inputs": {
"ckpt_name": "SD1.5/deliberate_v2.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": { "title": "Load Checkpoint" }
},
"3": {
"inputs": {
"model": [ "2", 0 ],
"clip": [ "2", 1 ],
"lora_name": "lcm-lora-sdv1-5.safetensors",
"strength_model": 1,
"strength_clip": 1
},
"class_type": "LoraLoader",
"_meta": { "title": "Load LoRA" }
},
"4": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": { "title": "Empty Latent Image" }
},
"5": {
"inputs": {
"text": "Prompt",
"clip": [ "3", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "CLIP Text Encode (Prompt)" }
},
"6": {
"inputs": {
"text": "",
"clip": [ "3", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "CLIP Text Encode (Prompt)" }
},
"7": {
"inputs": {
"samples": [ "1", 0 ],
"vae": [ "2", 2 ]
},
"class_type": "VAEDecode",
"_meta": { "title": "VAE Decode" }
},
"8": {
"inputs": {
"upscale_method": "bilinear",
"scale_by": 2,
"image": [ "7", 0 ]
},
"class_type": "ImageScaleBy",
"_meta": { "title": "Upscale Image By" }
},
"9": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "8", 0 ]
},
"class_type": "SaveImage",
"_meta": { "title": "Save Image" }
}
}
}
{
"fieldmap": {
"prompt": [ "3", "text" ],
"negative": [ "4", "text" ],
"model": [ "1", "ckpt_name" ],
"width": [ "5", "width" ],
"height": [ "5", "height" ],
"sampler": [ "6", "sampler_name" ],
"scheduler": [ "6", "scheduler" ],
"steps": [ "6", "steps" ],
"seed": [ "6", "seed" ],
"filename_prefix": [ "8", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"ckpt_name": "SD1.5/deliberate_v2.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": { "title": "Load Checkpoint" }
},
"2": {
"inputs": {
"model": [ "1", 0 ],
"clip": [ "1", 1 ],
"lora_name": "lcm-lora-sdv1-5.safetensors",
"strength_model": 1,
"strength_clip": 1
},
"class_type": "LoraLoader",
"_meta": { "title": "Load LoRA" }
},
"3": {
"inputs": {
"text": "Prompt",
"clip": [ "2", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "CLIP Text Encode (Prompt)" }
},
"4": {
"inputs": {
"text": "",
"clip": [ "2", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "CLIP Text Encode (Prompt)" }
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": { "title": "Empty Latent Image" }
},
"6": {
"inputs": {
"seed": 0,
"steps": 8,
"cfg": 1.2,
"sampler_name": "lcm",
"scheduler": "normal",
"denoise": 1,
"model": [ "2", 0 ],
"positive": [ "3", 0 ],
"negative": [ "4", 0 ],
"latent_image": [ "5", 0 ]
},
"class_type": "KSampler",
"_meta": { "title": "KSampler" }
},
"7": {
"inputs": {
"samples": [ "6", 0 ],
"vae": [ "1", 2 ]
},
"class_type": "VAEDecode",
"_meta": { "title": "VAE Decode" }
},
"8": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "7", 0 ]
},
"class_type": "SaveImage",
"_meta": { "title": "Save Image" }
}
}
}
{
"fieldmap": {
"prompt": [ "4", "text" ],
"negative": [ "5", "text" ],
"model": [ "2", "ckpt_name" ],
"width": [ "3", "width" ],
"height": [ "3", "height" ],
"sampler": [ "1", "sampler_name" ],
"scheduler": [ "1", "scheduler" ],
"steps": [ "1", "steps" ],
"seed": [ "1", "seed" ],
"filename_prefix": [ "7", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"seed": 0,
"steps": 30,
"cfg": 7.5,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 1,
"model": [ "2", 0 ],
"positive": [ "4", 0 ],
"negative": [ "5", 0 ],
"latent_image": [ "3", 0 ]
},
"class_type": "KSampler",
"_meta": { "title": "KSampler" }
},
"2": {
"inputs": {
"ckpt_name": "SD1.5/v1-5-pruned-emaonly.ckpt"
},
"class_type": "CheckpointLoaderSimple",
"_meta": { "title": "Load Checkpoint" }
},
"3": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": { "title": "Empty Latent Image" }
},
"4": {
"inputs": {
"text": "prompt",
"clip": [ "2", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "Positive Prompt" }
},
"5": {
"inputs": {
"text": "signature, watermark",
"clip": [ "2", 1 ]
},
"class_type": "CLIPTextEncode",
"_meta": { "title": "Negative Prompt" }
},
"6": {
"inputs": {
"samples": [ "1", 0 ],
"vae": [ "2", 2 ]
},
"class_type": "VAEDecode",
"_meta": { "title": "VAE Decode" }
},
"7": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "6", 0 ]
},
"class_type": "SaveImage",
"_meta": { "title": "Save Image" }
}
}
}
{
"fieldmap": {
"prompt": [ "3", "text" ],
"negative": [ "4", "text" ],
"model": [ "1", "ckpt_name" ],
"width": [ "5", "width" ],
"height": [ "5", "height" ],
"sampler": [ "6", "sampler_name" ],
"scheduler": [ "6", "scheduler" ],
"steps": [ "6", "steps" ],
"seed": [ "6", "seed" ],
"filename_prefix": [ "8", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"ckpt_name": "SD3.5/sd3.5_large.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"2": {
"inputs": {
"clip_name1": "clip_l.safetensors",
"clip_name2": "clip_g.safetensors",
"clip_name3": "t5/t5xxl_fp16.safetensors"
},
"class_type": "TripleCLIPLoader"
},
"3": {
"inputs": {
"text": "prompt",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"4": {
"inputs": {
"text": "",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage"
},
"6": {
"inputs": {
"seed": 0,
"steps": 30,
"cfg": 5.45,
"sampler_name": "euler",
"scheduler": "sgm_uniform",
"denoise": 1,
"model": [ "1", 0 ],
"positive": [ "3", 0 ],
"negative": [ "4", 0 ],
"latent_image": [ "5", 0 ]
},
"class_type": "KSampler"
},
"7": {
"inputs": {
"samples": [ "6", 0 ],
"vae": [ "1", 2 ]
},
"class_type": "VAEDecode"
},
"8": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "7", 0 ]
},
"class_type": "SaveImage"
}
}
}
{
"fieldmap": {
"prompt": [ "3", "text" ],
"negative": [ "4", "text" ],
"model": [ "1", "ckpt_name" ],
"width": [ "5", "width" ],
"height": [ "5", "height" ],
"sampler": [ "6", "sampler_name" ],
"scheduler": [ "6", "scheduler" ],
"steps": [ "6", "steps" ],
"seed": [ "6", "seed" ],
"filename_prefix": [ "8", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"ckpt_name": "SD3.5/sd3.5_medium.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"2": {
"inputs": {
"clip_name1": "clip_l.safetensors",
"clip_name2": "clip_g.safetensors",
"clip_name3": "t5/t5xxl_fp16.safetensors"
},
"class_type": "TripleCLIPLoader"
},
"3": {
"inputs": {
"text": "prompt",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"4": {
"inputs": {
"text": "",
"clip": [ "2", 0 ]
},
"class_type": "CLIPTextEncode"
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage"
},
"6": {
"inputs": {
"seed": 0,
"steps": 30,
"cfg": 5.45,
"sampler_name": "euler",
"scheduler": "sgm_uniform",
"denoise": 1,
"model": [ "1", 0 ],
"positive": [ "3", 0 ],
"negative": [ "4", 0 ],
"latent_image": [ "5", 0 ]
},
"class_type": "KSampler"
},
"7": {
"inputs": {
"samples": [ "6", 0 ],
"vae": [ "1", 2 ]
},
"class_type": "VAEDecode"
},
"8": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "7", 0 ]
},
"class_type": "SaveImage"
}
}
}
{
"fieldmap": {
"prompt": [ "4", "text" ],
"negative": [ "5", "text" ],
"model": [ "1", "ckpt_name" ],
"width": [ "10", "width" ],
"height": [ "10", "height" ],
"sampler": [ "11", "sampler_name" ],
"scheduler": [ "11", "scheduler" ],
"steps": [ "11", "steps" ],
"seed": [ "11", "seed" ],
"filename_prefix": [ "13", "filename_prefix" ]
},
"workflow": {
"1": {
"inputs": {
"ckpt_name": "SD3.5/sd3.5_large_turbo.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"2": {
"inputs": {
"shift": 3,
"model": [ "1", 0 ]
},
"class_type": "ModelSamplingSD3"
},
"3": {
"inputs": {
"clip_name1": "clip_g.safetensors",
"clip_name2": "clip_l.safetensors",
"clip_name3": "t5/t5xxl_fp16.safetensors"
},
"class_type": "TripleCLIPLoader"
},
"4": {
"inputs": {
"text": "prompt",
"clip": [ "3", 0 ]
},
"class_type": "CLIPTextEncode"
},
"5": {
"inputs": {
"text": "",
"clip": [ "3", 0 ]
},
"class_type": "CLIPTextEncode"
},
"6": {
"inputs": {
"conditioning": [ "5", 0 ]
},
"class_type": "ConditioningZeroOut"
},
"7": {
"inputs": {
"start": 0.1,
"end": 1,
"conditioning": [ "6", 0 ]
},
"class_type": "ConditioningSetTimestepRange"
},
"8": {
"inputs": {
"start": 0,
"end": 0.1,
"conditioning": [ "5", 0 ]
},
"class_type": "ConditioningSetTimestepRange"
},
"9": {
"inputs": {
"conditioning_1": [ "7", 0 ],
"conditioning_2": [ "8", 0 ]
},
"class_type": "ConditioningCombine"
},
"10": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage"
},
"11": {
"inputs": {
"seed": 0,
"steps": 4,
"cfg": 1,
"sampler_name": "dpmpp_2m",
"scheduler": "sgm_uniform",
"denoise": 1,
"model": [ "2", 0 ],
"positive": [ "4", 0 ],
"negative": [ "9", 0 ],
"latent_image": [ "10", 0 ]
},
"class_type": "KSampler"
},
"12": {
"inputs": {
"samples": [ "11", 0 ],
"vae": [ "1", 2 ]
},
"class_type": "VAEDecode"
},
"13": {
"inputs": {
"filename_prefix": "imagine",
"images": [ "12", 0 ]
},
"class_type": "SaveImage"
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment