blepping / bleh_apg_guider.py
Last active May 17, 2025 16:01
APG implementation for ComfyUI
# By https://github.com/blepping
# License: Apache2
# Initial APG implementation referenced from https://arxiv.org/pdf/2410.02416 and https://github.com/ace-step/ACE-Step/blob/e5610345db9f450a855994169f4ca7a7b5fb4f1d/acestep/apg_guidance.py
#
# Changes:
# 250616: Removed alt2 mode; it was the same as positive momentum. Derp.
# 250616: New alt2 mode that blends history with the current diff. Added advanced YAML parameters input.
import math
import yaml
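
The core of APG, per the paper linked above, is to split the usual CFG difference into components parallel and orthogonal to the conditional prediction and down-weight the parallel part, optionally capping the difference's norm first (momentum is omitted here). A minimal sketch of that projection step; the function name, parameters, and defaults are illustrative, not the gist's actual interface:

import torch


def apg_combine(cond, uncond, guidance_scale=5.0, eta=0.0, norm_threshold=2.5):
    diff = cond - uncond
    if norm_threshold > 0:
        # Cap the per-sample norm of the guidance difference.
        norm = diff.flatten(1).norm(dim=1).clamp_min(1e-12).view(-1, *([1] * (diff.ndim - 1)))
        diff = diff * (norm_threshold / norm).clamp(max=1.0)
    # Decompose diff into components parallel/orthogonal to the conditional prediction.
    flat_cond, flat_diff = cond.flatten(1), diff.flatten(1)
    dot = (flat_diff * flat_cond).sum(dim=1, keepdim=True)
    sq = (flat_cond * flat_cond).sum(dim=1, keepdim=True).clamp_min(1e-12)
    parallel = ((dot / sq) * flat_cond).view_as(diff)
    orthogonal = diff - parallel
    # Down-weight the parallel part (eta=0 drops it entirely), then apply like CFG.
    return cond + (guidance_scale - 1.0) * (orthogonal + eta * parallel)
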
blepping / bleh_flashattention_node.py
Last active March 10, 2025 12:45
Simple ComfyUI nodes to force use of FlashAttention2
# By https://github.com/blepping
# License: Apache2
#
# Usage: Place in ComfyUI's custom_nodes directory.
# It will add BlehFlashAttentionSampler and BlehGlobalFlashAttention nodes.
# Requires FlashAttention2 installed into the Python venv: https://github.com/Dao-AILab/flash-attention
#
from __future__ import annotations
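
As a hedged illustration of what such a node ultimately has to do, here is a minimal wrapper around flash_attn's flash_attn_func; the (batch, heads, seq, head_dim) layout, the fp16 cast, and the wrapper name are assumptions for the sketch, not the gist's code:

import torch
from flash_attn import flash_attn_func  # from the flash-attention package linked above


def flash_attention(q, k, v, causal=False):
    # flash_attn_func expects (batch, seq, heads, head_dim) tensors in fp16/bf16 on CUDA;
    # transpose from the assumed (batch, heads, seq, head_dim) layout and cast.
    orig_dtype = q.dtype
    q, k, v = (t.transpose(1, 2).to(torch.float16) for t in (q, k, v))
    out = flash_attn_func(q, k, v, causal=causal)
    return out.transpose(1, 2).to(orig_dtype)
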
blepping / dumpst.py
Created March 5, 2025 06:07
Simple script to pretty print a SafeTensors file
#!python3
import argparse
import mmap
import json
import os
import struct
import sys
from pathlib import Path
from typing import NamedTuple
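
The part of the format the script relies on is simple: a .safetensors file begins with an 8-byte little-endian length, followed by that many bytes of JSON describing each tensor's dtype, shape, and data offsets. A sketch of that header read, using the struct/json/Path imports above (the helper name is illustrative):

def read_safetensors_header(path: Path) -> dict:
    # First 8 bytes: unsigned little-endian 64-bit header length, then the JSON header.
    with path.open("rb") as fp:
        (header_len,) = struct.unpack("<Q", fp.read(8))
        return json.loads(fp.read(header_len))


# Example: list tensor names with dtype/shape, skipping the optional __metadata__ entry.
# for name, info in read_safetensors_header(Path("model.safetensors")).items():
#     if name != "__metadata__":
#         print(name, info["dtype"], info["shape"])
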
blepping / cosmos_scheduler_node.py
Last active January 13, 2025 07:43
ComfyUI scheduler for Nvidia's Cosmos models
# Usage: Put in custom_nodes and restart ComfyUI/refresh browser.
import torch
class CosmosSchedulerNode:
    RETURN_TYPES = ("SIGMAS",)
    FUNCTION = "go"
    CATEGORY = "sampling/custom_sampling/schedulers"

    @classmethod
    def INPUT_TYPES(cls):
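        # Hedged sketch of how the rest of the node could look; these input names and the
        # simple log-linear sigma ramp are placeholders, not the actual Cosmos schedule
        # the gist implements.
        return {
            "required": {
                "steps": ("INT", {"default": 20, "min": 1, "max": 1000}),
                "sigma_max": ("FLOAT", {"default": 80.0, "min": 0.0, "max": 1000.0}),
                "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0}),
            },
        }

    def go(self, steps, sigma_max, sigma_min):
        # Descending sigmas, evenly spaced in log space, terminated with the trailing zero
        # that ComfyUI samplers expect.
        ramp = torch.linspace(0, 1, steps)
        sigmas = sigma_max * (sigma_min / sigma_max) ** ramp
        return (torch.cat([sigmas, sigmas.new_zeros(1)]),)
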
blepping / denoised_history_sampler.py
Last active December 5, 2024 05:01
ComfyUI sampler wrapper that saves all model predictions
# ComfyUI sampler wrapper that saves the model's denoised prediction for every model call.
# This ignores whatever the sampler actually returns. You will get a result of
# batch_size * times_model_was_called latents.
from __future__ import annotations
import torch
from comfy.samplers import KSAMPLER
from comfy.model_management import device_supports_non_blocking
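
A condensed sketch of the wrapping idea described in the comments (not the gist itself): the sampler function intercepts every model call, stores the denoised prediction, and returns the concatenated history in place of the sampler's own output. It leans on the torch and KSAMPLER imports above; the names and the wrapped_sampler option are illustrative.

def history_sampler(model, x, sigmas, *, wrapped_sampler, extra_args=None,
                    callback=None, disable=None, **kwargs):
    history = []

    def wrapped_model(x_, sigma_, **kw):
        denoised = model(x_, sigma_, **kw)
        # Keep a CPU copy of every denoised prediction the model produces.
        history.append(denoised.detach().clone().cpu())
        return denoised

    # Run the real sampler, then discard its result in favor of the collected history.
    wrapped_sampler.sampler_function(wrapped_model, x, sigmas, extra_args=extra_args,
                                     callback=callback, disable=disable,
                                     **wrapped_sampler.extra_options)
    return torch.cat(history, dim=0).to(x.device)


# Building the wrapper as a SAMPLER object might look like:
# sampler = KSAMPLER(history_sampler, extra_options={"wrapped_sampler": inner_sampler})
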
blepping / NoAnnoy.py
Last active November 24, 2024 08:01
RiftWizard NoAnnoy mod
# RiftWizard mod; should work with both RiftWizard 1 and 2. Usage:
# 1. Find your mods directory. Right-click the game -> Properties -> Installed Files -> Browse -> Navigate to RiftWizard2/mods
# 2. Create a NoAnnoy directory (case-sensitive) and put NoAnnoy.py in there.
# 3. Restart Rift Wizard. You should see a message about the mod in the game console.
#
# You can use Steam's Launch Options to override settings instead of editing the mod.
# Right click the game -> Properties -> General. Example launch string:
# NOANNOY_MONSTER_BLACKLIST=SilentSpecter,FaeSniper NOANNOY_GAMEOVER_SPEED_FACTOR=100 %command%
### User configurable options:
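# (A hedged guess at how the launch-option overrides described above could be read; only
# the NOANNOY_* variable names come from the usage notes, the defaults and helper are
# illustrative.)
import os

def _env(name, default):
    return os.environ.get("NOANNOY_" + name, default)

MONSTER_BLACKLIST = [m for m in _env("MONSTER_BLACKLIST", "").split(",") if m]
GAMEOVER_SPEED_FACTOR = float(_env("GAMEOVER_SPEED_FACTOR", "1"))
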
blepping / mochi_split_latent.py
Created November 5, 2024 21:00
ComfyUI node to split Mochi latent frames
# By https://github.com/blepping/
import torch
class SplitMochiVideoLatent:
    DESCRIPTION = "Hack for Mochi latents to split the frames. Mochi has 6x temporal compression, so a latent frame is worth 6 frames in your video. Splitting up a Mochi latent may reduce quality."
    FUNCTION = "go"
    CATEGORY = "latent/mochi"
    RETURN_TYPES = ("LATENT",)
blepping / lying_sigma_sampler.py
Last active October 25, 2024 13:31
Lying sigma sampler
# Based on the concept from https://github.com/muerrilla/sd-webui-detail-daemon
#
# See DESCRIPTION below for use case/explanation.
from __future__ import annotations
from typing import TYPE_CHECKING
from comfy.samplers import KSAMPLER
# NO LONGER MAINTAINED
# There is an improved version in my ComfyUI-bleh node pack:
# https://github.com/blepping/ComfyUI-bleh#blehsageattentionsampler
# If you really want to use the gist, see the previous revision.
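
A condensed sketch of the lying-sigma idea from the lying_sigma_sampler entry above (not the gist's implementation): wrap the model so that, inside a chosen sigma range, it is told a slightly smaller sigma than the one actually being sampled, which tends to push it toward adding detail. The dishonesty_factor and range parameters here are illustrative; it reuses the same sampler-wrapper pattern as the history sketch above.

def lying_sigma_sampler(model, x, sigmas, *, wrapped_sampler, dishonesty_factor=-0.05,
                        start_sigma=float("inf"), end_sigma=0.0,
                        extra_args=None, callback=None, disable=None, **kwargs):
    def lying_model(x_, sigma_, **kw):
        adjusted = sigma_
        # Only lie inside the configured sigma range.
        if end_sigma <= float(sigma_.max()) <= start_sigma:
            adjusted = sigma_ * (1.0 + dishonesty_factor)
        return model(x_, adjusted, **kw)

    return wrapped_sampler.sampler_function(lying_model, x, sigmas, extra_args=extra_args,
                                            callback=callback, disable=disable,
                                            **wrapped_sampler.extra_options)


# As with the history wrapper sketch, KSAMPLER(lying_sigma_sampler, extra_options=...)
# would turn this into a SAMPLER object.
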
blepping / testsolver.py
Last active December 7, 2024 12:40
Sampler thought experiments
from functools import partial
import torch
torch.set_printoptions(profile="full")
dtype = torch.float64
def randn_like(x, generator=None):
    return torch.randn(*x.shape, out=x.detach().clone(), generator=generator)
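
A purely illustrative toy on top of the helpers above: a plain Euler loop over a descending sigma schedule with a stand-in denoiser, the kind of scaffold these thought experiments run on. The toy denoiser and the schedule values are assumptions, not anything from the gist.

def toy_denoiser(x, sigma):
    # Stand-in model: pretend the clean signal is x shrunk according to the noise level.
    return x / (1.0 + sigma**2) ** 0.5


def euler_solve(x, sigmas, denoiser=toy_denoiser):
    for i in range(len(sigmas) - 1):
        sigma, sigma_next = sigmas[i], sigmas[i + 1]
        d = (x - denoiser(x, sigma)) / sigma  # derivative estimate, k-diffusion style
        x = x + d * (sigma_next - sigma)
    return x


gen = torch.Generator().manual_seed(42)
sigmas = torch.linspace(14.6, 0.0, 11, dtype=dtype)
x = randn_like(torch.zeros(1, 4, 8, 8, dtype=dtype), generator=gen) * sigmas[0]
print(euler_solve(x, sigmas).std())
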