@blepping · Last active October 25, 2024
Lying sigma sampler
# Based on the concept from https://github.com/muerrilla/sd-webui-detail-daemon
#
# See DESCRIPTION below for use case/explanation.
from __future__ import annotations

from typing import TYPE_CHECKING

from comfy.samplers import KSAMPLER

if TYPE_CHECKING:
    import torch


def lying_sigma_sampler(
    model: object,
    x: torch.Tensor,
    sigmas: torch.Tensor,
    *,
    lss_wrapped_sampler: object,
    lss_dishonesty_factor: float,
    lss_startend_percent: tuple[float, float],
    **kwargs: dict,
) -> torch.Tensor:
    start_percent, end_percent = lss_startend_percent
    ms = model.inner_model.inner_model.model_sampling
    # Convert the start/end percentages of sampling into sigmas. Sigmas
    # decrease over the course of sampling, so start_sigma >= end_sigma.
    start_sigma, end_sigma = (
        round(ms.percent_to_sigma(start_percent), 4),
        round(ms.percent_to_sigma(end_percent), 4),
    )
    del ms

    def model_wrapper(x: torch.Tensor, sigma: torch.Tensor, **extra_args: dict):
        sigma_float = float(sigma.max().detach().cpu())
        # Only lie about the sigma while inside the active window.
        if end_sigma <= sigma_float <= start_sigma:
            sigma = sigma * (1.0 + lss_dishonesty_factor)
        return model(x, sigma, **extra_args)

    # Copy attributes some sampler functions expect to find on the model.
    for k in ("inner_model", "sigmas"):
        if hasattr(model, k):
            setattr(model_wrapper, k, getattr(model, k))

    # Run the wrapped sampler with our wrapper standing in for the model.
    return lss_wrapped_sampler.sampler_function(
        model_wrapper,
        x,
        sigmas,
        **kwargs,
        **lss_wrapped_sampler.extra_options,
    )
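
# A minimal illustration of the core adjustment (the values here are made up
# for demonstration and are not part of the sampler): with a dishonesty
# factor of -0.05, a sigma of 10.0 is reported to the model as
# 10.0 * (1.0 + -0.05) == 9.5, i.e. the model is told the latent is 5% less
# noisy than it actually is.
#
#   import torch
#   sigma = torch.tensor(10.0)
#   adjusted = sigma * (1.0 + -0.05)  # tensor(9.5000)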


class LyingSigmaSamplerNode:
    DESCRIPTION = "This sampler wrapper works by adjusting the sigma passed to the model, while the rest of sampling stays the same. Telling the model we're at a lower sigma (noise level) than we actually are can increase detail. The dishonesty factor can also be set to positive values to reduce detail and smooth things out. Even low values like -0.01 can have a noticeable effect - a little goes a long way."
    CATEGORY = "sampling/custom_sampling/samplers"
    RETURN_TYPES = ("SAMPLER",)
    FUNCTION = "go"

    @classmethod
    def INPUT_TYPES(cls) -> dict:
        return {
            "required": {
                "sampler": ("SAMPLER",),
                "dishonesty_factor": (
                    "FLOAT",
                    {
                        "default": -0.05,
                        "min": -0.999,
                        "step": 0.001,
                        "tooltip": "Adjustment factor for sigmas passed to the model. -0.05 means the sigma is reduced by 5%. Negative values tend to increase detail, positive values reduce it. -0.05 is a relatively strong setting - if the effect is too extreme, try reducing it.",
                    },
                ),
            },
            "optional": {
                "start_percent": (
                    "FLOAT",
                    {
                        "default": 0.1,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.001,
                        "tooltip": "Time the effect becomes active as a percentage of sampling, not steps.",
                    },
                ),
                "end_percent": (
                    "FLOAT",
                    {
                        "default": 0.9,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.001,
                        "tooltip": "Time the effect ends (inclusive) as a percentage of sampling, not steps.",
                    },
                ),
            },
        }

    @classmethod
    def go(
        cls,
        sampler: object,
        dishonesty_factor: float,
        *,
        start_percent=0.0,
        end_percent=1.0,
    ) -> tuple:
        return (
            KSAMPLER(
                lying_sigma_sampler,
                extra_options={
                    "lss_wrapped_sampler": sampler,
                    "lss_dishonesty_factor": dishonesty_factor,
                    "lss_startend_percent": (start_percent, end_percent),
                },
            ),
        )


NODE_CLASS_MAPPINGS = {
    "LyingSigmaSampler": LyingSigmaSamplerNode,
}
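
# Usage sketch (a minimal example, not part of the node; it assumes
# comfy.samplers.ksampler is available in your ComfyUI version for building
# a base SAMPLER - in the UI you would simply connect a SAMPLER output to
# this node's sampler input instead):
#
#   from comfy.samplers import ksampler
#   base_sampler = ksampler("euler")
#   (lying_sampler,) = LyingSigmaSamplerNode.go(
#       base_sampler,
#       -0.05,  # tell the model sigmas are 5% lower than they really are
#       start_percent=0.1,
#       end_percent=0.9,
#   )
#   # lying_sampler is a SAMPLER object usable with SamplerCustom-type nodes.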