Understood. The following is developer documentation aimed at readers interested in the internal structure and development of llama-cpp-python.
llama-cpp-python is the Python binding for llama.cpp, a high-performance LLM inference library implemented in C++. Its main goal is to make llama.cpp's fast CPU/GPU inference, its memory efficiency (especially with quantized models), and its rich feature set easy for Python developers to use.
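As a quick orientation before looking at the internals, the high-level API wraps model loading and text completion in a single `Llama` class. The snippet below is a minimal usage sketch: the model path is a placeholder and the sampling parameters are illustrative, not recommendations from this documentation.

```python
from llama_cpp import Llama

# Load a local GGUF model (path is a placeholder); n_gpu_layers=-1 offloads
# as many layers as possible to the GPU when a GPU-enabled build is installed.
llm = Llama(
    model_path="./models/example-7b-q4_k_m.gguf",
    n_ctx=4096,
    n_gpu_layers=-1,
)

# Returns an OpenAI-style completion dict; the text is in choices[0]["text"].
out = llm("Q: Name the planets in the solar system. A:", max_tokens=64, stop=["Q:", "\n"])
print(out["choices"][0]["text"])
```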
```python
import node_helpers
import comfy.utils
import math
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
from comfy.patcher_extension import WrappersMP
import torch

class TextEncodeQwenImageEditPlusFixPixelShift(io.ComfyNode):
    @classmethod
```
```python
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
import torch

class QwenImageScaleRope(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="QwenImageScaleRope",
            category="_for_testing",
```
```python
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
import comfy

class Noise_ReferenceAlign:
    def __init__(self, seed, reference_latents, strength):
        self.seed = seed
        self.reference_latents = reference_latents
        self.strength = strength
```
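Noise_ReferenceAlign looks like a drop-in noise object for ComfyUI's custom-sampler path, which expects a generate_noise(input_latent) method. The continuation below is a hypothetical sketch under that assumption: only comfy.sample.prepare_noise and the input_latent dict layout are standard ComfyUI API, and the lerp-toward-reference behaviour is an illustration of the idea, not the original implementation.

```python
import comfy.sample

class Noise_ReferenceAlignSketch:
    """Hypothetical sketch: fresh noise blended toward reference latents."""

    def __init__(self, seed, reference_latents, strength):
        self.seed = seed
        self.reference_latents = reference_latents
        self.strength = strength

    def generate_noise(self, input_latent):
        latent_image = input_latent["samples"]
        batch_inds = input_latent.get("batch_index")
        # Standard ComfyUI helper for seeded, batch-aware noise.
        noise = comfy.sample.prepare_noise(latent_image, self.seed, batch_inds)
        # Assumed behaviour: interpolate between random noise and the reference.
        ref = self.reference_latents.to(device=noise.device, dtype=noise.dtype)
        return noise * (1.0 - self.strength) + ref * self.strength
```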
```python
import numpy as np
import pandas as pd
import gradio as gr

HUNYUAN_VIDEO_DEFAULT = [
    "Hunyuan Video 544p",
    "1.0, 1.06971, 1.29073, 1.11245, 1.09596, 1.05233, 1.01415, 1.05672, 1.00848, 1.03632, 1.02974, 1.00984, 1.03028, 1.00681, 1.06614, 1.05022, 1.02592, 1.01776, 1.02985, 1.00726, 1.03727, 1.01502, 1.00992, 1.03371, 0.9976, 1.02742, 1.0093, 1.01869, 1.00815, 1.01461, 1.01152, 1.03082, 1.0061, 1.02162, 1.01999, 0.99063, 1.01186, 1.0217, 0.99947, 1.01711, 0.9904, 1.00258, 1.00878, 0.97039, 0.97686, 0.94315, 0.97728, 0.91154, 0.86139, 0.76592",
    50,
    0.24,
    -0.01,
```
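The second entry of HUNYUAN_VIDEO_DEFAULT packs 50 per-step magnitude ratios into one comma-separated string (the 50 that follows it matches that count). A small hypothetical helper, parse_ratios, shows how such a string can be turned into a numeric array; the name and function are not part of the original file.

```python
import numpy as np

def parse_ratios(ratio_str: str) -> np.ndarray:
    """Parse a comma-separated ratio string into a float array."""
    return np.array([float(v) for v in ratio_str.split(",") if v.strip()])

# parse_ratios(HUNYUAN_VIDEO_DEFAULT[1]).shape == (50,)
```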
```python
'''
https://github.com/Zehong-Ma/MagCache
'''
from comfy.ldm.modules.diffusionmodules.openaimodel import forward_timestep_embed, timestep_embedding, th, apply_control
import comfy.patcher_extension
import json

def linear_interpolate(data: dict, num: float, scale: float) -> float:
    if not data:
```
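The listing stops inside linear_interpolate, so its exact semantics are unknown. One plausible, hedged reading is a piecewise-linear lookup into a {position: value} mapping evaluated at num * scale; the sketch below implements that guess and is not the original function.

```python
def linear_interpolate_sketch(data: dict, num: float, scale: float) -> float:
    """Hypothetical reading: piecewise-linear lookup in a {position: value} dict
    (keys may be strings when the mapping comes from JSON)."""
    if not data:
        return 0.0
    pts = sorted((float(k), float(v)) for k, v in data.items())
    x = num * scale
    if x <= pts[0][0]:
        return pts[0][1]
    if x >= pts[-1][0]:
        return pts[-1][1]
    for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
        if x0 <= x <= x1:
            t = (x - x0) / (x1 - x0)
            return y0 * (1.0 - t) + y1 * t
    return pts[-1][1]
```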
```python
import torch
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from comfy.cli_args import args
import os

FONT_PATH = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"

def flatten_dict(d):
    items = {}
```
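flatten_dict is likewise cut off after initialising items. A conventional completion flattens nested dictionaries into one level with dot-joined keys; the separator and recursion scheme below are assumptions, so the sketch uses its own name rather than claiming to be the original.

```python
def flatten_dict_sketch(d, parent_key="", sep="."):
    """Hypothetical completion: {"a": {"b": 1}, "c": 2} -> {"a.b": 1, "c": 2}."""
    items = {}
    for key, value in d.items():
        new_key = f"{parent_key}{sep}{key}" if parent_key else str(key)
        if isinstance(value, dict):
            items.update(flatten_dict_sketch(value, new_key, sep))
        else:
            items[new_key] = value
    return items
```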
```python
import random
import comfy.patcher_extension

kawaii_aegigoe_list = [
    "んっ…♡",
    "あぁんっ…!",
    "はぅっ…♡",
    "くぅ…ん…",
    "ひゃんっ!",
    "ふぁ…♡",
```
```python
import torch

def new_vec(mode, chunks, x):
    xs = x.clone().chunk(chunks, dim=0)
    ref_xs = torch.cat([xi[0].unsqueeze(0).expand(xi.shape[0], -1, -1).clone() for xi in xs], dim=0).clone()
    if mode == "concat":
        new_x = x.clone()
        return torch.cat([new_x, ref_xs], dim=1)
    else:
```
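new_vec replaces every row of each batch chunk with that chunk's first row and, in "concat" mode, appends the result along the token dimension; the else branch is cut off above. The standalone toy below only demonstrates the shape behaviour of the visible "concat" path, with illustrative tensor sizes.

```python
import torch

x = torch.randn(4, 3, 8)                  # (batch=4, tokens=3, dim=8), split into 2 chunks
xs = x.chunk(2, dim=0)                    # two tensors of shape (2, 3, 8)
ref_xs = torch.cat(
    [xi[0].unsqueeze(0).expand(xi.shape[0], -1, -1) for xi in xs], dim=0
)                                         # (4, 3, 8): every row is its chunk's first row
out = torch.cat([x, ref_xs], dim=1)       # "concat" mode output: (4, 6, 8)
print(ref_xs.shape, out.shape)
```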
```python
import gradio as gr
import pandas as pd
import random

query_general_cache = None
query_character_cache = None

df = pd.read_csv("https://huggingface.co/datasets/furusu/aesthetic_score_danbooru2024/resolve/main/part/aes6_5.csv")
#df = pd.read_csv("aes6_5.csv")
df[["tags", "characters"]] = df[["tags", "characters"]].astype(str)
```