@minimalefforttech
Last active March 19, 2025 20:27
Example UI using local LLM
# CC-0 license. I put very little effort into setting this up, so it may behave erratically at times.
# To run this you need to install the following:
# pip install requests PySide6 psutil
# (difflib ships with the Python standard library, so it does not need to be installed.)
# You also need to install ollama from https://ollama.ai/
# Then pull the following models (both are used below):
# ollama pull gemma3:1b
# ollama pull wizardlm2
# see more models at https://ollama.ai/models
import random
import re
import sys
import threading
import time
import psutil
import os
import json
import requests
from collections import defaultdict
from typing import List, Optional, Tuple, Dict, Any
import difflib
from PySide6 import QtCore, QtWidgets, QtQml, QtGui, QtQuick
# Constants
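# ELEMENT_NAME_REGEX splits an element name into a lowercase category prefix, a name part
# and a number, e.g. "chrGolemMale01" -> ("chr", "GolemMale", "01").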
ELEMENT_NAME_REGEX = re.compile(r"([a-z]+)([A-Za-z][A-Za-z0-9]+?)(?:_)?(\d+)")
# This function matches names using regex and difflib rather than an LLM. Remember to use actual code when you can, as LLMs are heavy and unreliable.
def match_names(
text: str,
element_names: List[str],
) -> List[str]:
"""Match text against a list of element names and return the closest matches.
Args:
text: The text to match
element_names: List of element names to match against
Returns:
List of matched element names
"""
if not text:
return []
text = text.lower()
elements_lower = [name.lower() for name in element_names]
if text in elements_lower:
return [element_names[elements_lower.index(text)]]
match_number = text[-1].isdigit()
if match_number:
suffix_number = int(re.search(r'\d+$', text).group())
normalized_to_full = defaultdict(list)
for name in element_names:
if (regex_match := ELEMENT_NAME_REGEX.match(name)):
category_short, name_part, number = regex_match.groups()
if match_number and int(number) != suffix_number:
continue
normalized_to_full[name_part.lower()].append(name)
normalized_to_full[(category_short+name_part).lower()].append(name)
else:
print(f"Failed to parse {name}")
if not element_names:
return []
is_plural = text.endswith('s')
text_name = re.sub(r'\d+$', '', text.rstrip('s'))
closest_matches = difflib.get_close_matches(
text_name,
normalized_to_full.keys(),
n=3,
cutoff=0.6
)
if not closest_matches:
return []
if is_plural:
matched_plural = []
for match in closest_matches:
matched_plural.extend(normalized_to_full[match])
return matched_plural
best_match = closest_matches[0]
return [normalized_to_full[best_match][0]]
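
# Rough usage sketch (element names taken from the hardcoded demo list and prompt examples
# further down; exact results depend on difflib's 0.6 cutoff, so treat this as illustrative):
#
#   match_names("alice", ["chrAlice01", "chrOrc04", "prpSword01"])    # -> ["chrAlice01"]
#   match_names("golem", ["chrGolemMale01", "prpSword01"])            # -> ["chrGolemMale01"]
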
def query_llm(provider: str, model: str, prompt: str, api_key: Optional[str] = None) -> str:
"""Query LLM using direct API calls.
Args:
provider: "ollama" or "openrouter"
model: Model name
prompt: Prompt text
api_key: API key (required for OpenRouter)
Returns:
Model response as string
"""
if provider == "openrouter":
if not api_key:
api_key = os.environ.get("OPENROUTER_API_KEY")
if not api_key:
raise ValueError("API key is required for OpenRouter")
response = requests.post(
"https://openrouter.ai/api/v1/chat/completions",
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
},
json={
"model": model,
"messages": [{"role": "user", "content": prompt}]
}
)
response_data = response.json()
return response_data["choices"][0]["message"]["content"]
else: # ollama
response = requests.post(
"http://localhost:11434/api/generate",
json={
"model": model,
"prompt": prompt,
"stream": False
}
)
response_data = response.json()
if "error" in response_data:
raise ValueError(f"Error from Ollama: {response_data['error']}")
return response_data["response"]
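
# Example call (assumes a local Ollama server on the default port with gemma3:1b already
# pulled; kept as a comment so importing this module has no side effects):
#
#   reply = query_llm("ollama", "gemma3:1b", "Reply with a single word: ready")
#   print(reply)
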
# These are the LLM prompts in plain text format
NAME_EXTRACTION_PROMPT = """You are a helpful assistant that extracts referenced names from text.
Extract all referenced names (such as people's names, objects, or identifiers) from the given text.
The output should be a list of strings, where each string is a referenced name.
Do not include general words or phrases, only the specific names.
The result should strictly follow the JSON schema: List[str].
Text: {text}"""
RANDOM_NOTES_PROMPT = """<instructions>
You are a VFX supervisor writing notes about a scene an artist is working on.
Provide a single short critique in one sentence, up to 20 words.
Names must not include spaces. Use valid shorthand or abbreviations (e.g., chrGolemMale01 → Golem, GolemMale, or chrGolemMale01).
Do not split names into separate words (e.g., chrGolemMale01 cannot become Golem male 01).
Typos are acceptable, but ensure shorthand remains clear and does not add spaces within identifiers.
Do not generate numbered lists.
</instructions>
<examples>
Elements: chrBob01, amlBird01, amlBird02, prpGun02
Output: Bob is floating; adjust foot contacts. The birds are too high.
Elements: chrGolemMale01, chrGolemFemale01, prpSword01
Output: GolemMale looks stiff; improve pose. GolemFemale clothing clips. Sword reflection too dull.
Elements: amlCheetah02, prpGun01_01
Output: Cheetah fur density inconsistent; refine textures. Gun01 reflections look unrealistic.
Elements: chrGolemMale02, amlCrab01
Output: GolemMale02 arm bends unnaturally; fix rig. Crab shell lacks depth; improve material.
</examples>
{elements}"""
def extract_element_references(
text: str,
element_names: List[str],
) -> Tuple[List[str], List[str]]:
"""Extract and match element references from text using LLM.
Args:
text: The text to extract references from
element_names: List of valid element names
Returns:
List of matched element names
"""
text = text.replace("'s", "")
text = re.sub(r'["<>]', '', str(text))
start_time = time.time()
# Call LLM directly instead of using langchain
prompt = NAME_EXTRACTION_PROMPT.format(text=text)
llm_response = query_llm("ollama", "gemma3:1b", prompt)
# Parse JSON response manually
try:
# Try to extract JSON from the response
extracted_names = []
# Find potential JSON in the response
json_start = llm_response.find('[')
json_end = llm_response.rfind(']') + 1
if json_start >= 0 and json_end > json_start:
json_str = llm_response[json_start:json_end]
extracted_names = json.loads(json_str)
else:
print(f"Failed to parse JSON from response: {llm_response}")
except json.JSONDecodeError as e:
print(f"JSON parse error: {e}, response: {llm_response}")
extracted_names = []
result = []
matched_extractions = []
for name in extracted_names:
matched = match_names(name, element_names)
if matched:
result.extend(matched)
matched_extractions.append(name)
end_time = time.time()
print("input: ", text)
print("extracted_names: ", extracted_names)
print("output: ", result)
print(f"Time taken: {end_time - start_time} seconds")
return (matched_extractions, result)
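
# End to end, a note such as "Alice looks stiff" would typically lead the LLM to return
# something like '["Alice"]', which match_names() then resolves to ["chrAlice01"] against
# the demo element list. LLM output varies between runs, so this is illustrative only.
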
class WordHighlighter(QtGui.QSyntaxHighlighter):
""" A simple highlighter that lets me highlight the matches in qml, it's a bit of a hack but it works for a quick demo."""
def __init__(self, parent=None):
super().__init__(parent)
self._highlight_words = []
self._highlight_format = QtGui.QTextCharFormat()
self._highlight_format.setForeground(QtGui.QColor("#3c7ee8"))
@QtCore.Slot(QtQuick.QQuickItem)
def attachToQml(self, text_area: QtQuick.QQuickItem):
# Should probably do validations here...
ptr = text_area.property("textDocument")
super().setDocument(ptr.textDocument())
@QtCore.Property("QStringList")
def highlight_words(self):
return self._highlight_words
@highlight_words.setter
def highlight_words(self, words):
"""Set the words to highlight and refresh the document."""
self._highlight_words = words
self.rehighlight()
def highlightBlock(self, text):
# Eh, not great but it works for the demo, tbh this is plenty fast enough anyway.
for word in self._highlight_words:
regex = QtCore.QRegularExpression(fr"\b{QtCore.QRegularExpression.escape(word)}\b", QtCore.QRegularExpression.CaseInsensitiveOption)
match_iterator = regex.globalMatch(text)
while match_iterator.hasNext():
match = match_iterator.next()
start = match.capturedStart()
length = match.capturedLength()
self.setFormat(start, length, self._highlight_format)
class Backend(QtCore.QObject):
"""Backend class handling element matching and UI interactions.
Signals:
highlight_elements_changed: Emitted to highlight elements in the list view
partial_matches_changed: Emitted alongside the highlight to trigger highlighting in the text editor
"""
highlight_elements_changed = QtCore.Signal(list)
partial_matches_changed = QtCore.Signal(list)
def __init__(self) -> None:
super().__init__()
# Hardcoded list for the demo.
self._elements = [
"chrAlice01", "chrGreg01",
"chrGenericHuman04", "chrGenericHuman08",
"chrOrc04", "chrOrc06",
"amlSnail01", "amlCheetah02",
"prpGun01_01", "prpGun01_02",
"prpSword01"
]
self._current_match_thread: Optional[threading.Thread] = None
self._stop_thread: bool = False
@QtCore.Property("QStringList", constant=True)
def elements(self) -> List[str]:
return self._elements
def _run_llm_match(self, notes: str) -> None:
"""Run LLM matching in a separate thread.
Args:
notes: The notes text to process
"""
try:
if not self._stop_thread:
extracted_names, matches = extract_element_references(notes, self._elements)
if not self._stop_thread:
self.partial_matches_changed.emit(extracted_names)
self.highlight_elements_changed.emit(matches)
except Exception as e:
print(f"Error in LLM matching: {e}")
raise
@QtCore.Slot(str)
def notes_updated(self, notes: str) -> None:
"""Handle notes text updates.
Args:
notes: The updated notes text
"""
if self._current_match_thread and self._current_match_thread.is_alive():
self._stop_thread = True
self._current_match_thread.join()
self._stop_thread = False
self._current_match_thread = threading.Thread(
target=self._run_llm_match,
args=(notes,)
)
self._current_match_thread.start()
@QtCore.Slot(result=str)
def generate_random_notes(self) -> str:
"""Generate and emit random notes using LLM."""
# Randomly choose 1-3 elements from self._elements
random_elements = random.sample(self._elements, random.randint(1, 3))
try:
prompt = RANDOM_NOTES_PROMPT.format(elements=random_elements)
response = query_llm("ollama", "wizardlm2", prompt)
            # Strip a leading list marker like "1. " or "01 " that the model sometimes adds
            # despite the instructions (str.strip removes a character set, not a prefix).
            return re.sub(r"^\s*\d+[.)]?\s+", "", response.strip())
except Exception as e:
return f"Error generating random notes: {e}"
@QtCore.Slot(result=str)
def get_process_memory_usage(self) -> str:
"""Returns the total memory usage of this process and all its subprocesses.
Recursively traverses the process tree starting from the current process
and sums up the RSS (Resident Set Size) memory usage of all processes.
Returns:
str: Total memory usage formatted as "XXXMb"
"""
try:
# Get memory usage of this process and all its subprocesses
process = psutil.Process(os.getpid())
memory = process.memory_info().rss
for child in process.children(recursive=True):
try:
memory += child.memory_info().rss
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
            # Add memory used by any "ollama" processes; not strictly accurate, as they may have already been running.
            for proc in psutil.process_iter():
                try:
                    if proc.name().startswith("ollama"):
                        memory += proc.memory_info().rss
                except (psutil.NoSuchProcess, psutil.AccessDenied):
                    continue
return f"{memory / (1024 ** 2):.0f}Mb"
except Exception as e:
return "N/A"
def main() -> None:
app = QtWidgets.QApplication(sys.argv)
engine = QtQml.QQmlApplicationEngine()
QtQml.qmlRegisterType(WordHighlighter, "BackendComponents", 1, 0, "WordHighlighter")
backend = Backend()
engine.rootContext().setContextProperty("backend", backend)
    engine.load(QtCore.QUrl.fromLocalFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), "main.qml")))
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec())
if __name__ == "__main__":
main()
// main.qml
import QtQuick 2.15
import QtQuick.Controls 2.15
import QtQuick.Controls.Material 2.15
import QtQuick.Layouts 1.15
import BackendComponents 1.0
ApplicationWindow {
id: root
visible: true
width: 800
height: 600
title: "Element Manager"
Material.theme: Material.Dark
Material.accent: Material.Blue
// Properties
property var highlightedElements: []
property var partialMatches: []
// Backend Connections
Connections {
id: backendConnections
target: backend
function onHighlight_elements_changed(elements) {
root.highlightedElements = elements
elementListView.model.modelReset()
}
function onPartial_matches_changed(elements) {
root.partialMatches = elements
}
}
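    // Note: for the Python snake_case signals above, QML derives the handler name by
    // prefixing "on" and capitalising only the first letter, hence onHighlight_elements_changed.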
// Initialization
Component.onCompleted: {
elementListView.model.clear()
for (let element of backend.elements) {
elementListView.model.append({
text: element,
checked: false,
highlighted: false
})
}
}
ColumnLayout {
id: mainLayout
anchors.fill: parent
spacing: 0
// Header
Rectangle {
id: headerRect
Layout.fillWidth: true
height: 50
color: Material.color(Material.Grey, Material.Shade900)
RowLayout {
id: headerLayout
anchors.fill: parent
anchors.margins: 10
Label {
id: headerLabel
text: "LLM Element Name Demo"
font.pixelSize: 18
font.bold: true
color: Material.foreground
}
Item {
id: headerSpacer
Layout.fillWidth: true
}
}
}
// Main Content
SplitView {
id: mainSplitView
Layout.fillWidth: true
Layout.fillHeight: true
// Left Panel
Rectangle {
id: leftPanel
SplitView.preferredWidth: 300
SplitView.minimumWidth: 200
color: Material.color(Material.Grey, Material.Shade800)
ColumnLayout {
id: leftPanelLayout
anchors.fill: parent
anchors.margins: 10
spacing: 10
ScrollView {
id: elementListScrollView
Layout.fillWidth: true
Layout.fillHeight: true
clip: true
ListView {
id: elementListView
model: ListModel {}
delegate: CheckDelegate {
id: elementDelegate
width: elementListView.width
text: model.text
checked: model.checked
onCheckStateChanged: {
if (checked !== model.checked) {
model.checked = checked
}
}
Rectangle {
id: highlightRect
anchors.fill: parent
color: Material.accent
opacity: root.highlightedElements.includes(model.text) ? 0.2 : 0
z: -1
}
}
}
}
// Memory Usage Display
Rectangle {
id: memoryDisplayRect
Layout.fillWidth: true
height: memoryLabel.height + 10
color: Material.color(Material.Grey, Material.Shade900)
radius: 4
Label {
id: memoryLabel
anchors.centerIn: parent
property string usage: ""
text: "Memory: " + usage
}
Timer {
id: memoryUpdateTimer
interval: 1000
repeat: true
triggeredOnStart: true
running: true
onTriggered: {
memoryLabel.usage = backend.get_process_memory_usage()
}
}
}
}
}
// Right Panel
ColumnLayout {
id: rightPanel
SplitView.fillWidth: true
spacing: 5
ScrollView {
id: notesScrollView
Layout.fillWidth: true
Layout.margins: 10
Layout.preferredHeight: 100
TextArea {
id: notesInput
placeholderText: "Enter notes..."
wrapMode: TextArea.Wrap
Timer {
id: notesUpdateTimer
interval: 500
property string last_text: ""
onTriggered: {
if (notesInput.text !== last_text) {
last_text = notesInput.text
backend.notes_updated(notesInput.text)
}
}
}
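                // The Timer above debounces typing: every keystroke restarts it (see
                // onTextChanged below), so the backend is only queried 500 ms after the
                // user stops typing.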
onTextChanged: {
notesUpdateTimer.restart()
}
}
WordHighlighter {
id: wordHighlighter
highlight_words: root.partialMatches
Component.onCompleted: {
wordHighlighter.attachToQml(notesInput)
}
}
}
// Action Buttons
RowLayout {
id: actionButtonsLayout
Layout.alignment: Qt.AlignRight
Layout.margins: 10
spacing: 10
Button {
id: generateNotesButton
text: "Generate Notes"
onClicked: {
notesInput.text = backend.generate_random_notes()
}
}
Button {
id: selectElementsButton
text: "Select Elements"
onClicked: {
for (let i = 0; i < elementListView.model.count; i++) {
let item = elementListView.model.get(i)
item.checked = root.highlightedElements.includes(item.text)
}
}
}
}
Item {
id: rightPanelSpacer
Layout.fillHeight: true
}
Button {
id: loadButton
text: "Load"
Layout.alignment: Qt.AlignRight
Layout.margins: 10
onClicked: {
console.log("Load button clicked")
}
}
}
}
}
}