Groq-powered DeepSeek-R1 Llama 70B distill: a mixture-of-reasoners CLI
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "rich>=13.7.1",
# "groq>=0.5.0",
# "python-dotenv>=1.0.0",
# "questionary>=2.0.1",
# ]
# ///
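# Dependencies are declared inline via PEP 723 script metadata, so a
# PEP 723-aware runner (e.g. `uv run`) can resolve them automatically.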
import asyncio
import os
from dataclasses import dataclass
from typing import Any, Dict, List
from groq import Groq
from dotenv import load_dotenv
from questionary import text
from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.syntax import Syntax
from rich.text import Text
load_dotenv()
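
# A single analytical perspective: an emoji label plus a human-readable name.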
@dataclass
class Reasoner:
    emoji: str
    name: str
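
# Rich-based terminal presentation layer: header banner, transient spinners,
# live-updating panels, and rendered markdown output.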
class CLIDisplay:
    def __init__(self):
        self.console = Console()
        self.current_live = None
        self.progress = None

    def print_header(self):
        self.console.print(
            Panel.fit(
                "[bold magenta]🧠 Lightning Minds[/bold magenta]",
                subtitle="Explore questions through multiple analytical perspectives",
                style="bold cyan"
            )
        )

    def print_error(self, message: str):
        self.console.print(f"[bold red]🚨 Error:[/bold red] {message}")
    def start_progress(self, message: str):
        self.progress = Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True
        )
        self.progress.add_task(description=message, total=None)
        return self.progress
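
    # Streams partial analysis text into the active Live panel as chunks arrive.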
    def update_analysis(self, reasoner: Reasoner, content: str):
        panel = Panel(
            Syntax(content, "markdown", theme="monokai", word_wrap=True),
            title=f"{reasoner.emoji} {reasoner.name}",
            border_style="yellow"
        )
        if self.current_live:
            self.current_live.update(panel)

    def show_synthesis(self, content: str):
        panel = Panel(
            Syntax(content, "markdown", theme="gruvbox-dark", word_wrap=True),
            title="🌟 Integrated Synthesis",
            border_style="green"
        )
        self.console.print(panel)

    def show_reasoners(self, reasoners: List[Reasoner]):
        self.console.print("\n[bold]Identified Perspectives:[/bold]")
        for r in reasoners:
            self.console.print(f" {r.emoji} [cyan]{r.name}[/cyan]")
        self.console.print("")

    def show_analysis(self, reasoner: Reasoner, content: str):
        """Display a completed analysis with a panel"""
        panel = Panel(
            Syntax(content, "markdown", theme="monokai", word_wrap=True),
            title=f"{reasoner.emoji} {reasoner.name}",
            border_style="yellow"
        )
        self.console.print(panel)
        self.console.print("")  # Add spacing between analyses
class LightningMindsCLI:
    def __init__(self):
        self.client = None
        self.display = CLIDisplay()
        self.groq_key = os.getenv("GROQ_API_KEY")

    async def initialize(self):
        self.display.print_header()
        if not self.groq_key:
            self.groq_key = text(
                "🔑 Enter your Groq API key:",
                validate=lambda x: len(x) > 10
            ).ask()
        self.client = Groq(api_key=self.groq_key)
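
    # Ask the llama-3.3-70b-specdec model to propose 3-4 analytical
    # perspectives for the question, returned as comma-separated "EMOJI Name" pairs.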
    async def get_reasoners(self, message: str) -> List[Reasoner]:
        prompt = f"""For this question: "{message}", determine 3-4 essential analytical perspectives.
Return only the list in format: "EMOJI PerspectiveName, EMOJI PerspectiveName"
Example: "🔬 Technical Analyst, ⚖️ Legal Expert, 🌍 Environmental Scientist"
Focus on perspectives that will provide unique and valuable insights."""
        try:
            response = self.client.chat.completions.create(
                model="llama-3.3-70b-specdec",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=150
            )
            raw_text = response.choices[0].message.content
            entries = [e.strip() for e in raw_text.split(',')]
            # Split each entry on the first whitespace so multi-codepoint emojis
            # (e.g. "⚖️") stay intact instead of taking only the first character.
            reasoners = []
            for entry in entries:
                parts = entry.split(maxsplit=1)
                if len(parts) == 2:
                    reasoners.append(Reasoner(emoji=parts[0], name=parts[1].strip()))
            return reasoners[:4]
        except Exception as e:
            self.display.print_error(str(e))
            return []
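
    # Stream one perspective's analysis from the DeepSeek-R1 distill,
    # updating the Live panel as tokens arrive.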
    async def get_analysis(self, message: str, reasoner: Reasoner) -> str:
        prompt = f"""As {reasoner.emoji} {reasoner.name}, analyze:
{message}
Provide your analysis in this structure:
1. Core principles and methodology
2. Key observations
3. Critical implications
4. Potential limitations
Maintain focus on your specific domain expertise while acknowledging interconnections."""
        try:
            response = self.client.chat.completions.create(
                model="deepseek-r1-distill-llama-70b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=4096,
                stream=True
            )
            analysis = ""
            for chunk in response:
                if chunk.choices[0].delta.content:
                    analysis += chunk.choices[0].delta.content
                    self.display.update_analysis(reasoner, analysis)
            return analysis
        except Exception as e:
            self.display.print_error(str(e))
            return f"Analysis failed: {str(e)}"
    async def get_synthesis(self, message: str, analyses: List[Dict[str, Any]]) -> str:
        formatted = "\n\n".join(
            f"{a['reasoner'].emoji} {a['reasoner'].name}:\n{a['content']}"
            for a in analyses
        )
        prompt = f"""Given these expert analyses on the question "{message}":
{formatted}
Create a balanced synthesis that:
1. Highlights key agreements and tensions
2. Identifies practical implications
3. Notes important considerations and tradeoffs
Focus on actionable insights while acknowledging complexity."""
        try:
            response = self.client.chat.completions.create(
                model="deepseek-r1-distill-llama-70b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=2048,
                stream=True
            )
            synthesis = ""
            for chunk in response:
                if chunk.choices[0].delta.content:
                    synthesis += chunk.choices[0].delta.content
            return synthesis
        except Exception as e:
            self.display.print_error(str(e))
            return f"Synthesis failed: {str(e)}"
    async def process_query(self, message: str):
        with self.display.start_progress("🔍 Identifying perspectives..."):
            reasoners = await self.get_reasoners(message)
        if not reasoners:
            return
        self.display.show_reasoners(reasoners)
        with Live(transient=True) as live:
            self.display.current_live = live
            analyses = []
            for reasoner in reasoners:
                analysis = await self.get_analysis(message, reasoner)
                analyses.append({
                    "reasoner": reasoner,
                    "content": analysis
                })
        if not analyses:
            self.display.print_error("No analyses were completed")
            return
        # Display all completed analyses
        self.display.console.print("\n[bold cyan]Individual Perspective Analyses:[/bold cyan]\n")
        for analysis in analyses:
            self.display.show_analysis(analysis["reasoner"], analysis["content"])
        with self.display.start_progress("🌟 Creating integrated synthesis..."):
            synthesis = await self.get_synthesis(message, analyses)
        self.display.console.print("\n[bold cyan]Final Integrated Synthesis:[/bold cyan]")
        self.display.show_synthesis(synthesis)
    def run_sync(self):
        """Synchronous entry point for the CLI"""
        asyncio.run(self.initialize())
        while True:
            try:
                question = text(
                    "\n💡 Enter your question (or 'quit' to exit):",
                    validate=lambda x: len(x) > 3
                ).ask()
                # A cancelled prompt returns None; treat it like "quit".
                if not question or question.lower() in ("exit", "quit"):
                    break
                asyncio.run(self.process_query(question))
            except KeyboardInterrupt:
                break
        self.display.console.print("\n[bold magenta]✨ Thank you for exploring with Lightning Minds![/bold magenta]\n")


if __name__ == "__main__":
    cli = LightningMindsCLI()
    cli.run_sync()