Query multiple online LLMs with a single prompt
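Typical invocations, assuming the main script below is saved as llm.py (the gist viewer doesn't show filenames, so that name and the paths are illustrative):

#   python llm.py                                         # compose the prompt in $EDITOR, pick services/models from menus
#   python llm.py --prompt "Explain the CAP theorem in one sentence"
#   python llm.py --file prompts/question.txt
#   python llm.py --image sushi.png --prompt "What dish is this?"   # vision-capable services only
#   python llm.py --cmd "list files larger than 1GB"     # quick bash-command lookup via Groq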
.gitignore:
__pycache__/
*.txt
*.jpg
*.jpeg
*.png
llm_out/
prompts/
helper.py:
from PIL import Image
import base64


def determine_image_type(image_path):
    """Return the MIME type of an image file, or an error string."""
    try:
        with Image.open(image_path) as img:
            # Map Pillow's detected format to a MIME type
            format_to_mime = {
                'JPEG': 'image/jpeg',
                'PNG': 'image/png',
                'GIF': 'image/gif',
                'WEBP': 'image/webp',
            }
            return format_to_mime.get(img.format, 'unknown')
    except (IOError, OSError) as e:
        return f'Error: {e}'


# Example usage:
# image_path = 'sushi.png'
# mime_type = determine_image_type(image_path)
# print(f'The MIME type of the image is: {mime_type}')


def encode_image(image_path):
    """Read an image file and return its base64-encoded contents as a string."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
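The main script composes these two helpers into an inline data URL for the OpenAI-style and xAI vision payloads. A minimal sketch of that composition (sushi.png is just the placeholder path from the comments above):

# Build a data URL the same way call_openai_api / call_xai_api do below:
mime_type = determine_image_type("sushi.png")
base64_image = encode_image("sushi.png")
data_url = f"data:{mime_type};base64,{base64_image}"
print(data_url[:50] + "...")  # e.g. data:image/png;base64,iVBOR...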
Model-listing script:
import argparse
import os
from os import getenv


def list_gemini_models():
    import google.generativeai as genai
    api_key = getenv("GEMINI_API_KEY")
    genai.configure(api_key=api_key)
    for m in genai.list_models():
        if 'generateContent' in m.supported_generation_methods:
            print(m.name)


def list_groq_models():
    import requests
    api_key = os.environ.get("GROQ_API_KEY")
    url = "https://api.groq.com/openai/v1/models"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    # The endpoint is OpenAI-compatible: models live under the "data" key
    for model in response.json()["data"]:
        print(model["id"])


def list_openai_models():
    from openai import OpenAI
    api_key = getenv("OPENAI_API_KEY")
    proj_key = getenv("OPENAI_PROJECT")
    client = OpenAI(api_key=api_key, project=proj_key)
    models = client.models.list()
    for model in models:
        print(model.id)


def list_claude_models():
    import anthropic
    api_key = getenv("ANTHROPIC_API_KEY")
    client = anthropic.Anthropic(api_key=api_key)
    # The Anthropic SDK exposes model listing as client.models.list();
    # each entry carries an `id` field
    models = client.models.list()
    for model in models:
        print(model.id)


def main():
    parser = argparse.ArgumentParser(description="List models for a specific service.")
    parser.add_argument('--service', type=str, required=True,
                        choices=['gemini', 'groq', 'openai', 'claude'],
                        help="The service for which to list models.")
    args = parser.parse_args()
    if args.service == 'gemini':
        list_gemini_models()
    elif args.service == 'groq':
        list_groq_models()
    elif args.service == 'openai':
        list_openai_models()
    elif args.service == 'claude':
        list_claude_models()


if __name__ == "__main__":
    main()
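Run it with a service name to print that provider's model ids (list_models.py is an assumed filename; each call needs the matching API key from the environment variables used above, e.g. GEMINI_API_KEY or ANTHROPIC_API_KEY):

#   python list_models.py --service gemini
#   python list_models.py --service claude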
Main script:
# Query multiple online LLMs with a single prompt
import argparse
from os import getenv, makedirs
import pyperclip
from simple_term_menu import TerminalMenu
import click
import tempfile
import datetime
from helper import determine_image_type, encode_image
import platform

MAX_TOKENS = 8192
GROQ_MODELS = [
    "gemma-7b-it",
    "mixtral-8x7b-32768",
    "llama3-8b-8192",
    "llama3-70b-8192",
    "llama-3.2-90b-text-preview",
    "llama-3.2-11b-text-preview",
    "llama-3.1-70b-versatile",
    "DeepSeek-R1-Distill-Llama-70b",
]

# Gemini models: https://ai.google.dev/gemini-api/docs/models
GEMINI_MODELS = [
    "gemini-2.5-pro",
    "gemini-2.5-flash",
    "gemini-2.5-flash-lite-preview-06-17",
    "gemini-2.0-flash",
    "gemini-2.0-flash-lite",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-1.5-pro",
]

# Claude models: https://docs.anthropic.com/en/docs/about-claude/models/all-models
CLAUDE_MODELS = [
    "claude-opus-4-20250514",
    "claude-sonnet-4-20250514",
    "claude-3-7-sonnet-20250219",  # Claude 3.7 Sonnet (latest)
    "claude-3-5-sonnet-20241022",  # Claude 3.5 Sonnet v2 (latest)
    "claude-3-5-sonnet-20240620",  # Claude 3.5 Sonnet (previous version)
    "claude-3-5-haiku-20241022",   # Claude 3.5 Haiku (latest)
    "claude-3-opus-20240229",      # Claude 3 Opus
    "claude-3-sonnet-20240229",    # Claude 3 Sonnet
    "claude-3-haiku-20240307",     # Claude 3 Haiku
]

# OpenAI models: https://platform.openai.com/docs/models
OPENAI_MODELS = [
    # Reasoning models
    "o4-mini",
    "o3",
    "o3-mini",
    "o1",
    "o1-mini",
    "o1-pro",
    # Flagship chat models
    "gpt-4.1",
    "gpt-4o",
    "chatgpt-4o-latest",
    # Cost-optimized models
    "gpt-4.1-mini",
    "gpt-4.1-nano",
    "gpt-4o-mini",
    "gpt-4o-search-preview",
    "gpt-4o-mini-search-preview",
    # Older GPT models
    "gpt-4-turbo",
    "gpt-4",
    "gpt-3.5-turbo",
]

SAMBA_NOVA_MODELS = [
    "Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.3-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct",
    "DeepSeek-R1",
    "DeepSeek-R1-Distill-Llama-70B",
    "QwQ-32B",
    "DeepSeek-V3-0324",
]

DEEPSEEK_MODELS = ["deepseek-chat", "deepseek-reasoner"]

REPLICATE_MODELS = ["meta-llama-3.1-405b-instruct"]

# Grok models: https://docs.x.ai/docs/models#models-and-pricing
GROK_MODELS = [
    "grok-3-beta",
    "grok-3-fast-beta",
    "grok-3-mini-beta",
    "grok-3-mini-fast-beta",
    "grok-2-vision-1212",
    "grok-2-image-1212",
]

SERVICES = {
    "groq": GROQ_MODELS,
    "gemini": GEMINI_MODELS,
    "claude": CLAUDE_MODELS,
    "openai": OPENAI_MODELS,
    "replicate": REPLICATE_MODELS,
    "sambaNova": SAMBA_NOVA_MODELS,
    "deepseek": DEEPSEEK_MODELS,
    "xai": GROK_MODELS,
}

# Services with vision support
SERVICES_VIS = {
    "gemini": GEMINI_MODELS,
    "claude": CLAUDE_MODELS,
    "openai": OPENAI_MODELS,
    "replicate": REPLICATE_MODELS,
    "xai": ["grok-2-vision"],
}
def call_groq_api(prompt, model):
    """Groq API
    https://console.groq.com/docs/models
    """
    from groq import Groq
    api_key = getenv("GROQ_API_KEY")
    client = Groq(api_key=api_key)
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}], model=model
    )
    return chat_completion.choices[0].message.content


def call_gemini_api(prompt, model, image=None):
    """Gemini API
    https://ai.google.dev/gemini-api/docs/get-started/python
    """
    import google.generativeai as genai
    import PIL.Image
    api_key = getenv("GEMINI_API_KEY")
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(model)
    if image:
        img = PIL.Image.open(image)
        return model.generate_content([prompt, img]).text
    else:
        return model.generate_content(prompt).text


def call_claude_api(prompt, model, image=None):
    """Claude API
    https://docs.anthropic.com/claude/reference/client-sdks
    """
    import anthropic
    api_key = getenv("ANTHROPIC_API_KEY")
    client = anthropic.Anthropic(api_key=api_key)
    messages = [{"role": "user", "content": prompt}]
    if image:
        image_data = encode_image(image)
        image_media_type = determine_image_type(image)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": image_media_type,
                            "data": image_data,
                        },
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]
    # Add beta header for Claude 3.7 Sonnet to raise the output token limit to 128k
    headers = {}
    max_tokens = MAX_TOKENS
    if model == "claude-3-7-sonnet-20250219":
        headers = {"anthropic-beta": "output-128k-2025-02-19"}
        max_tokens = 128000
    message = client.messages.create(
        model=model, max_tokens=max_tokens, messages=messages, extra_headers=headers
    )
    return message.content[0].text


def call_openai_api(prompt, model, image=None):
    """OpenAI API
    https://platform.openai.com/docs/quickstart
    """
    from openai import OpenAI
    api_key = getenv("OPENAI_API_KEY")
    proj_key = getenv("OPENAI_PROJECT")
    client = OpenAI(api_key=api_key, project=proj_key)
    if image:
        base64_image = encode_image(image)
        image_type = determine_image_type(image)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:{image_type};base64,{base64_image}"
                        },
                    },
                ],
            }
        ]
    else:
        messages = [{"role": "user", "content": prompt}]
    chat_completion = client.chat.completions.create(
        messages=messages,
        model=model,
    )
    return chat_completion.choices[0].message.content


def call_xai_api(prompt, model, image=None):
    """xAI API
    https://docs.x.ai/docs/tutorial#step-3-make-your-first-request
    """
    import requests
    api_key = getenv("XAI_API_KEY")
    base_url = "https://api.x.ai/v1/chat/completions"
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    if image:
        base64_image = encode_image(image)
        image_type = determine_image_type(image)
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:{image_type};base64,{base64_image}"
                        },
                    },
                ],
            }
        ]
    else:
        messages = [{"role": "user", "content": prompt}]
    payload = {
        "messages": messages,
        "model": model,
        "search_parameters": {"mode": "auto"},
    }
    response = requests.post(base_url, headers=headers, json=payload)
    response_data = response.json()
    return response_data["choices"][0]["message"]["content"]


def call_replicate_api(prompt, model, image=None):
    import replicate
    model = "meta/" + model
    if image:
        response = replicate.run(
            model,
            input={
                "prompt": prompt,
                "image": open(image, "rb"),
                "max_tokens": MAX_TOKENS,
            },
        )
    else:
        response = replicate.run(
            model, input={"prompt": prompt, "max_tokens": MAX_TOKENS}
        )
    return "".join(response)


def call_samba_nova_api(prompt, model, image=None):
    from openai import OpenAI
    client = OpenAI(
        api_key=getenv("SAMBA_NOVA_API_TOKEN"),
        base_url="https://api.sambanova.ai/v1",
    )
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.1,
        top_p=0.1,
    )
    return response.choices[0].message.content


def call_deepseek_api(prompt, model, image=None):
    from openai import OpenAI
    client = OpenAI(
        api_key=getenv("DEEPSEEK_API_KEY"),
        base_url="https://api.deepseek.com",
    )
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
def log_response(model, prompt, response):
    # Timestamp the log file name
    current_time = datetime.datetime.now()
    directory_path = "llm_out"
    makedirs(directory_path, exist_ok=True)
    filename = (
        f"{directory_path}/llm_{model}_{current_time.strftime('%Y-%m-%d-%H-%M')}.md"
    )
    save_str = f"""
{current_time.strftime("%Y-%m-%d %I:%M %p")}
### Model
{model}
### Prompt
{prompt}
### Response
{response}
"""
    # Open the file in append mode and write the entry
    with open(filename, "a") as f:
        f.write(save_str)
    print(f"Response logged to {filename}")
    # Optionally copy to the clipboard:
    # try:
    #     pyperclip.copy(save_str)
    # except pyperclip.PyperclipException:
    #     pass


def prompt_from_file():
    # Create a temporary text file (delete=False keeps it around after editing)
    f = tempfile.NamedTemporaryFile(suffix=".txt", delete=False)
    # Open the file in a text editor
    click.edit(require_save=True, filename=f.name)
    print("Prompt saved to:", f.name)
    with open(f.name, "r") as file:
        lines = file.readlines()
    return "".join(lines)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--cmd", help="Quickly look up a bash command.")
    parser.add_argument("--prompt", help="Specify a prompt")
    parser.add_argument("--image", help="Path to image")
    parser.add_argument("--file", help="Specify a prompt file")
    parser.add_argument(
        "--nolog", action="store_true", default=False, help="Do not log to file."
    )
    args = parser.parse_args()

    if args.cmd:
        cmd = args.cmd
        prompt = f"Return the bash command to {cmd} using platform {platform.platform()}. Do not return any markdown or explanation. Do not wrap with backticks ``."
        response = call_groq_api(prompt, "llama3-70b-8192")
        print(response)
        quit()
    elif args.prompt:
        prompt = args.prompt
    elif args.file:
        prompt = open(args.file).read()
    else:
        prompt = prompt_from_file()

    if args.image:
        service_options = list(SERVICES_VIS.keys())
    else:
        service_options = list(SERVICES.keys())
    service_menu = TerminalMenu(service_options, multi_select=True)
    for service_menu_entry_index in service_menu.show():
        service_selection = service_options[service_menu_entry_index]
        if args.image:
            model_options = SERVICES_VIS[service_selection]
        else:
            model_options = SERVICES[service_selection]
        model_menu = TerminalMenu(model_options)
        model_menu_entry_index = model_menu.show()
        model_selection = model_options[model_menu_entry_index]
        print("Using model:", model_selection)
        match service_selection:
            case "gemini":
                response = call_gemini_api(prompt, model_selection, args.image)
            case "groq":
                response = call_groq_api(prompt, model_selection)
            case "claude":
                response = call_claude_api(prompt, model_selection, args.image)
            case "openai":
                response = call_openai_api(prompt, model_selection, args.image)
            case "replicate":
                response = call_replicate_api(prompt, model_selection, args.image)
            case "sambaNova":
                response = call_samba_nova_api(prompt, model_selection, args.image)
            case "deepseek":
                response = call_deepseek_api(prompt, model_selection, args.image)
            case "xai":
                response = call_xai_api(prompt, model_selection, args.image)
            case _:
                # Skip unknown services so `response` is never unbound
                print("Invalid service.")
                continue
        print(response)
        if not args.nolog:
            log_response(model_selection, prompt, response)


if __name__ == "__main__":
    main()
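The call_* functions are also usable programmatically, which is handy for fanning one prompt out to several services without the menus. A minimal sketch, assuming the script above is saved as llm.py and importable (module-level code only defines constants, and main() is guarded):

# fan_out.py: query two services with one prompt and print both answers
from llm import call_groq_api, call_openai_api

prompt = "Summarize the CAP theorem in one sentence."
answers = {
    "groq": call_groq_api(prompt, "llama3-70b-8192"),
    "openai": call_openai_api(prompt, "gpt-4o-mini"),
}
for service, text in answers.items():
    print(f"--- {service} ---")
    print(text)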
requirements.txt:
pyperclip
simple-term-menu
click
groq
openai
anthropic
Pillow
google-generativeai
vertexai
replicate
requests  # used by call_xai_api and the Groq model lister
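A virtual environment plus a standard install covers everything above:

#   python -m venv .venv && source .venv/bin/activate
#   pip install -r requirements.txt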