woa gemini
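A single-file Gemini CLI built on the google-genai SDK: one-shot prompts, multi-file prompt input, optional image attachment, streaming output, and an interactive multi-line chat mode.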
#!/usr/bin/env python3
import sys
import os
from google import genai
from google.genai import types
from PIL import Image
import readline  # For command history
def print_usage():
    """Print usage instructions."""
    print("Usage: python gemini_cli.py [options] or python gemini_cli.py \"your prompt here\"")
    print("\nOptions:")
    print("  -k, --api-key KEY        Gemini API key (or set the GEMINI_API_KEY environment variable)")
    print("  -m, --model MODEL        Gemini model to use (default: gemini-2.0-flash-lite)")
    print("  -p, --prompt TEXT        Text prompt to send to the model")
    print("  -f, --prompt-file FILE   Files containing prompts (can list multiple files after this flag)")
    print("  -i, --image FILE         Path to an image file to include with the prompt")
    print("  -s, --stream             Use streaming response")
    print("  -c, --chat               Use interactive chat mode (streaming enabled by default)")
    print("  -n, --no-stream          Disable streaming (can be used with --chat to override default streaming)")
    print("  -y, --system TEXT        System instructions for the model")
    print("  -t, --temperature VALUE  Temperature for text generation (default: 0.7)")
    print("  -x, --max-tokens VALUE   Maximum output tokens (default: 8024)")
    print("  -h, --help               Display this help message and exit")
def parse_args():
    """Parse command line arguments using sys.argv."""
    args = {
        "api_key": None,  # Falls back to the GEMINI_API_KEY environment variable in main()
        "model": "gemini-2.0-flash-lite",
        "prompt": None,
        "prompt_files": [],
        "image": None,
        "stream": False,
        "chat": False,
        "no_stream": False,
        "system": None,
        "temperature": 0.7,
        "max_tokens": 8024
    }
    positional_args = []
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if arg in ["-h", "--help"]:
            print_usage()
            sys.exit(0)
        elif (arg in ["-k", "--api-key"]) and i + 1 < len(sys.argv):
            args["api_key"] = sys.argv[i + 1]
            i += 2
        elif (arg in ["-m", "--model"]) and i + 1 < len(sys.argv):
            args["model"] = sys.argv[i + 1]
            i += 2
        elif (arg in ["-p", "--prompt"]) and i + 1 < len(sys.argv):
            args["prompt"] = sys.argv[i + 1]
            i += 2
        elif arg in ["-f", "--prompt-file"]:
            # Collect all arguments until the next flag
            i += 1
            while i < len(sys.argv) and not sys.argv[i].startswith("-"):
                args["prompt_files"].append(sys.argv[i])
                i += 1
        elif (arg in ["-i", "--image"]) and i + 1 < len(sys.argv):
            args["image"] = sys.argv[i + 1]
            i += 2
        elif (arg in ["-y", "--system"]) and i + 1 < len(sys.argv):
            args["system"] = sys.argv[i + 1]
            i += 2
        elif (arg in ["-t", "--temperature"]) and i + 1 < len(sys.argv):
            try:
                args["temperature"] = float(sys.argv[i + 1])
            except ValueError:
                print(f"Error: Invalid temperature value: {sys.argv[i + 1]}")
                sys.exit(1)
            i += 2
        elif (arg in ["-x", "--max-tokens"]) and i + 1 < len(sys.argv):
            try:
                args["max_tokens"] = int(sys.argv[i + 1])
            except ValueError:
                print(f"Error: Invalid max tokens value: {sys.argv[i + 1]}")
                sys.exit(1)
            i += 2
        elif arg in ["-s", "--stream"]:
            args["stream"] = True
            i += 1
        elif arg in ["-n", "--no-stream"]:
            args["no_stream"] = True
            i += 1
        elif arg in ["-c", "--chat"]:
            args["chat"] = True
            i += 1
        elif arg.startswith("-"):
            print(f"Error: Unknown argument: {arg}")
            print_usage()
            sys.exit(1)
        else:
            # This is a positional argument (no flag)
            positional_args.append(arg)
            i += 1
    # If no prompt was set via --prompt, use the first positional argument as the prompt
    if positional_args and args["prompt"] is None:
        args["prompt"] = positional_args[0]
    # Chat mode streams by default unless --no-stream is given
    if args["chat"] and not args["no_stream"]:
        args["stream"] = True
    return args
def get_multiline_input():
    """
    Get multiline input from the user.

    Handles pasted text with newlines properly. Submit the input by pressing
    Ctrl+D (EOF) or by entering a single '.' on a line.
    """
    print("You > ", end="", flush=True)
    lines = []
    # Set up readline to handle pasted input properly
    old_delims = readline.get_completer_delims()
    readline.set_completer_delims(old_delims.replace('\n', ''))
    try:
        while True:
            try:
                line = input() if not lines else input("... ")
                # Termination condition: a single '.' on its own line
                if line.strip() == "." and lines:
                    # Discard the termination marker itself
                    break
                lines.append(line)
            except EOFError:  # User pressed Ctrl+D
                print()  # Add a newline for better formatting
                break
    finally:
        # Restore original readline settings
        readline.set_completer_delims(old_delims)
    # Join the lines with newlines to preserve the original formatting
    return "\n".join(lines)
def interactive_chat(client, model, config, stream=True):
    """Run an interactive chat session similar to IPython."""
    print(f"\n=== Gemini Interactive Chat ({model}) ===")
    print("Type your messages and press Enter. For multi-line input:")
    print("- Paste text with newlines (they will be preserved)")
    print("- End input with a single '.' on a new line or Ctrl+D\n")
    print("Use 'exit', 'quit', or 'bye' to end the session.\n")
    # Create a chat session, passing the generation config so system
    # instructions, temperature, and token limits also apply in chat mode
    chat = client.chats.create(model=model, config=config)
    history = []
    try:
        while True:
            # Get multiline user input
            user_input = get_multiline_input()
            # Check for exit commands
            if user_input.lower() in ["exit", "quit", "bye"]:
                print("Exiting chat session.")
                break
            # Skip empty inputs
            if not user_input.strip():
                continue
            try:
                # Send the message to Gemini
                if stream:
                    print("\nGemini > ", end="")
                    response = chat.send_message_stream(user_input)
                    full_response = ""
                    for chunk in response:
                        print(chunk.text, end="")
                        full_response += chunk.text
                    print("\n")
                else:
                    response = chat.send_message(user_input)
                    print("\nGemini > ", response.text, "\n")
                    full_response = response.text
                # Add to history for potential future use
                history.append({"user": user_input, "model": full_response})
            except Exception as e:
                print(f"\nError: {e}\n")
    except KeyboardInterrupt:
        print("\nExiting chat session.")
    return history
def main():
    # Parse command line arguments
    args = parse_args()
    # Get API key from args or environment
    api_key = args["api_key"] or os.environ.get("GEMINI_API_KEY")
    if not api_key:
        print("Error: Gemini API key is required. Provide it using -k/--api-key or set the GEMINI_API_KEY environment variable.")
        sys.exit(1)
    # Initialize client
    client = genai.Client(api_key=api_key)
    # Prepare generation config
    config = types.GenerateContentConfig(
        temperature=args["temperature"],
        max_output_tokens=args["max_tokens"]
    )
    # Add system instructions if specified
    if args["system"]:
        config.system_instruction = args["system"]
    # If chat mode is enabled without a prompt, run the interactive chat
    if args["chat"] and not args["prompt"] and not args["prompt_files"]:
        try:
            interactive_chat(client, args["model"], config, stream=args["stream"])
            return
        except Exception as e:
            print(f"Error in chat mode: {e}")
            sys.exit(1)
    # Get prompt from args and/or files
    prompt_text = args["prompt"]
    file_contents = []
    # Process all provided prompt files
    for file_path in args["prompt_files"]:
        try:
            with open(file_path, 'r') as file:
                file_content = file.read()
                # Format with filename as header
                formatted_content = f"# {file_path}\n{file_content}\n"
                file_contents.append(formatted_content)
        except Exception as e:
            print(f"Error reading prompt file {file_path}: {e}")
            sys.exit(1)
    # Combine prompt and file contents
    if file_contents:
        combined_file_content = "\n".join(file_contents)
        if prompt_text:
            prompt = f"{prompt_text}\n\n{combined_file_content}"
        else:
            prompt = combined_file_content
    elif prompt_text:
        prompt = prompt_text
    else:
        print("Error: Prompt is required. Provide it using -p/--prompt, as a positional argument, or with -f/--prompt-file.")
        sys.exit(1)
    # Prepare contents
    contents = []
    # Add image if specified
    if args["image"]:
        try:
            image = Image.open(args["image"])
            contents.append(image)
        except Exception as e:
            print(f"Error loading image: {e}")
            sys.exit(1)
    # Add prompt
    contents.append(prompt)
    try:
        if args["chat"]:
            # Chat mode with initial prompt (pass config so generation settings apply)
            chat = client.chats.create(model=args["model"], config=config)
            if args["stream"]:
                # Streaming chat
                response = chat.send_message_stream(prompt)
                for chunk in response:
                    print(chunk.text, end="")
                print()  # Add newline at the end
            else:
                # Regular chat
                response = chat.send_message(prompt)
                print(response.text)
        else:
            # Non-chat mode
            if args["stream"]:
                # Streaming response
                response = client.models.generate_content_stream(
                    model=args["model"],
                    contents=contents,
                    config=config
                )
                for chunk in response:
                    print(chunk.text, end="")
                print()  # Add newline at the end
            else:
                # Regular response
                response = client.models.generate_content(
                    model=args["model"],
                    contents=contents,
                    config=config
                )
                print(response.text)
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)
if __name__ == "__main__":
    main()
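Example invocations (a sketch, assuming the script is saved as gemini_cli.py and the google-genai and pillow packages are installed; the key value and file names below are placeholders):

export GEMINI_API_KEY=your_key_here
python gemini_cli.py "Summarize the attached files" -f notes.txt todo.md
python gemini_cli.py -i photo.jpg -p "Describe this image" -s
python gemini_cli.py -c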