|
import os |
|
import sys |
|
from dotenv import load_dotenv, set_key |
|
from langchain_openai import ChatOpenAI |
|
from langchain.prompts import ChatPromptTemplate, PromptTemplate |
|
from langchain_core.runnables import RunnableSequence |
|
from langchain.tools import Tool |
|
from langchain.agents import create_react_agent, AgentExecutor |
|
from langchain.schema import HumanMessage |
|
import getpass |
|
import requests |
|
|
|
# Load environment variables from .env
load_dotenv()


def _ensure_env_token(var_name, prompt_text):
    """Return the value of *var_name*, prompting for it when missing.

    If the variable is unset, the user is prompted (input hidden via
    getpass), the value is persisted to .env, and it is exported into the
    current process environment.

    Args:
        var_name: Name of the environment variable to look up.
        prompt_text: Prompt shown to the user when the variable is unset.

    Returns:
        The token string.
    """
    value = os.getenv(var_name)
    if not value:
        value = getpass.getpass(prompt=prompt_text)
        set_key('.env', var_name, value)
        # Export directly so this process sees the value immediately; this
        # is more reliable than re-running load_dotenv(), which does not
        # override variables already present in the environment.
        os.environ[var_name] = value
    return value


# Make sure both API tokens are available before doing anything else.
# (OpenAI first, to match the original prompt order.)
_ensure_env_token('OPENAI_API_KEY', "Enter your OpenAI token: ")
WEATHER_API_KEY = _ensure_env_token('WEATHER_API_KEY', "Enter your WeatherAPI token: ")


# Initialize our language model (reads OPENAI_API_KEY from the environment)
llm = ChatOpenAI(model="gpt-4o-mini")
|
|
|
# Define a simple function to use as a tool |
|
def get_current_weather(location):
    """Fetch the current weather for *location* from WeatherAPI.

    Args:
        location: City name or query string understood by WeatherAPI.

    Returns:
        A human-readable, newline-terminated description of the current
        conditions, or a failure message if the lookup did not succeed.
    """
    api_url = "http://api.weatherapi.com/v1/current.json"
    try:
        # Let requests build the query string so locations containing
        # spaces or special characters are URL-encoded correctly, and time
        # out rather than hanging the agent on a dead connection.
        response = requests.get(
            api_url,
            params={"key": WEATHER_API_KEY, "q": location},
            timeout=10,
        )
    except requests.RequestException:
        # Network failure: report it the same way as an HTTP error so the
        # agent receives a usable observation instead of a traceback.
        return f"Failed to get weather for {location}.\n"

    if response.status_code == 200:
        data = response.json()
        return f"The weather in {location} is {data['current']['condition']['text']} and {data['current']['temp_c']}\u00b0C.\n"
    else:
        return f"Failed to get weather for {location}.\n"
|
|
|
# Expose the weather lookup to the agent as a generic LangChain tool.
tools = [
    Tool(
        name="Weather Checker",
        description="Useful for getting the current weather in a specific location",
        func=get_current_weather,
    ),
]
|
|
|
# Function to load prompt from file |
|
def load_prompt_template(file_path):
    """Read a prompt template file and return its full contents as a string.

    Args:
        file_path: Path to a text file containing the template.

    Returns:
        The file's entire contents.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    # Pin the encoding: the platform default can corrupt templates that
    # contain non-ASCII characters on some systems.
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()
|
|
|
# Read the ReAct prompt template from disk, then turn it into a
# PromptTemplate the agent can consume.
react_prompt_template = load_prompt_template('tutorial.react-prompt.md')
react_prompt = PromptTemplate.from_template(react_prompt_template)
|
|
|
# Build the ReAct agent from the model, tools, and prompt.
agent = create_react_agent(llm, tools, react_prompt)

# Wrap it in an executor. verbose=True prints the agent's reasoning;
# handle_parsing_errors=True retries instead of crashing when the LLM
# emits malformed ReAct output.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)
|
|
|
# Prompt template that wraps the user's raw question for the pipeline.
prompt = ChatPromptTemplate.from_template(
    "Answer the following question: {question}"
)

print(f"Prompt: {prompt}")  # debugging aid
|
|
|
def safe_invoke_agent(prompt_result):
    """Validate the rendered prompt, then run the ReAct agent on it.

    Args:
        prompt_result: The ChatPromptValue produced by the upstream
            ChatPromptTemplate ("Answer the following question: ...").

    Returns:
        The dict returned by ``agent_executor.invoke`` (contains 'output').

    Raises:
        ValueError: If the rendered prompt is empty or flagged as invalid.
    """
    # Extract the rendered text of the (single) message.
    content = prompt_result.to_messages()[0].content

    # Validation: refuse to invoke the agent on empty or flagged content.
    if not content or "invalid" in content.lower():
        raise ValueError("The model produced invalid content.")

    # BUG FIX: the question was previously hard-coded to "What's the weather
    # like in Tokyo?", so any other question flowing through the sequence was
    # silently ignored. Recover the actual question from the rendered prompt
    # (which the upstream template prefixed) so this works for any input.
    prefix = "Answer the following question: "
    question = content[len(prefix):] if content.startswith(prefix) else content

    # Proceed with invoking the agent.
    return agent_executor.invoke({
        "input": content,
        "tools": tools,
        "tool_names": ", ".join(tool.name for tool in tools),
        "question": question,
        "agent_scratchpad": ""
    })
|
|
|
# Compose the prompt and the guarded agent call into one runnable pipeline.
# The | operator builds the same RunnableSequence as the explicit
# RunnableSequence(...) constructor, coercing the function to a runnable.
sequence = prompt | safe_invoke_agent

# Print the sequence for debugging
print(f"Sequence: {sequence}")
|
|
|
def main():
    """Run the demo: ask the agent one weather question and print the answer."""
    question = "What's the weather like in Tokyo?"
    print(f"Invoking sequence with question: {question}")
    result = sequence.invoke({"question": question})
    print(f"Sequence result: {result}")

    print("Answer:")
    print(result['output'])


if __name__ == "__main__":
    main()