@harijay
Created January 21, 2026 21:07
{
"cells": [
{
"cell_type": "markdown",
"id": "2d2b118d-66db-4f8c-9d45-2909508acec0",
"metadata": {},
"source": [
"# Learn basics of tool use with Google Genai Python SDK"
]
},
{
"cell_type": "code",
"execution_count": 63,
"id": "dbe163fc-83b9-44fc-bf49-0764bd67ee76",
"metadata": {},
"outputs": [],
"source": [
"from google import genai\n",
"from google.genai import types"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "1b33d9ca-516e-4a65-a1a8-c613201e0bf5",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"from datetime import datetime\n",
"from zoneinfo import ZoneInfo\n",
"def get_current_time(timezone_str: str) -> str:\n",
" timezone = ZoneInfo(timezone_str)\n",
" return datetime.now(timezone).strftime(\"%D:%H:%M:%S\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2d856b0c-71d1-4817-917d-ab4a2fd4f898",
"metadata": {},
"outputs": [],
"source": [
"def get_time_us_east() -> str:\n",
" return datetime.now(ZoneInfo(\"America/New_York\")).strftime(\"%H:%M:%S\")"
]
},
{
"cell_type": "code",
"execution_count": 66,
"id": "92f28bd5-c98f-44be-9708-3af277f51d0f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'01/21/26:15:11:58'"
]
},
"execution_count": 66,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_current_time(\"America/New_York\")"
]
},
{
"cell_type": "code",
"execution_count": 67,
"id": "5eae36a7-8501-4ebb-a368-84f75346d796",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"GEMINI_API_KEY = os.getenv(\"GEMINI_API_KEY\")\n",
"my_client = genai.Client(api_key=GEMINI_API_KEY)"
]
},
{
"cell_type": "code",
"execution_count": 68,
"id": "b87e3562-599a-4dfd-8045-bd92c1d71fb3",
"metadata": {},
"outputs": [],
"source": [
"my_config = types.GenerateContentConfig(\n",
" tools = [get_current_time],\n",
" system_instruction = \"Return information strictly as a single string\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 69,
"id": "45617aee-f82f-4e68-9284-f099024d8c75",
"metadata": {},
"outputs": [],
"source": [
"response = my_client.models.generate_content(\n",
" model = \"gemini-2.5-flash\",\n",
" contents = [\"What is the current time in Boston?\"],\n",
" config = my_config)"
]
},
{
"cell_type": "code",
"execution_count": 70,
"id": "781e0487-3e47-4453-80a8-d7c95d3e3b58",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The current time in Boston is 15:12:02 on 01/21/26.\n"
]
}
],
"source": [
"print(response.text)"
]
},
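{
"cell_type": "markdown",
"id": "afcmanualnote1",
"metadata": {},
"source": [
"The call above works because the google-genai SDK performs automatic function calling when a plain Python callable is passed in `tools`: it declares the function to the model, runs it locally, and feeds the result back before returning. The cell below is a minimal sketch (not part of the original notebook) of doing that round trip by hand; it assumes the `AutomaticFunctionCallingConfig` and `Part.from_function_response` helpers from `google.genai.types` behave as documented, and that the function call arrives as the first part of the reply."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "afcmanualsketch1",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: handle the Gemini function call manually instead of letting\n",
"# the SDK's automatic function calling do it (assumed google-genai API names).\n",
"manual_genai_config = types.GenerateContentConfig(\n",
"    tools=[get_current_time],\n",
"    automatic_function_calling=types.AutomaticFunctionCallingConfig(disable=True),\n",
")\n",
"\n",
"question = \"What is the current time in Boston?\"\n",
"first = my_client.models.generate_content(\n",
"    model=\"gemini-2.5-flash\", contents=[question], config=manual_genai_config\n",
")\n",
"\n",
"fc = first.candidates[0].content.parts[0].function_call  # the model's tool request\n",
"if fc:\n",
"    tool_output = get_current_time(**(fc.args or {}))  # run the tool locally\n",
"    followup = my_client.models.generate_content(\n",
"        model=\"gemini-2.5-flash\",\n",
"        contents=[\n",
"            question,\n",
"            first.candidates[0].content,  # the turn that contains the function call\n",
"            types.Part.from_function_response(name=fc.name, response={\"result\": tool_output}),\n",
"        ],\n",
"        config=manual_genai_config,\n",
"    )\n",
"    print(followup.text)"
]
},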
{
"cell_type": "markdown",
"id": "52ae1a00-bc74-4ce7-8cf7-6c9904dc4b7f",
"metadata": {},
"source": [
"# Now we try this exactly as indicated in the course"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2552dc82-dbe7-4955-a761-7f964bc6fc90",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"\n",
"import aisuite as ai\n",
"\n",
"import display_functions"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "102edbe4-59c8-4b81-bcca-0e788b769e87",
"metadata": {},
"outputs": [],
"source": [
"config = {\n",
" \"openai_api_key\" : os.getenv(\"OPENAI_API_KEY\"),\n",
" \"perplexity_api_key\" : os.getenv(\"PERPLEXITY_API_KEY\")\n",
"}\n",
"myaisuite_client = ai.Client()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1c4bfe73-eb30-48b8-a55e-9ac724450785",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<aisuite.client.Client at 0x7269381156a0>"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"myaisuite_client"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "894d0175-84e5-40fb-bfb0-99fe2cffb1a3",
"metadata": {},
"outputs": [],
"source": [
"from dotenv import load_dotenv\n",
"_ = load_dotenv()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b6b1acc0-d767-4882-9cc0-941e01478d2b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"_"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0c81b937-78a2-40cf-807c-d5bc78b6d90b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The current time in Boston is 3:34 PM on January 26, 2021.\n"
]
}
],
"source": [
"\n",
"prompt = \"What time is it in Boston?\"\n",
"response = myaisuite_client.chat.completions.create(\n",
" model = \"openai:gpt-4o\",\n",
" messages = [{\n",
" \"role\" : \"user\",\n",
" \"content\" : prompt\n",
" }],\n",
" tools = [get_current_time],\n",
" max_turns = 5\n",
" )\n",
"print(response.choices[0].message.content)\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "dd58454d-8e7d-4a99-ad02-ad926b5a78f1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Role: assistant, Content: None\n",
"Role: tool, Content: \"01/21/26:15:34:08\"\n",
"Role: assistant, Content: The current time in Boston is 3:34 PM on January 26, 2021.\n"
]
}
],
"source": [
"# This safely handles both ChatCompletionMessage objects and standard dictionaries\n",
"for msg in response.choices[0].intermediate_messages:\n",
" if hasattr(msg, 'role'):\n",
" # It's a ChatCompletionMessage object (use dot notation)\n",
" role = msg.role\n",
" content = msg.content\n",
" else:\n",
" # It's a tool result dictionary (use square brackets)\n",
" role = msg['role']\n",
" content = msg.get('content')\n",
" \n",
" print(f\"Role: {role}, Content: {content}\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "736722cc-5b1f-449d-b6b5-f81e02e30bc8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ChatCompletion(id='chatcmpl-D0ZBwPdLDLVWNRd2gmBr1EtyFlTvn', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time in Boston is 3:34 PM on January 26, 2021.', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=None), intermediate_messages=[ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageFunctionToolCall(id='call_mccVzsBXYezlMaIK28v87sH7', function=Function(arguments='{\"timezone_str\":\"America/New_York\"}', name='get_current_time'), type='function')]), {'role': 'tool', 'name': 'get_current_time', 'content': '\"01/21/26:15:34:08\"', 'tool_call_id': 'call_mccVzsBXYezlMaIK28v87sH7'}, ChatCompletionMessage(content='The current time in Boston is 3:34 PM on January 26, 2021.', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=None)])], created=1769027648, model='gpt-4o-2024-08-06', object='chat.completion', service_tier='default', system_fingerprint='fp_deacdd5f6f', usage=CompletionUsage(completion_tokens=21, prompt_tokens=87, total_tokens=108, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)), intermediate_responses=[ChatCompletion(id='chatcmpl-D0ZBvqL2clyDjoXAlDZSOE8p4gdb3', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageFunctionToolCall(id='call_mccVzsBXYezlMaIK28v87sH7', function=Function(arguments='{\"timezone_str\":\"America/New_York\"}', name='get_current_time'), type='function')]))], created=1769027647, model='gpt-4o-2024-08-06', object='chat.completion', service_tier='default', system_fingerprint='fp_deacdd5f6f', usage=CompletionUsage(completion_tokens=19, prompt_tokens=46, total_tokens=65, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))])\n"
]
}
],
"source": [
"print(response)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "f5522ab1-fde0-4bde-b377-d48b152fd5f2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Role: assistant | Calling Tool: get_current_time | Args: {\"timezone_str\":\"America/New_York\"}\n",
"Role: tool | Tool Used: get_current_time | Result: \"01/21/26:15:34:08\"\n",
"Role: assistant | Content: The current time in Boston is 3:34 PM on January 26, 2021.\n"
]
}
],
"source": [
"for msg in response.choices[0].intermediate_messages:\n",
" if hasattr(msg, 'role'):\n",
" # It's an assistant message (ChatCompletionMessage object)\n",
" role = msg.role\n",
" content = msg.content\n",
" \n",
" # If the model is calling a tool, content is usually None. \n",
" # We must look into .tool_calls for the details.\n",
" if msg.tool_calls:\n",
" for tool_call in msg.tool_calls:\n",
" func_name = tool_call.function.name\n",
" args = tool_call.function.arguments\n",
" print(f\"Role: {role} | Calling Tool: {func_name} | Args: {args}\")\n",
" elif content:\n",
" print(f\"Role: {role} | Content: {content}\")\n",
" \n",
" else:\n",
" # It's a tool result (standard dictionary)\n",
" role = msg['role']\n",
" content = msg.get('content')\n",
" # Tool results often have the function name for clarity\n",
" tool_name = msg.get('name', 'unknown tool')\n",
" print(f\"Role: {role} | Tool Used: {tool_name} | Result: {content}\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "272674b1-ec05-4777-99c2-bc061293a787",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
" <div style=\"border-left: 4px solid #444; margin: 10px 0; padding: 10px; background: #f0f0f0;\">\n",
" <strong style=\"color:#222;\">🧠 LLM Action:</strong> <code>get_current_time</code>\n",
" <pre style=\"color:#000; font-size:13px;\">{\n",
" \"timezone_str\": \"America/New_York\"\n",
"}</pre>\n",
" </div>\n",
" \n",
" <div style=\"border-left: 4px solid #007bff; margin: 10px 0; padding: 10px; background: #eef6ff;\">\n",
" <strong style=\"color:#222;\">🔧 Tool Response:</strong> <code>get_current_time</code>\n",
" <pre style=\"color:#000; font-size:13px;\">\"01/21/26:15:34:08\"</pre>\n",
" </div>\n",
" \n",
" <div style=\"border-left: 4px solid #28a745; margin: 20px 0; padding: 10px; background: #eafbe7;\">\n",
" <strong style=\"color:#222;\">✅ Final Assistant Message:</strong>\n",
" <p style=\"color:#000;\">The current time in Boston is 3:34 PM on January 26, 2021.</p>\n",
" </div>\n",
" \n",
" <div style=\"border-left: 4px solid #666; margin: 20px 0; padding: 10px; background: #f8f9fa;\">\n",
" <strong style=\"color:#222;\">🧭 Tool Sequence:</strong>\n",
" <p style=\"color:#000;\">get_current_time</p>\n",
" </div>\n",
" "
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_functions.pretty_print_chat_completion(response)"
]
},
{
"cell_type": "markdown",
"id": "d336f851-3aff-4e66-84c5-f4cb04504b40",
"metadata": {},
"source": [
"# Manual Tools config"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "77da6e0d-c36c-4e83-a76b-9a257118b1d8",
"metadata": {},
"outputs": [],
"source": [
"def time_simple():\n",
" tz = ZoneInfo(\"America/New_York\")\n",
" return datetime.now(tz).strftime(\"%H:%M:%S\")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "6cbe2cc0-7277-4bf4-8482-e19ceede5b5b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'15:34:11'"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"time_simple()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "a9e9929d-0e87-4c23-8a91-edfbfe6fb4c6",
"metadata": {},
"outputs": [],
"source": [
"# Manual tool schema - matches the time_simple function (no parameters)\n",
"# This is how M3_UGL_1.ipynb does it for get_current_time()\n",
"manual_tools_schema = [{\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"time_simple\",\n",
" \"description\": \"Returns the current time in US Eastern timezone as a string.\",\n",
" \"parameters\": {}\n",
" }\n",
"}]"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "fd3bdd20-a4c8-42da-9330-05f7eb196bbf",
"metadata": {},
"outputs": [],
"source": [
"prompt = \"What is the current time in Boston, MA?\"\n",
"messages = [{\"role\": \"user\", \"content\": prompt}]\n",
"\n",
"# 2. Call the model with MANUAL tool schema (not the function!)\n",
"# When NOT using max_turns, you must pass the schema dict, not the function\n",
"response = myaisuite_client.chat.completions.create(\n",
" model=\"openai:gpt-4o\",\n",
" messages=messages,\n",
" tools=manual_tools_schema, # <-- Use the schema, not [get_current_time]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "bd7db7f0-d3f3-432d-adcd-9c66f81dad40",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"id\": \"chatcmpl-D0ZC3xouGHPKDGnyjgWQPvhLBAVHi\",\n",
" \"choices\": [\n",
" {\n",
" \"finish_reason\": \"tool_calls\",\n",
" \"index\": 0,\n",
" \"logprobs\": null,\n",
" \"message\": {\n",
" \"content\": null,\n",
" \"refusal\": null,\n",
" \"role\": \"assistant\",\n",
" \"annotations\": [],\n",
" \"audio\": null,\n",
" \"function_call\": null,\n",
" \"tool_calls\": [\n",
" {\n",
" \"id\": \"call_sStxeH1xFHm7jZCHhEsSj6Uf\",\n",
" \"function\": {\n",
" \"arguments\": \"{}\",\n",
" \"name\": \"time_simple\"\n",
" },\n",
" \"type\": \"function\"\n",
" }\n",
" ]\n",
" }\n",
" }\n",
" ],\n",
" \"created\": 1769027655,\n",
" \"model\": \"gpt-4o-2024-08-06\",\n",
" \"object\": \"chat.completion\",\n",
" \"service_tier\": \"default\",\n",
" \"system_fingerprint\": \"fp_deacdd5f6f\",\n",
" \"usage\": {\n",
" \"completion_tokens\": 10,\n",
" \"prompt_tokens\": 53,\n",
" \"total_tokens\": 63,\n",
" \"completion_tokens_details\": {\n",
" \"accepted_prediction_tokens\": 0,\n",
" \"audio_tokens\": 0,\n",
" \"reasoning_tokens\": 0,\n",
" \"rejected_prediction_tokens\": 0\n",
" },\n",
" \"prompt_tokens_details\": {\n",
" \"audio_tokens\": 0,\n",
" \"cached_tokens\": 0\n",
" }\n",
" }\n",
"}\n"
]
}
],
"source": [
"print(json.dumps(response.model_dump(), indent=2, default=str))"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "d8e38fd0-fe17-4191-ab1a-50e570e58d98",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The current time in Boston, MA is 3:34 PM.\n"
]
}
],
"source": [
"response2 = None\n",
"\n",
"# Create a condition in case tool_calls is in response object\n",
"if response.choices[0].message.tool_calls:\n",
" # Pull out the specific tool metadata from the response\n",
" tool_call = response.choices[0].message.tool_calls[0]\n",
" \n",
" # Run the tool locally - time_simple has no arguments\n",
" tool_result = time_simple()\n",
"\n",
" # Append the result to the messages list\n",
" messages.append(response.choices[0].message)\n",
" messages.append({\n",
" \"role\": \"tool\", \"tool_call_id\": tool_call.id, \"content\": str(tool_result)\n",
" })\n",
"\n",
" # Send the list of messages with the newly appended results back to the LLM\n",
" response2 = myaisuite_client.chat.completions.create(\n",
" model=\"openai:gpt-4o\",\n",
" messages=messages,\n",
" tools=manual_tools_schema,\n",
" )\n",
"\n",
" print(response2.choices[0].message.content)\n",
"else:\n",
" print(\"No tool_calls in response. Check that the schema was sent correctly.\")\n",
" print(f\"Response content: {response.choices[0].message.content}\")"
]
},
{
"cell_type": "markdown",
"id": "2kmdgbidbhy",
"metadata": {},
"source": [
"# Manual Tools with Parameters\n",
"\n",
"The previous example used `time_simple()` which has no parameters. Now let's use `get_current_time(timezone_str)` which requires an argument. This demonstrates how to:\n",
"1. Define a schema with parameter specifications\n",
"2. Extract arguments from the LLM's tool call\n",
"3. Execute the function with those arguments"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "gkiutpdw6n6",
"metadata": {},
"outputs": [],
"source": [
"# Manual tool schema WITH parameters\n",
"# This matches our get_current_time(timezone_str) function\n",
"manual_tools_with_params = [{\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"get_current_time\",\n",
" \"description\": \"Returns the current time for a specific timezone.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"timezone_str\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The IANA timezone string, e.g., 'America/New_York', 'Europe/London', 'Asia/Tokyo'\"\n",
" }\n",
" },\n",
" \"required\": [\"timezone_str\"]\n",
" }\n",
" }\n",
"}]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "c4tjwiw13w",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"finish_reason: tool_calls\n",
"tool_calls: [ChatCompletionMessageFunctionToolCall(id='call_tUZMhjQDTAQXchDOV48lDSfj', function=Function(arguments='{\"timezone_str\":\"America/New_York\"}', name='get_current_time'), type='function')]\n"
]
}
],
"source": [
"# Step 1: Make the initial API call with manual schema\n",
"prompt_tz = \"What is the current time in Cambridge, Massachusetts, USA?\"\n",
"messages_tz = [{\"role\": \"user\", \"content\": prompt_tz}]\n",
"\n",
"response_tz = myaisuite_client.chat.completions.create(\n",
" model=\"openai:gpt-4o\",\n",
" messages=messages_tz,\n",
" tools=manual_tools_with_params, # Pass the schema dict\n",
")\n",
"\n",
"# Check the response - should have finish_reason=\"tool_calls\"\n",
"print(f\"finish_reason: {response_tz.choices[0].finish_reason}\")\n",
"print(f\"tool_calls: {response_tz.choices[0].message.tool_calls}\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "5iwucsei2ld",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LLM requested function: get_current_time\n",
"With arguments: {'timezone_str': 'America/New_York'}\n",
"Function result: 01/21/26:15:35:41\n"
]
}
],
"source": [
"# Step 2: Extract arguments and execute the tool manually\n",
"if response_tz.choices[0].message.tool_calls:\n",
" tool_call = response_tz.choices[0].message.tool_calls[0]\n",
" \n",
" # Parse the arguments JSON from the LLM's response\n",
" args = json.loads(tool_call.function.arguments)\n",
" print(f\"LLM requested function: {tool_call.function.name}\")\n",
" print(f\"With arguments: {args}\")\n",
" \n",
" # Execute the function with the extracted argument\n",
" tool_result = get_current_time(args['timezone_str'])\n",
" print(f\"Function result: {tool_result}\")\n",
"else:\n",
" print(\"No tool calls in response\")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "taldltqxd5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"=== Final Response ===\n",
"The current time in Cambridge, Massachusetts, USA is 15:35 (3:35 PM) on January 21.\n"
]
}
],
"source": [
"# Step 3: Send the tool result back to the LLM for the final response\n",
"if response_tz.choices[0].message.tool_calls:\n",
" # Append assistant's message (with tool_calls) to conversation\n",
" messages_tz.append(response_tz.choices[0].message)\n",
" \n",
" # Append the tool result\n",
" messages_tz.append({\n",
" \"role\": \"tool\",\n",
" \"tool_call_id\": tool_call.id,\n",
" \"content\": str(tool_result)\n",
" })\n",
" \n",
" # Make the second API call with the tool result\n",
" final_response = myaisuite_client.chat.completions.create(\n",
" model=\"openai:gpt-4o\",\n",
" messages=messages_tz,\n",
" tools=manual_tools_with_params,\n",
" )\n",
" \n",
" print(\"=== Final Response ===\")\n",
" print(final_response.choices[0].message.content)"
]
},
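{
"cell_type": "markdown",
"id": "toolerrnote1",
"metadata": {},
"source": [
"One detail the three steps above gloss over: if the model produces a bad timezone string, `ZoneInfo` raises `ZoneInfoNotFoundError` and the loop crashes before anything is sent back. A common pattern, sketched below as an addition to the course material, is to catch the exception and return the error text as the tool result so the model can retry or explain the problem."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "toolerrsketch1",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: surface tool errors to the model instead of raising them.\n",
"from zoneinfo import ZoneInfoNotFoundError\n",
"\n",
"def safe_get_current_time(timezone_str: str) -> str:\n",
"    \"\"\"Wrap get_current_time so invalid timezones become an error string.\"\"\"\n",
"    try:\n",
"        return get_current_time(timezone_str)\n",
"    except (ZoneInfoNotFoundError, ValueError) as exc:\n",
"        # The model receives this text as the tool output and can recover.\n",
"        return f\"ERROR: unknown timezone {timezone_str!r} ({exc})\"\n",
"\n",
"print(safe_get_current_time(\"America/New_York\"))\n",
"print(safe_get_current_time(\"Mars/Olympus_Mons\"))"
]
},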
{
"cell_type": "code",
"execution_count": 23,
"id": "85pcqydblm",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"London: The current time in London is 8:35 PM.\n",
"Sydney: The current time in Sydney is 07:35 AM on January 22, 2026.\n",
"New York: The current time in New York is 3:35:59 PM.\n"
]
}
],
"source": [
"# Bonus: Complete manual tool loop as a reusable pattern\n",
"def manual_tool_call(client, model, prompt, tools_schema, tool_functions):\n",
" \"\"\"\n",
" Execute a manual tool calling loop.\n",
" \n",
" Args:\n",
" client: aisuite client\n",
" model: model string (e.g., \"openai:gpt-4o\")\n",
" prompt: user prompt\n",
" tools_schema: list of tool schema dicts\n",
" tool_functions: dict mapping function names to actual functions\n",
" \n",
" Returns:\n",
" Final response content\n",
" \"\"\"\n",
" messages = [{\"role\": \"user\", \"content\": prompt}]\n",
" \n",
" response = client.chat.completions.create(\n",
" model=model,\n",
" messages=messages,\n",
" tools=tools_schema,\n",
" )\n",
" \n",
" if not response.choices[0].message.tool_calls:\n",
" return response.choices[0].message.content\n",
" \n",
" # Handle tool call\n",
" tool_call = response.choices[0].message.tool_calls[0]\n",
" func_name = tool_call.function.name\n",
" args = json.loads(tool_call.function.arguments)\n",
" \n",
" # Execute the function\n",
" func = tool_functions[func_name]\n",
" result = func(**args) if args else func()\n",
" \n",
" # Send result back\n",
" messages.append(response.choices[0].message)\n",
" messages.append({\n",
" \"role\": \"tool\",\n",
" \"tool_call_id\": tool_call.id,\n",
" \"content\": str(result)\n",
" })\n",
" \n",
" final = client.chat.completions.create(\n",
" model=model,\n",
" messages=messages,\n",
" tools=tools_schema,\n",
" )\n",
" \n",
" return final.choices[0].message.content\n",
"\n",
"# Test with different cities\n",
"tool_funcs = {\"get_current_time\": get_current_time}\n",
"\n",
"for city in [\"London\", \"Sydney\", \"New York\"]:\n",
" result = manual_tool_call(\n",
" myaisuite_client,\n",
" \"openai:gpt-4o\",\n",
" f\"What time is it in {city}?\",\n",
" manual_tools_with_params,\n",
" tool_funcs\n",
" )\n",
" print(f\"{city}: {result}\")"
]
},
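{
"cell_type": "markdown",
"id": "multiturnnote1",
"metadata": {},
"source": [
"`manual_tool_call` above handles exactly one tool round: one model request, one tool execution, one follow-up. The variant below is a sketch (not from the course) that keeps looping until the model stops requesting tools or a round budget is exhausted, executing every tool call in each round. This is roughly the loop that aisuite's `max_turns` automates."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "multiturnsketch1",
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch: multi-round manual tool loop (what max_turns automates for us).\n",
"def manual_tool_loop(client, model, prompt, tools_schema, tool_functions, max_rounds=5):\n",
"    messages = [{\"role\": \"user\", \"content\": prompt}]\n",
"    for _ in range(max_rounds):\n",
"        response = client.chat.completions.create(\n",
"            model=model, messages=messages, tools=tools_schema\n",
"        )\n",
"        message = response.choices[0].message\n",
"        if not message.tool_calls:\n",
"            return message.content  # plain-text answer, no more tools needed\n",
"        messages.append(message)\n",
"        # Execute every tool call the model requested in this round\n",
"        for tool_call in message.tool_calls:\n",
"            func = tool_functions[tool_call.function.name]\n",
"            args = json.loads(tool_call.function.arguments or \"{}\")\n",
"            messages.append({\n",
"                \"role\": \"tool\",\n",
"                \"tool_call_id\": tool_call.id,\n",
"                \"content\": str(func(**args))\n",
"            })\n",
"    return \"Stopped after reaching the round limit.\"\n",
"\n",
"print(manual_tool_loop(\n",
"    myaisuite_client, \"openai:gpt-4o\",\n",
"    \"What time is it right now in Tokyo and in London?\",\n",
"    manual_tools_with_params, tool_funcs\n",
"))"
]
},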
{
"cell_type": "code",
"execution_count": null,
"id": "434ed298-32f1-47b8-84de-d7e178440745",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d63cbd1-1944-43f6-9b3b-cff9c48b9db6",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}