LangGraph + Instructor
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from collections.abc import AsyncGenerator\n",
"from typing import Annotated, Any, Literal\n",
"from uuid import uuid4\n",
"\n",
"import aiohttp\n",
"import httpx\n",
"import instructor\n",
"import openai\n",
"from dotenv import load_dotenv\n",
"from langchain_core.callbacks import adispatch_custom_event\n",
"from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, SystemMessage\n",
"from langchain_core.runnables import RunnableConfig\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import StateGraph\n",
"from langgraph.graph.graph import CompiledGraph\n",
"from langgraph.graph.message import add_messages\n",
"from pydantic import BaseModel, Field\n",
"from pydantic.json_schema import SkipJsonSchema\n",
"from typing_extensions import TypedDict"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Utility functions to convert LangChain messages to Instructor-compatible dicts and vice versa\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def convert_lc_messages_to_instructor(messages: list[AnyMessage], include_additional_keys=True) -> list[dict]:\n",
" \"\"\"Convert LangChain messages to Instructor-compatible dicts,including any additional keys.\n", | |
"\n", | |
" Args:\n", | |
" messages (list): List of LangChain message objects.\n", | |
" include_additional_keys (bool): Whether to include additional keys.\n", | |
"\n", | |
" Returns:\n", | |
" list: List of dictionaries formatted for Instructor.\n", | |
" \"\"\"\n", | |
" instructor_messages = []\n", | |
" for msg in messages:\n", | |
" message_dict = {}\n", | |
"\n", | |
" # Map the message type to the appropriate role\n", | |
" if isinstance(msg, SystemMessage):\n", | |
" role = \"system\"\n", | |
" elif isinstance(msg, HumanMessage):\n", | |
" role = \"user\"\n", | |
" elif isinstance(msg, AIMessage):\n", | |
" role = \"assistant\"\n", | |
" else:\n", | |
" raise ValueError(f\"Unsupported message type: {type(msg)}\")\n", | |
"\n", | |
" message_dict[\"role\"] = role\n", | |
" message_dict[\"content\"] = msg.content\n", | |
"\n", | |
" if include_additional_keys:\n", | |
" # Copy all attributes except private ones and \"role\" and \"content\"\n", | |
" for key, value in vars(msg).items():\n", | |
" if not key.startswith(\"_\") and key not in (\"role\", \"content\"):\n", | |
" message_dict[key] = value\n", | |
"\n", | |
" instructor_messages.append(message_dict)\n", | |
"\n", | |
" return instructor_messages" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 4, | |
"metadata": {}, | |
"outputs": [], | |
"source": [ | |
"def convert_instructor_messages_to_lc(messages: list[dict]) -> list[AnyMessage]:\n", | |
" \"\"\"Convert Instructor-style messages to LangChain message objects.\n", | |
"\n", | |
" Args:\n", | |
" messages (list): List of Instructor-style message dictionaries.\n", | |
"\n", | |
" Returns:\n", | |
" list: List of LangChain message objects.\n", | |
" \"\"\"\n", | |
" langchain_messages = []\n", | |
" for msg in messages:\n", | |
" role = msg.get(\"role\")\n", | |
" content = msg.get(\"content\")\n", | |
"\n", | |
" # Extract additional keys, if any\n", | |
" additional_keys = {k: v for k, v in msg.items() if k not in (\"role\", \"content\")}\n", | |
"\n", | |
" if role == \"system\":\n", | |
" message_obj = SystemMessage(content=content, **additional_keys)\n", | |
" elif role == \"user\":\n", | |
" message_obj = HumanMessage(content=content, **additional_keys)\n", | |
" elif role == \"assistant\":\n", | |
" message_obj = AIMessage(content=content, **additional_keys)\n", | |
" else:\n", | |
" raise ValueError(f\"Unsupported role: {role}\")\n", | |
"\n", | |
" langchain_messages.append(message_obj)\n", | |
"\n", | |
" return langchain_messages" | |
] | |
}, | |
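{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick round-trip sanity check of the two converters above (a minimal sketch; the message contents are arbitrary examples):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# LangChain messages -> Instructor dicts -> LangChain messages\n",
"msgs = [SystemMessage(content=\"You are terse.\"), HumanMessage(content=\"Hi!\")]\n",
"as_dicts = convert_lc_messages_to_instructor(msgs, include_additional_keys=False)\n",
"round_tripped = convert_instructor_messages_to_lc(as_dicts)\n",
"\n",
"assert [m.content for m in round_tripped] == [m.content for m in msgs]\n",
"as_dicts"
]
},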
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tools\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"class BaseTool(BaseModel):\n",
"    name: str = Field(..., description=\"The name of the tool\")\n",
"    tool_id: SkipJsonSchema[str] = Field(default_factory=lambda: str(uuid4()), description=\"Unique identifier for the tool\")\n",
"\n",
"    async def ainvoke(self, args: Any = None) -> dict | str:  # noqa: D102\n",
"        return self.model_dump()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Chat tool\n",
"class AssistantResponse(BaseModel):\n",
"    \"\"\"The response from the assistant.\n",
"\n",
"    Always respond in natural language!\n",
" If you are being asked a question to told to provide them with information, preface the response with what information you are about to provide them.\n", | |
" NEVER start off with \"Here are the results from the agent:\"! Make the user feel like you are a human responding to their question.\n", | |
"\n", | |
" \"\"\" # noqa: W505, E501\n", | |
"\n", | |
" assistant_response: str = Field(..., description=\"The response from the assistant. Be concise!\")\n", | |
"\n", | |
"\n", | |
"class LLMCall(BaseTool):\n", | |
" \"\"\"Use this tool to handle any general statements or questions.\"\"\"\n", | |
"\n", | |
" name: SkipJsonSchema[Literal[\"text-stream\"]] = Field(\"text-stream\", description=\"The name for the streaming text tool\")\n", | |
" user_input: str = Field(..., description=\"The user input to the LLMCall tool\")\n", | |
"\n", | |
" async def ainvoke(self, messages: list[AnyMessage] = [], args: Any = None) -> AsyncGenerator[dict, None]: # noqa: D102\n", | |
" client = instructor.from_openai(openai.AsyncOpenAI())\n", | |
"\n", | |
" messages = messages + [HumanMessage(content=self.user_input)]\n", | |
" rsp_stream = await client.chat.completions.create(\n", | |
" model=\"gpt-4\",\n", | |
" messages=convert_lc_messages_to_instructor(messages, include_additional_keys=False),\n", | |
" stream=True,\n", | |
" response_model=None,\n", | |
" )\n", | |
"\n", | |
" async for chunk in rsp_stream:\n", | |
" if chunk.choices and chunk.choices[0].delta.content is not None:\n", | |
" yield {**self.model_dump(), \"content\": chunk.choices[0].delta.content}" | |
] | |
}, | |
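{
"cell_type": "markdown",
"metadata": {},
"source": [
"A standalone smoke test of the streaming tool (a sketch; assumes `OPENAI_API_KEY` is available via the `.env` loaded above, and uses Jupyter's top-level `await`):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Stream the LLMCall tool directly, outside of the graph\n",
"tool = LLMCall(user_input=\"In one sentence, what is LangGraph?\")\n",
"async for chunk in tool.ainvoke():\n",
"    print(chunk[\"content\"], end=\"\")"
]
},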
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# GitHub repo tool\n",
"class GithubRepo(BaseTool):\n",
"    \"\"\"Use this tool to get information about a GitHub repository.\"\"\"\n",
"\n",
"    name: SkipJsonSchema[Literal[\"github-repo\"]] = Field(\"github-repo\", description=\"The name of the tool\")\n",
"    owner: str = Field(..., description=\"The name of the repository owner.\")\n",
"    repo: str = Field(..., description=\"The name of the repository.\")\n",
"\n",
"    async def ainvoke(self, args: Any = None) -> dict | str:  # noqa: D102\n",
"        if not self.owner or not self.repo:\n",
"            raise ValueError(\"Both owner and repo must have values\")\n",
"\n",
"        if not os.environ.get(\"GITHUB_TOKEN\"):\n",
"            raise ValueError(\"Missing GITHUB_TOKEN secret.\")\n",
"\n",
"        headers = {\n",
"            \"Accept\": \"application/vnd.github+json\",\n",
"            \"Authorization\": f\"Bearer {os.environ['GITHUB_TOKEN']}\",\n",
"            \"X-GitHub-Api-Version\": \"2022-11-28\",\n",
"        }\n",
"\n",
"        url = f\"https://api.github.com/repos/{self.owner}/{self.repo}\"\n",
"\n",
"        async with httpx.AsyncClient() as client:\n",
"            try:\n",
"                response = await client.get(url, headers=headers)\n",
"                response.raise_for_status()\n",
"                repo_data = response.json()\n",
"                return {\n",
"                    **self.model_dump(),\n",
"                    \"description\": repo_data.get(\"description\", \"\"),\n",
"                    \"language\": repo_data.get(\"language\", \"\"),\n",
"                    \"stars\": repo_data.get(\"stargazers_count\", 0),\n",
"                }\n",
"\n",
"            except httpx.HTTPError as err:\n",
"                print(err)\n",
"                return \"There was an error fetching the repository. Please check the owner and repo names.\""
]
},
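{
"cell_type": "markdown",
"metadata": {},
"source": [
"A one-off invocation of the repo tool (a sketch; assumes a valid `GITHUB_TOKEN` is set in the environment):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fetch metadata for a single repository outside of the graph\n",
"await GithubRepo(owner=\"fastai\", repo=\"fastai\").ainvoke()"
]
},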
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"# Weather tool\n",
"class Weather(BaseTool):\n",
"    \"\"\"Use this tool to get the weather/temperature for a given city.\"\"\"\n",
"\n",
"    name: SkipJsonSchema[Literal[\"weather-data\"]] = Field(\"weather-data\", description=\"The name of the tool\")\n",
"    city: str = Field(..., description=\"The city name to get weather for\")\n",
"    state: str = Field(..., description=\"The two letter state abbreviation to get weather for\")\n",
" country: str | None = Field(\"usa\", description=\"The two letter country abbreviation to get weather for\")\n", | |
"\n", | |
" async def ainvoke(self, args: Any = None) -> dict:\n", | |
" async with aiohttp.ClientSession() as session:\n", | |
" # Geocode request using OpenStreetMap Nominatim\n", | |
" geocode_url = (\n", | |
" f\"https://nominatim.openstreetmap.org/search?city={self.city}&state={self.state}&country={self.country}&format=json\"\n", | |
" )\n", | |
" print(f\"Geocode URL: {geocode_url}\") # Add this line to print the URL\n", | |
" async with session.get(geocode_url) as geocode_response:\n", | |
" if not geocode_response.ok:\n", | |
" error_text = await geocode_response.text()\n", | |
" raise ValueError(f\"Failed to get geocode data. Status: {geocode_response.status}, Response: {error_text}\")\n", | |
" geocode_data = await geocode_response.json()\n", | |
" if not geocode_data:\n", | |
" raise ValueError(f\"No geocode data found for {self.city}, {self.state}, {self.country}\")\n", | |
" latt, longt = geocode_data[0][\"lat\"], geocode_data[0][\"lon\"]\n", | |
"\n", | |
" # Weather.gov request\n", | |
" weather_gov_url = f\"https://api.weather.gov/points/{latt},{longt}\"\n", | |
" async with session.get(weather_gov_url) as weather_gov_response:\n", | |
" if not weather_gov_response.ok:\n", | |
" error_text = await weather_gov_response.text()\n", | |
" raise ValueError(f\"Failed to get weather data. Status: {weather_gov_response.status}, Response: {error_text}\")\n", | |
" weather_gov_data = await weather_gov_response.json()\n", | |
" forecast_url = weather_gov_data[\"properties\"][\"forecast\"]\n", | |
"\n", | |
" # Forecast request\n", | |
" async with session.get(forecast_url) as forecast_response:\n", | |
" if not forecast_response.ok:\n", | |
" error_text = await forecast_response.text()\n", | |
" raise ValueError(f\"Failed to get forecast data. Status: {forecast_response.status}, Response: {error_text}\")\n", | |
" forecast_data = await forecast_response.json()\n", | |
" today_forecast = forecast_data[\"properties\"][\"periods\"][0]\n", | |
"\n", | |
" return {**self.model_dump(), \"temperature\": today_forecast[\"temperature\"]}" | |
] | |
}, | |
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"class Question(BaseModel):\n",
" question: str = Field(..., description=\"The questions to answer\")\n", | |
" tool: Weather | GithubRepo | LLMCall = Field(\n", | |
" ..., description=\"The tool to use to answer the questions (default to LLMCall to respond with plain text)\"\n", | |
" )\n", | |
"\n", | |
"\n", | |
"class AgentResponse(BaseModel):\n", | |
" response: list[Question]" | |
] | |
}, | |
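{
"cell_type": "markdown",
"metadata": {},
"source": [
"Instructor builds its tool-calling schema from `AgentResponse`'s pydantic JSON schema; a quick (truncated) look at it shows that `SkipJsonSchema` fields such as `name` and `tool_id` are hidden from the model:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"# Truncated dump of the schema the model is asked to fill in\n",
"print(json.dumps(AgentResponse.model_json_schema(), indent=2)[:600])"
]
},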
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Agent\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"class GenerativeUIState(TypedDict, total=False):\n",
" messages: Annotated[AnyMessage, add_messages]\n", | |
" tool_calls: list[BaseModel] | None\n", | |
" tool_results: list[dict] | None" | |
] | |
}, | |
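{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `add_messages` annotation tells LangGraph to merge, rather than overwrite, the `messages` key between steps; a small illustration of the reducer on its own (the ids and contents are made up):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# add_messages appends the right-hand messages to the left-hand list,\n",
"# de-duplicating by message id; this is how `messages` accumulates across steps\n",
"add_messages([HumanMessage(content=\"hi\", id=\"1\")], [AIMessage(content=\"hello\", id=\"2\")])"
]
},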
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [],
"source": [
"class InstructorGraph:\n",
"    def __init__(self, checkpointer):\n",
"        self.checkpointer = checkpointer\n",
"\n",
"    # Nodes\n",
"    async def _invoke_model(self, state: GenerativeUIState, config: RunnableConfig) -> GenerativeUIState:\n",
"        sys_msg = \"\"\"\\\n",
"You are a helpful assistant.\n",
"\n",
"You are provided an input from the user. Use it to return a list of each distinct question or statement they are making along with the tool necessary to answer it or perform the task.\n",
"Use the conversation history to help you answer those questions and provide more context to the LLMCall tool.\n",
"Order the list of questions and statements so than any that depend on the result of others come last in the list.\n", | |
"\n", | |
"Default to the `LLMCall` tool for any input that cannot be answered using any of the other tools available.\"\"\" # noqa: E501\n", | |
"\n", | |
" conversation_history = \"\\n\".join([f\"{msg.name}: {msg.content}\" for msg in state[\"messages\"][:-1]])\n", | |
" user_msg = f\"\"\"\\\n", | |
"Conversation History:\n", | |
"{conversation_history}\n", | |
"\n", | |
"Input: {state[\"messages\"][-1].content}\n", | |
"\n", | |
"Assistant:\n", | |
"\"\"\"\n", | |
" messages = [SystemMessage(content=sys_msg), HumanMessage(content=user_msg)]\n", | |
" client = instructor.from_openai(openai.OpenAI())\n", | |
" rsp = client.chat.completions.create(\n", | |
" model=\"gpt-4o-2024-08-06\",\n", | |
" messages=convert_lc_messages_to_instructor(messages, include_additional_keys=False),\n", | |
" response_model=AgentResponse,\n", | |
" )\n", | |
"\n", | |
" tool_calls = [q.tool for q in rsp.response]\n", | |
" await adispatch_custom_event(\"tool_calls\", [tc.model_dump() for tc in tool_calls], config=config)\n", | |
" return {\"tool_calls\": tool_calls, \"tool_results\": None}\n", | |
"\n", | |
" async def _invoke_tools(self, state: GenerativeUIState, config: RunnableConfig) -> GenerativeUIState:\n", | |
" if state[\"tool_calls\"] is None:\n", | |
" raise ValueError(\"No tool calls found in state.\")\n", | |
"\n", | |
" tool_results = []\n", | |
" new_messages = []\n", | |
" for tool in state[\"tool_calls\"]:\n", | |
" if isinstance(tool, LLMCall):\n", | |
" final_content = \"\"\n", | |
" async for rsp in tool.ainvoke(state[\"messages\"] + new_messages):\n", | |
" final_content += rsp[\"content\"]\n", | |
" await adispatch_custom_event(\"on_chat_model_stream\", rsp, config=config)\n", | |
"\n", | |
" tool_results.append(final_content)\n", | |
" new_messages.append(AIMessage(final_content))\n", | |
" else:\n", | |
" await adispatch_custom_event(\"on_tool_start\", tool.model_dump(), config=config)\n", | |
" res = await tool.ainvoke()\n", | |
" tool_results.append(res)\n", | |
" await adispatch_custom_event(\"on_tool_end\", res, config=config)\n", | |
" new_messages.append(AIMessage(str(res)))\n", | |
" return {\n", | |
" \"tool_results\": tool_results,\n", | |
" \"messages\": [AIMessage(\"Here are the results from the agent:\\n\\n\" + \"\\n\".join([str(msg.content) for msg in new_messages]))],\n", | |
" }\n", | |
"\n", | |
" # Edges\n", | |
" def _invoke_tools_or_return(self, state: GenerativeUIState) -> str:\n", | |
" if \"tool_calls\" in state and isinstance(state[\"tool_calls\"], list):\n", | |
" return \"invoke_tools\"\n", | |
" else:\n", | |
" raise ValueError(\"Invalid state. No result or tool calls found.\")\n", | |
"\n", | |
" # Graph\n", | |
" def create_graph(self) -> CompiledGraph: # noqa: D102\n", | |
" workflow = StateGraph(GenerativeUIState)\n", | |
"\n", | |
" workflow.add_node(\"invoke_model\", self._invoke_model) # type: ignore\n", | |
" workflow.add_node(\"invoke_tools\", self._invoke_tools)\n", | |
" workflow.add_conditional_edges(\"invoke_model\", self._invoke_tools_or_return)\n", | |
"\n", | |
" workflow.set_entry_point(\"invoke_model\")\n", | |
" workflow.set_finish_point(\"invoke_tools\")\n", | |
"\n", | |
" graph = workflow.compile(checkpointer=self.checkpointer).with_config({\"run_name\": \"agent\"})\n", | |
" return graph\n", | |
"\n", | |
" async def run_graph(self, new_message: str): # noqa: D102\n", | |
" config = {\"configurable\": {\"thread_id\": \"123\"}}\n", | |
" msg = HumanMessage(new_message)\n", | |
"\n", | |
" async for event in self.create_graph().astream_events({\"messages\": [msg]}, config=config, version=\"v2\"):\n", | |
" event_name, _ = event[\"name\"], event[\"event\"]\n", | |
" # print(event_name, cb_handler)\n", | |
" if event_name == \"tool_calls\":\n", | |
" yield {\n", | |
" \"event\": \"on_tools_parser_end\",\n", | |
" \"value\": event[\"data\"],\n", | |
" \"run_id\": event[\"run_id\"],\n", | |
" }\n", | |
"\n", | |
" if event_name in [\"on_chat_model_stream\"]:\n", | |
" # print(event)\n", | |
" yield {\n", | |
" \"event\": event[\"name\"],\n", | |
" \"value\": event[\"data\"][\"content\"],\n", | |
" \"run_id\": event[\"data\"][\"tool_id\"],\n", | |
" }\n", | |
"\n", | |
" if event_name in [\"on_tool_end\", \"on_tool_start\"]:\n", | |
" # print(event)\n", | |
" yield {\n", | |
" \"event\": event[\"name\"],\n", | |
" \"value\": {\n", | |
" \"name\": event[\"data\"][\"name\"],\n", | |
" \"inputs\": {key: value for key, value in event[\"data\"].items() if key not in [\"name\", \"tool_id\"]},\n", | |
" },\n", | |
" \"run_id\": event[\"data\"][\"tool_id\"],\n", | |
" }\n", | |
"\n", | |
" yield {\"event\": \"on_end\"}" | |
] | |
}, | |
{ | |
"cell_type": "markdown", | |
"metadata": {}, | |
"source": [ | |
"## Tests\n" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 65, | |
"metadata": {}, | |
"outputs": [], | |
"source": [ | |
"memory = MemorySaver()\n", | |
"\n", | |
"graph = InstructorGraph(checkpointer=memory)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 66, | |
"metadata": {}, | |
"outputs": [], | |
"source": [ | |
"inp = \"What is the weather in SF?\"\n", | |
"inp = \"Tell me about the fastai repo and then tell me if it has more starts than the combined temperatures of SF and SD\"" | |
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"on_tools_parser_end\n",
"[{'name': 'github-repo', 'tool_id': '87316631-b391-42f0-a2a0-ae6892b63c8e', 'owner': 'fastai', 'repo': 'fastai'}, {'name': 'weather-data', 'tool_id': '5e3956b7-c30b-4071-8189-0d938dce4537', 'city': 'San Francisco', 'state': 'CA', 'country': 'US'}, {'name': 'weather-data', 'tool_id': 'cae7713f-a6d7-4db8-8f86-fab5378d82e1', 'city': 'San Diego', 'state': 'CA', 'country': 'US'}, {'name': 'text-stream', 'tool_id': '0eaed0e0-2e09-4f2b-8be7-c4ecf3eddcff', 'user_input': 'Does the fastai repo have more stars than the combined temperatures of San Francisco and San Diego, given the star count and temperatures obtained?'}]\n",
"on_tool_start\n",
"{'name': 'github-repo', 'inputs': {'owner': 'fastai', 'repo': 'fastai'}}\n",
"on_tool_end\n",
"{'name': 'github-repo', 'inputs': {'owner': 'fastai', 'repo': 'fastai', 'description': 'The fastai deep learning library', 'language': 'Jupyter Notebook', 'stars': 26189}}\n",
"on_tool_start\n",
"{'name': 'weather-data', 'inputs': {'city': 'San Francisco', 'state': 'CA', 'country': 'US'}}\n",
"Geocode URL: https://nominatim.openstreetmap.org/search?city=San Francisco&state=CA&country=US&format=json\n",
"on_tool_end\n",
"{'name': 'weather-data', 'inputs': {'city': 'San Francisco', 'state': 'CA', 'country': 'US', 'temperature': 70}}\n",
"on_tool_start\n",
"{'name': 'weather-data', 'inputs': {'city': 'San Diego', 'state': 'CA', 'country': 'US'}}\n",
"Geocode URL: https://nominatim.openstreetmap.org/search?city=San Diego&state=CA&country=US&format=json\n",
"on_tool_end\n",
"{'name': 'weather-data', 'inputs': {'city': 'San Diego', 'state': 'CA', 'country': 'US', 'temperature': 77}}\n",
"|The| fast|ai| repo| has| |261|89| stars|.| The| combined| temperatures| of| San| Francisco| and| San| Diego| are| |70| +| |77| =| |147|.| Therefore|,| the| fast|ai| repo| has| significantly| more| stars| than| the| combined| temperatures| of| San| Francisco| and| San| Diego|.|on_end\n",
"\n"
]
}
],
"source": [
"async for event_d in graph.run_graph(inp):\n",
"    if event_d[\"event\"] == \"on_chat_model_stream\":\n",
"        print(event_d[\"value\"], end=\"|\")\n",
"    else:\n",
"        print(event_d[\"event\"])\n",
"        print(event_d.get(\"value\", \"\"))"
]
},
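{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because `run_graph` pins `thread_id` to `\"123\"`, the `MemorySaver` checkpointer persists the conversation between calls; a follow-up question on the same thread (a sketch; assumes the previous cell has run) can lean on that history:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A second turn on the same thread; the checkpointer supplies the prior history\n",
"async for event_d in graph.run_graph(\"Which of those two cities was warmer?\"):\n",
"    if event_d[\"event\"] == \"on_chat_model_stream\":\n",
"        print(event_d[\"value\"], end=\"\")"
]
},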
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
} |