Created March 27, 2025 14:41
Provide Agents as a tool, with MCP
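The script imports its model settings from a local ollama_settings.py that is not included in the gist. A minimal sketch of that file, assuming a default local Ollama install (both values below are placeholders, not from the original):

# ollama_settings.py -- hypothetical minimal version, not part of the gist
OLLAMA_BASE_URL = "http://localhost:11434"  # default Ollama endpoint
OLLAMA_CHAT_MODEL = "llama3.2"              # any tool-capable chat model pulled into Ollama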
import os
import datetime
from contextlib import asynccontextmanager
from typing import AsyncIterator

import aiohttp
from mcp.server.fastmcp import FastMCP, Context
from mcp.server import Server
from swarm import Swarm, Agent
from duckduckgo_search import DDGS
from ollama_settings import OLLAMA_CHAT_MODEL, OLLAMA_BASE_URL

# Route the OpenAI client used by Swarm to the local Ollama server
# (Ollama exposes an OpenAI-compatible API; the API key is a placeholder).
os.environ["OPENAI_BASE_URL"] = f'{OLLAMA_BASE_URL}/v1'
os.environ["OPENAI_API_KEY"] = 'ollama'

# Initialize the Swarm client and the DuckDuckGo search client
client = Swarm()
ddgs = DDGS()
@asynccontextmanager
async def app_lifespan(server: Server) -> AsyncIterator[dict]:
    session = aiohttp.ClientSession()
    try:
        # The lifespan context contains our aiohttp session.
        yield {"session": session}
    finally:
        await session.close()

# Create the FastMCP instance with lifespan support.
mcp = FastMCP(
    "Agents MCP Server",
    lifespan=app_lifespan
)
### SWARM
def search_web(query: str, max_results: int = 10) -> str:
    """ Query the DuckDuckGo text search for recent results """
    print(f"Searching the web for {query}...")
    # Append the current year-month so DuckDuckGo favours recent results
    current_date = datetime.datetime.now().strftime("%Y-%m")
    results = ddgs.text(f"{query} {current_date}", max_results=max_results)
    if results:
        news_results = ""
        for result in results:
            news_results += f"Title: {result['title']}\nURL: {result['href']}\nDescription: {result['body']}\n\n"
        return news_results.strip()
    else:
        return f"Could not find news results for {query}."
# Web Search Agent to fetch the latest news
web_search_agent = Agent(
    name="Web Search Assistant",
    instructions="Your role is to gather the latest news articles on specified topics using DuckDuckGo's search capabilities.",
    functions=[search_web],
    model=OLLAMA_CHAT_MODEL
)
# Senior Research Analyst
researcher_agent = Agent(
    name="Research Assistant",
    instructions="""Your role is to analyze and synthesize the raw search results. You should:
1. Remove duplicate information and redundant content
2. Identify and merge related topics and themes
3. Verify information consistency across sources
4. Prioritize recent and relevant information
5. Extract key facts, statistics, and quotes
6. Identify primary sources when available
7. Flag any contradictory information
8. Maintain proper attribution for important claims
9. Organize information in a logical sequence
10. Preserve important context and relationships between topics""",
    model=OLLAMA_CHAT_MODEL
)
# Writer Agent to turn the research into a publication-ready article
writer_agent = Agent(
    name="Writer Assistant",
    instructions="""Your role is to transform the deduplicated research results into a polished, publication-ready article. You should:
1. Organize content into clear, thematic sections
2. Write in a professional yet engaging tone that is genuine and informative
3. Ensure proper flow between topics
4. Add relevant context where needed
5. Maintain factual accuracy while making complex topics accessible
6. Include a brief summary at the beginning
7. Format with clear headlines and subheadings
8. Preserve all key information from the source material""",
    model=OLLAMA_CHAT_MODEL
)
def run_swarm_workflow(query):
    print("Running web research assistant workflow...")
    # Step 1: search the web
    news_response = client.run(
        agent=web_search_agent,
        messages=[{"role": "user", "content": f"Search the web for {query}"}],
    )
    raw_news = news_response.messages[-1]["content"]
    # Step 2: analyze and synthesize the search results
    research_analysis_response = client.run(
        agent=researcher_agent,
        messages=[{"role": "user", "content": raw_news}],
    )
    deduplicated_news = research_analysis_response.messages[-1]["content"]
    # Step 3: edit and publish the analyzed results with streaming
    return client.run(
        agent=writer_agent,
        messages=[{"role": "user", "content": deduplicated_news}],
        stream=True  # Enable streaming
    )
###

@mcp.tool()
def web_research_assistant(query: str, ctx: Context) -> dict:
    """
    Query the Web Research Assistant.

    Args:
        query (str): The query to submit to the web research assistant. Should be submitted in natural language.

    Returns:
        dict: The return value, with a "success" flag and either a "message" or an "error" key.
    """
    try:
        # Get the streaming response from the three-agent workflow
        streaming_response = run_swarm_workflow(query)
        # Accumulate the streamed text
        full_response = ""
        for chunk in streaming_response:
            # Skip the stream delimiters
            if isinstance(chunk, dict) and 'delim' in chunk:
                continue
            # Append only the textual content from each chunk
            if isinstance(chunk, dict) and chunk.get('content'):
                full_response += chunk['content']
        # Return the assembled response
        return {"success": True, "message": str(full_response)}
    except Exception as e:
        return {"success": False, "error": str(e)}
@mcp.tool()
def ask_human(question: str) -> dict:
    """
    Ask a confirmation or question to a human and get their response.

    Args:
        question (str): The question to be asked to the human.

    Returns:
        dict: The return value, with a "success" flag and either a "message" or an "error" key.
    """
    try:
        # Note: input() reads from the server's stdin, so this tool assumes a
        # transport that leaves stdin free (it will clash with stdio transport).
        answer = input(question + " ==> ")
        return {"success": True, "message": answer}
    except Exception as e:
        return {"success": False, "error": str(e)}
# -------------------------------------------------------------------
# Run the MCP server.
# -------------------------------------------------------------------
if __name__ == '__main__':
    mcp.run()
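Since mcp.run() defaults to the stdio transport, any MCP client can spawn this file and call its tools. A minimal sketch using the official mcp Python client follows; the server file name server.py and the example query are assumptions, and because the script's print() and input() calls share stdin/stdout with the stdio transport, running the server over SSE may be more robust in practice.

# client_example.py -- hedged sketch, not part of the original gist
import asyncio
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # Spawn the gist's script as an MCP server over stdio ("server.py" is an assumed file name)
    params = StdioServerParameters(command="python", args=["server.py"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # List the exposed tools: web_research_assistant and ask_human
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])
            # Call the research workflow with a natural-language query
            result = await session.call_tool(
                "web_research_assistant",
                {"query": "latest releases of local LLM runtimes"},
            )
            print(result.content)

asyncio.run(main())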