Created
March 11, 2025 02:49
-
-
Save bossjones/1a61464680d221b7bb586bcbb917384c to your computer and use it in GitHub Desktop.
sequentialthinking.py test_sequentialthinking.py port of https://github.com/modelcontextprotocol/servers/blob/main/src/sequentialthinking/index.ts in python w/ fastmcp
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
""" | |
Sequential Thinking FastMCP Server | |
This module implements a FastMCP server for the Sequential Thinking tool, which enables | |
dynamic and reflective problem-solving through a structured thinking process. | |
The Sequential Thinking tool helps break down complex problems into manageable steps, | |
allowing for revision, branching, and iterative refinement of thoughts. It maintains | |
a history of thoughts and supports features like revising previous thoughts or creating | |
branches from existing thought paths. | |
This implementation serves as a Minimum Viable Product (MVP) for a Proof of Concept (POC) | |
of the Sequential Thinking tool using FastMCP. | |
Features: | |
- Structured thinking process with numbered thoughts | |
- Support for revising previous thoughts | |
- Support for branching into alternative thought paths | |
- Dynamic adjustment of total thought count | |
- Colored terminal output for better visualization | |
- JSON response format for client consumption | |
Usage: | |
Run directly as a script to start the FastMCP server: | |
$ uv run python -m codegen_lab.sequentialthinking | |
Or import and use in another module: | |
>>> from codegen_lab.sequentialthinking import mcp | |
>>> mcp.run() | |
To test without starting the server: | |
>>> from codegen_lab.sequentialthinking import test_sequential_thinking | |
>>> test_sequential_thinking() | |
Dependencies: | |
- FastMCP (mcp.server.fastmcp) | |
- Python 3.8+ | |
- typing_extensions for Python < 3.11 | |
""" | |
import json | |
import sys | |
from typing import Dict, List, Any, Optional, TypedDict | |
from mcp.server.fastmcp import FastMCP | |
from mcp.server.fastmcp.server import Context | |
from pydantic import Field | |
# Define version information | |
__version__ = "0.2.0" | |
# Define the ThoughtData type | |
class ThoughtData(TypedDict, total=False):
    """
    Type definition for thought data in the Sequential Thinking tool.

    NOTE(review): total=False marks every key optional to the type checker,
    even though the first four fields are required at runtime by
    SequentialThinkingServer.validate_thought_data — consider
    Required/NotRequired (Python 3.11+ or typing_extensions) for stricter
    typing.

    Attributes:
        thought: The content of the current thinking step
        thoughtNumber: Current thought number in sequence
        totalThoughts: Estimated total thoughts needed
        nextThoughtNeeded: Whether another thought step is needed
        isRevision: Whether this thought revises previous thinking
        revisesThought: Which thought is being reconsidered
        branchFromThought: Branching point thought number
        branchId: Branch identifier
        needsMoreThoughts: If more thoughts are needed
    """
    # Required at runtime (enforced by validate_thought_data):
    thought: str
    thoughtNumber: int
    totalThoughts: int
    nextThoughtNeeded: bool
    # Optional metadata; validate_thought_data fills these with None when absent:
    isRevision: Optional[bool]
    revisesThought: Optional[int]
    branchFromThought: Optional[int]
    branchId: Optional[str]
    needsMoreThoughts: Optional[bool]
class SequentialThinkingServer:
    """
    Server implementation for the Sequential Thinking tool.

    Maintains the chronological thought history and any named branches,
    validates incoming thought payloads, renders thoughts as bordered
    terminal boxes on stderr, and builds the JSON tool responses.
    """

    def __init__(self) -> None:
        """Initialize the Sequential Thinking server with empty history and branches."""
        # Every validated thought in arrival order (revisions and branch
        # thoughts included).
        self.thought_history: List["ThoughtData"] = []
        # Maps branchId -> thoughts recorded for that branch.
        self.branches: Dict[str, List["ThoughtData"]] = {}

    def validate_thought_data(self, input_data: Dict[str, Any]) -> "ThoughtData":
        """
        Validate and convert input data to ThoughtData form.

        Args:
            input_data: The raw input mapping to validate.

        Returns:
            ThoughtData: The validated thought data; optional keys are
            always present, set to None when not supplied.

        Raises:
            ValueError: If a required field is missing, empty/zero, or of
                the wrong type.
        """
        thought = input_data.get('thought')
        if not thought or not isinstance(thought, str):
            raise ValueError('Invalid thought: must be a string')
        thought_number = input_data.get('thoughtNumber')
        # Fix: bool is a subclass of int, so exclude it explicitly — True
        # must not slip through as thought number 1.
        if not thought_number or isinstance(thought_number, bool) or not isinstance(thought_number, int):
            raise ValueError('Invalid thoughtNumber: must be a number')
        total_thoughts = input_data.get('totalThoughts')
        if not total_thoughts or isinstance(total_thoughts, bool) or not isinstance(total_thoughts, int):
            raise ValueError('Invalid totalThoughts: must be a number')
        if not isinstance(input_data.get('nextThoughtNeeded'), bool):
            raise ValueError('Invalid nextThoughtNeeded: must be a boolean')
        return {
            'thought': thought,
            'thoughtNumber': thought_number,
            'totalThoughts': total_thoughts,
            'nextThoughtNeeded': input_data['nextThoughtNeeded'],
            'isRevision': input_data.get('isRevision'),
            'revisesThought': input_data.get('revisesThought'),
            'branchFromThought': input_data.get('branchFromThought'),
            'branchId': input_data.get('branchId'),
            'needsMoreThoughts': input_data.get('needsMoreThoughts')
        }

    def format_thought(self, thought_data: "ThoughtData") -> str:
        """
        Format a thought as a colored, bordered box for terminal display.

        Args:
            thought_data: The thought data to format.

        Returns:
            str: The formatted, ANSI-colored box (leading newline included).
        """
        # ANSI escape codes for terminal colors.
        BLUE = '\033[94m'
        YELLOW = '\033[93m'
        GREEN = '\033[92m'
        RESET = '\033[0m'
        thought_number = thought_data['thoughtNumber']
        total_thoughts = thought_data['totalThoughts']
        thought = thought_data['thought']
        is_revision = thought_data.get('isRevision', False)
        revises_thought = thought_data.get('revisesThought')
        branch_from_thought = thought_data.get('branchFromThought')
        branch_id = thought_data.get('branchId')
        if is_revision:
            color, label = YELLOW, '🔄 Revision'
            context = f" (revising thought {revises_thought})"
        elif branch_from_thought:
            color, label = GREEN, '🌿 Branch'
            context = f" (from thought {branch_from_thought}, ID: {branch_id})"
        else:
            color, label = BLUE, '💭 Thought'
            context = ''
        header = f"{color}{label}{RESET} {thought_number}/{total_thoughts}{context}"
        # Width math must use the *visible* header (no ANSI codes): the
        # colored string is longer than what the terminal renders.
        visible_header = f"{label} {thought_number}/{total_thoughts}{context}"
        # Fix: pad BOTH inner lines to the same visible width so the right
        # border lines up (the original padded only the thought line,
        # leaving the header row misaligned).
        inner_width = max(len(visible_header), len(thought))
        border = '─' * (inner_width + 2)
        header_padding = ' ' * (inner_width - len(visible_header))
        return f"""
┌{border}┐
│ {header}{header_padding} │
├{border}┤
│ {thought.ljust(inner_width)} │
└{border}┘"""

    def process_thought(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Process a single thought and return an MCP-style response.

        The thought is validated, appended to the history (and to its branch
        when both branchFromThought and branchId are supplied), echoed to
        stderr as a formatted box, and summarized as a JSON text payload.

        Args:
            input_data: The raw thought payload.

        Returns:
            Dict[str, Any]: Response with a single text content item; on
            failure the JSON carries the error message and 'isError' is True.
        """
        try:
            validated_input = self.validate_thought_data(input_data)
            # Keep the estimate consistent: the total can never be smaller
            # than the current thought number.
            if validated_input['thoughtNumber'] > validated_input['totalThoughts']:
                validated_input['totalThoughts'] = validated_input['thoughtNumber']
            self.thought_history.append(validated_input)
            # Branch bookkeeping happens only when BOTH branch fields are set.
            if validated_input.get('branchFromThought') and validated_input.get('branchId'):
                branch_id = validated_input['branchId']
                if branch_id not in self.branches:
                    self.branches[branch_id] = []
                self.branches[branch_id].append(validated_input)
            # Render to stderr so stdout stays clean for the MCP transport.
            print(self.format_thought(validated_input), file=sys.stderr)
            return {
                'content': [{
                    'type': 'text',
                    'text': json.dumps({
                        'thoughtNumber': validated_input['thoughtNumber'],
                        'totalThoughts': validated_input['totalThoughts'],
                        'nextThoughtNeeded': validated_input['nextThoughtNeeded'],
                        'branches': list(self.branches.keys()),
                        'thoughtHistoryLength': len(self.thought_history)
                    }, indent=2)
                }]
            }
        except Exception as error:
            # Surface the failure to the client instead of crashing the server.
            return {
                'content': [{
                    'type': 'text',
                    'text': json.dumps({
                        'error': str(error),
                        'status': 'failed'
                    }, indent=2)
                }],
                'isError': True
            }
# Create FastMCP server instance.
# NOTE(review): the `description`/`version` kwargs match the FastMCP release
# this gist targeted — confirm the installed SDK still accepts them.
mcp = FastMCP(
    name="sequential-thinking",
    description="A tool for dynamic and reflective problem-solving through structured thinking",
    version=__version__
)
# Module-level singleton shared by every tool invocation; thought history
# therefore accumulates across calls for the lifetime of the process.
thinking_server = SequentialThinkingServer()
# Long-form tool description surfaced to MCP clients via @mcp.tool(...).
SEQUENTIAL_THINKING_DESCRIPTION = """A detailed tool for dynamic and reflective problem-solving through thoughts.
This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
Each thought can build on, question, or revise previous insights as understanding deepens.
When to use this tool:
- Breaking down complex problems into steps
- Planning and design with room for revision
- Analysis that might need course correction
- Problems where the full scope might not be clear initially
- Problems that require a multi-step solution
- Tasks that need to maintain context over multiple steps
- Situations where irrelevant information needs to be filtered out
Key features:
- You can adjust total_thoughts up or down as you progress
- You can question or revise previous thoughts
- You can add more thoughts even after reaching what seemed like the end
- You can express uncertainty and explore alternative approaches
- Not every thought needs to build linearly - you can branch or backtrack
- Generates a solution hypothesis
- Verifies the hypothesis based on the Chain of Thought steps
- Repeats the process until satisfied
- Provides a correct answer
You should:
1. Start with an initial estimate of needed thoughts, but be ready to adjust
2. Feel free to question or revise previous thoughts
3. Don't hesitate to add more thoughts if needed, even at the "end"
4. Express uncertainty when present
5. Mark thoughts that revise previous thinking or branch into new paths
6. Ignore information that is irrelevant to the current step
7. Generate a solution hypothesis when appropriate
8. Verify the hypothesis based on the Chain of Thought steps
9. Repeat the process until satisfied with the solution
10. Provide a single, ideally correct answer as the final output
11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached"""
@mcp.tool(description=SEQUENTIAL_THINKING_DESCRIPTION)
def sequentialthinking(
    thought: str = Field(description="Your current thinking step"),
    nextThoughtNeeded: bool = Field(description="Whether another thought step is needed"),
    thoughtNumber: int = Field(description="Current thought number", ge=1),
    totalThoughts: int = Field(description="Estimated total thoughts needed", ge=1),
    isRevision: Optional[bool] = Field(None, description="Whether this thought revises previous thinking"),
    revisesThought: Optional[int] = Field(None, description="Which thought is being reconsidered", ge=1),
    branchFromThought: Optional[int] = Field(None, description="Branching point thought number", ge=1),
    branchId: Optional[str] = Field(None, description="Branch identifier"),
    needsMoreThoughts: Optional[bool] = Field(None, description="If more thoughts are needed"),
    ctx: Optional[Context] = None
) -> Dict[str, Any]:
    """
    MCP tool entry point for one sequential-thinking step.

    Assembles the incoming parameters into a thought payload — omitting
    optional fields that were not supplied — and delegates processing to the
    module-level SequentialThinkingServer instance.

    Args:
        thought: Your current thinking step.
        nextThoughtNeeded: Whether another thought step is needed.
        thoughtNumber: Current thought number in sequence.
        totalThoughts: Estimated total thoughts needed.
        isRevision: Whether this thought revises previous thinking.
        revisesThought: Which thought is being reconsidered.
        branchFromThought: Branching point thought number.
        branchId: Branch identifier.
        needsMoreThoughts: If more thoughts are needed.
        ctx: FastMCP context (automatically injected).

    Returns:
        Dict[str, Any]: Response with thought processing results.
    """
    # Emit a debug trace when FastMCP injected a request context.
    if ctx:
        ctx.debug(f"Processing thought #{thoughtNumber}/{totalThoughts}")
    payload: Dict[str, Any] = {
        'thought': thought,
        'nextThoughtNeeded': nextThoughtNeeded,
        'thoughtNumber': thoughtNumber,
        'totalThoughts': totalThoughts,
    }
    optional_values = {
        'isRevision': isRevision,
        'revisesThought': revisesThought,
        'branchFromThought': branchFromThought,
        'branchId': branchId,
        'needsMoreThoughts': needsMoreThoughts,
    }
    # Forward only the optional fields the caller actually supplied.
    payload.update({name: value for name, value in optional_values.items() if value is not None})
    return thinking_server.process_thought(payload)
def test_sequential_thinking() -> None:
    """
    Smoke-test the Sequential Thinking tool without starting the server.

    Feeds one sample thought through the module-level thinking_server and
    prints the JSON result to verify the implementation end to end.

    Returns:
        None
    """
    print("Testing Sequential Thinking tool...")
    # One minimal, valid thought payload (required fields only).
    demo_thought = {
        'thoughtNumber': 1,
        'totalThoughts': 3,
        'thought': "This is a test thought to verify the implementation works correctly.",
        'nextThoughtNeeded': True,
    }
    outcome = thinking_server.process_thought(demo_thought)
    print("\nResult:")
    print(json.dumps(outcome, indent=2))
    print("\nTest completed successfully!")
if __name__ == "__main__":
    # CLI entry point: `--test` runs a local smoke test; anything else
    # starts the FastMCP server on stdio.
    if len(sys.argv) > 1 and sys.argv[1] == "--test":
        # Run the test function if --test argument is provided
        test_sequential_thinking()
    else:
        try:
            # Start the FastMCP server; log to stderr so stdout stays
            # reserved for the stdio transport.
            print("Starting Sequential Thinking MCP Server...", file=sys.stderr)
            mcp.run(transport="stdio")
        except Exception as e:
            # Report the failure and exit non-zero so supervisors notice.
            print(f"Fatal error running server: {e}", file=sys.stderr)
            sys.exit(1)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
Unit tests for the Sequential Thinking server. | |
This module contains comprehensive tests for the Sequential Thinking server, | |
covering basic thought processing, thought revision, branching, error handling, | |
and edge cases. | |
""" | |
from typing import Any, Dict, List, Optional, TYPE_CHECKING | |
import json | |
import pytest | |
from codegen_lab.sequentialthinking import SequentialThinkingServer | |
if TYPE_CHECKING: | |
from _pytest.capture import CaptureFixture | |
from _pytest.fixtures import FixtureRequest | |
from _pytest.logging import LogCaptureFixture | |
from _pytest.monkeypatch import MonkeyPatch | |
from pytest_mock.plugin import MockerFixture | |
@pytest.fixture
def thinking_server() -> SequentialThinkingServer:
    """
    Create a fresh Sequential Thinking server for testing.

    A new instance per test keeps thought history and branches isolated
    between test cases.

    Returns:
        SequentialThinkingServer: A new server instance
    """
    return SequentialThinkingServer()
@pytest.fixture
def sample_thought() -> Dict[str, Any]:
    """
    Build a minimal valid thought payload for tests.

    Returns:
        Dict[str, Any]: A thought with only the required fields set
    """
    return dict(
        thought="This is a test thought.",
        nextThoughtNeeded=True,
        thoughtNumber=1,
        totalThoughts=3,
    )
@pytest.fixture
def sample_thought_sequence() -> List[Dict[str, Any]]:
    """
    Build a three-step thought sequence for tests.

    The final step sets nextThoughtNeeded to False to mark completion.

    Returns:
        List[Dict[str, Any]]: A sequence of sample thoughts
    """
    return [
        {
            "thought": text,
            "nextThoughtNeeded": more_needed,
            "thoughtNumber": number,
            "totalThoughts": 3,
        }
        for text, more_needed, number in (
            ("This is the first thought in a sequence.", True, 1),
            ("This is the second thought in a sequence.", True, 2),
            ("This is the final thought in a sequence.", False, 3),
        )
    ]
class TestSequentialThinkingBasic:
    """Tests for basic Sequential Thinking functionality."""

    @staticmethod
    def _payload(result: Dict[str, Any]) -> Dict[str, Any]:
        """Assert the response envelope shape and return its parsed JSON body."""
        assert "content" in result
        assert isinstance(result["content"], list)
        assert len(result["content"]) == 1
        assert result["content"][0]["type"] == "text"
        return json.loads(result["content"][0]["text"])

    def test_init(self, thinking_server: SequentialThinkingServer) -> None:
        """
        A new server starts with no recorded thoughts and no branches.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        assert thinking_server.thought_history == []
        assert thinking_server.branches == {}

    def test_validate_thought_data_valid(
        self, thinking_server: SequentialThinkingServer, sample_thought: Dict[str, Any]
    ) -> None:
        """
        Valid input passes validation and optional fields default to None.

        Args:
            thinking_server: The Sequential Thinking server fixture.
            sample_thought: A sample thought fixture.
        """
        validated = thinking_server.validate_thought_data(sample_thought)
        for required_key in ("thought", "thoughtNumber", "totalThoughts", "nextThoughtNeeded"):
            assert validated[required_key] == sample_thought[required_key]
        for optional_key in (
            "isRevision",
            "revisesThought",
            "branchFromThought",
            "branchId",
            "needsMoreThoughts",
        ):
            assert validated[optional_key] is None

    def test_validate_thought_data_missing_required(
        self, thinking_server: SequentialThinkingServer
    ) -> None:
        """
        Validation fails when required fields are missing.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        with pytest.raises(ValueError, match="Invalid thoughtNumber"):
            thinking_server.validate_thought_data({"thought": "Missing required fields"})

    def test_process_thought_basic(
        self, thinking_server: SequentialThinkingServer, sample_thought: Dict[str, Any]
    ) -> None:
        """
        Processing a thought records it and returns a well-formed response.

        Args:
            thinking_server: The Sequential Thinking server fixture.
            sample_thought: A sample thought fixture.
        """
        result = thinking_server.process_thought(sample_thought)
        # The thought lands in history.
        assert len(thinking_server.thought_history) == 1
        assert thinking_server.thought_history[0]["thought"] == sample_thought["thought"]
        # The JSON body mirrors the input and reports the history state.
        response_data = self._payload(result)
        assert response_data["thoughtNumber"] == sample_thought["thoughtNumber"]
        assert response_data["totalThoughts"] == sample_thought["totalThoughts"]
        assert response_data["nextThoughtNeeded"] == sample_thought["nextThoughtNeeded"]
        assert response_data["branches"] == []
        assert response_data["thoughtHistoryLength"] == 1

    def test_process_thought_sequence(
        self,
        thinking_server: SequentialThinkingServer,
        sample_thought_sequence: List[Dict[str, Any]]
    ) -> None:
        """
        A multi-thought sequence is processed and counted in order.

        Args:
            thinking_server: The Sequential Thinking server fixture.
            sample_thought_sequence: A sequence of sample thoughts.
        """
        for expected_length, thought in enumerate(sample_thought_sequence, start=1):
            response_data = self._payload(thinking_server.process_thought(thought))
            assert response_data["thoughtNumber"] == thought["thoughtNumber"]
            assert response_data["totalThoughts"] == thought["totalThoughts"]
            assert response_data["nextThoughtNeeded"] == thought["nextThoughtNeeded"]
            assert response_data["thoughtHistoryLength"] == expected_length
class TestSequentialThinkingRevision:
    """Tests for thought revision functionality."""

    def test_revise_thought(self, thinking_server: SequentialThinkingServer) -> None:
        """
        Revising an earlier thought is recorded with revision metadata.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        # Seed the history with two ordinary thoughts.
        for number in (1, 2):
            thinking_server.process_thought({
                "thought": f"Initial thought {number}",
                "nextThoughtNeeded": True,
                "thoughtNumber": number,
                "totalThoughts": 3,
            })
        revision = {
            "thought": "Revised thought 1",
            "nextThoughtNeeded": True,
            "thoughtNumber": 3,
            "totalThoughts": 3,
            "isRevision": True,
            "revisesThought": 1,
        }
        result = thinking_server.process_thought(revision)
        # The revision is appended to history with its metadata intact.
        assert len(thinking_server.thought_history) == 3
        latest = thinking_server.thought_history[2]
        assert latest["thought"] == revision["thought"]
        assert latest["isRevision"] is True
        assert latest["revisesThought"] == 1
        # Envelope shape.
        assert "content" in result
        assert isinstance(result["content"], list)
        assert len(result["content"]) == 1
        assert result["content"][0]["type"] == "text"
        # JSON payload mirrors the revision.
        response_data = json.loads(result["content"][0]["text"])
        assert response_data["thoughtNumber"] == revision["thoughtNumber"]
        assert response_data["totalThoughts"] == revision["totalThoughts"]
        assert response_data["nextThoughtNeeded"] == revision["nextThoughtNeeded"]
        assert response_data["thoughtHistoryLength"] == 3

    def test_revise_nonexistent_thought(
        self, thinking_server: SequentialThinkingServer
    ) -> None:
        """
        Revising a thought number that never existed is still accepted.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        thinking_server.process_thought({
            "thought": "Initial thought",
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 3,
        })
        # The server does not validate revisesThought, so this succeeds.
        result = thinking_server.process_thought({
            "thought": "Revision of nonexistent thought",
            "nextThoughtNeeded": True,
            "thoughtNumber": 2,
            "totalThoughts": 3,
            "isRevision": True,
            "revisesThought": 5,  # nonexistent thought number
        })
        assert "content" in result
        assert len(thinking_server.thought_history) == 2
class TestSequentialThinkingBranching:
    """Tests for branching functionality."""

    def test_create_branch(self, thinking_server: SequentialThinkingServer) -> None:
        """
        Branching from an existing thought records both branch and history.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        # Seed the history with two ordinary thoughts.
        for number in (1, 2):
            thinking_server.process_thought({
                "thought": f"Initial thought {number}",
                "nextThoughtNeeded": True,
                "thoughtNumber": number,
                "totalThoughts": 3,
            })
        branch_thought = {
            "thought": "Branch thought from #1",
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 2,
            "branchFromThought": 1,
            "branchId": "alternative-path",
        }
        result = thinking_server.process_thought(branch_thought)
        # The branch exists and holds exactly the new thought.
        assert "alternative-path" in thinking_server.branches
        branch_entries = thinking_server.branches["alternative-path"]
        assert len(branch_entries) == 1
        assert branch_entries[0]["thought"] == branch_thought["thought"]
        # The same thought is also appended to the main history.
        assert len(thinking_server.thought_history) == 3
        recorded = thinking_server.thought_history[2]
        assert recorded["thought"] == branch_thought["thought"]
        assert recorded["branchFromThought"] == 1
        assert recorded["branchId"] == "alternative-path"
        # Envelope shape.
        assert "content" in result
        assert isinstance(result["content"], list)
        assert len(result["content"]) == 1
        assert result["content"][0]["type"] == "text"
        # JSON payload mirrors the branch thought.
        response_data = json.loads(result["content"][0]["text"])
        assert response_data["thoughtNumber"] == branch_thought["thoughtNumber"]
        assert response_data["totalThoughts"] == branch_thought["totalThoughts"]
        assert response_data["nextThoughtNeeded"] == branch_thought["nextThoughtNeeded"]
        assert "alternative-path" in response_data["branches"]

    def test_continue_branch(self, thinking_server: SequentialThinkingServer) -> None:
        """
        Continuing a branch (branchId only) still lands in the history.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        thinking_server.process_thought({
            "thought": "Initial thought 1",
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 3,
        })
        # Open the branch with an explicit branchFromThought.
        thinking_server.process_thought({
            "thought": "First branch thought",
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 2,
            "branchFromThought": 1,
            "branchId": "alternative-path",
        })
        follow_up = {
            "thought": "Second branch thought",
            "nextThoughtNeeded": False,
            "thoughtNumber": 2,
            "totalThoughts": 2,
            "branchId": "alternative-path",
        }
        result = thinking_server.process_thought(follow_up)
        # Without branchFromThought the server only appends to history,
        # not to the branch list — so we only check the history here.
        assert len(thinking_server.thought_history) == 3
        recorded = thinking_server.thought_history[2]
        assert recorded["thought"] == follow_up["thought"]
        assert recorded["branchId"] == "alternative-path"
        # JSON payload mirrors the follow-up and still lists the branch.
        response_data = json.loads(result["content"][0]["text"])
        assert response_data["thoughtNumber"] == follow_up["thoughtNumber"]
        assert response_data["totalThoughts"] == follow_up["totalThoughts"]
        assert response_data["nextThoughtNeeded"] == follow_up["nextThoughtNeeded"]
        assert "alternative-path" in response_data["branches"]
class TestSequentialThinkingEdgeCases:
    """Tests for edge cases in Sequential Thinking."""

    def test_adjust_total_thoughts(self, thinking_server: SequentialThinkingServer) -> None:
        """
        totalThoughts is raised automatically when thoughtNumber exceeds it.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        result = thinking_server.process_thought({
            "thought": "Thought with number exceeding total",
            "nextThoughtNeeded": True,
            "thoughtNumber": 5,
            "totalThoughts": 3,
        })
        response_data = json.loads(result["content"][0]["text"])
        # Both the response and the stored record reflect the adjustment.
        assert response_data["totalThoughts"] == 5
        assert thinking_server.thought_history[0]["totalThoughts"] == 5

    def test_empty_thought(self, thinking_server: SequentialThinkingServer) -> None:
        """
        An empty thought string is rejected by validation.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        empty_payload = {
            "thought": "",
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 3,
        }
        with pytest.raises(ValueError, match="Invalid thought"):
            thinking_server.validate_thought_data(empty_payload)

    def test_very_long_thought(self, thinking_server: SequentialThinkingServer) -> None:
        """
        A very long thought is processed without truncation.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        long_text = "This is a very long thought. " * 100
        result = thinking_server.process_thought({
            "thought": long_text,
            "nextThoughtNeeded": True,
            "thoughtNumber": 1,
            "totalThoughts": 3,
        })
        # Stored verbatim in history.
        assert len(thinking_server.thought_history) == 1
        assert thinking_server.thought_history[0]["thought"] == long_text
        # Response still well-formed.
        assert "content" in result
        response_data = json.loads(result["content"][0]["text"])
        assert response_data["thoughtNumber"] == 1
        assert response_data["totalThoughts"] == 3
class TestSequentialThinkingErrorHandling:
    """Tests for error handling in Sequential Thinking."""

    def test_missing_required_fields(
        self, thinking_server: SequentialThinkingServer
    ) -> None:
        """
        Validation rejects a payload missing nextThoughtNeeded.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        payload = {
            "thought": "Missing nextThoughtNeeded",
            "thoughtNumber": 1,
            "totalThoughts": 3,
        }
        with pytest.raises(ValueError, match="Invalid nextThoughtNeeded"):
            thinking_server.validate_thought_data(payload)

    def test_invalid_field_types(
        self, thinking_server: SequentialThinkingServer
    ) -> None:
        """
        Validation rejects a string where a boolean is required.

        Args:
            thinking_server: The Sequential Thinking server fixture.
        """
        payload = {
            "thought": "Invalid nextThoughtNeeded type",
            "nextThoughtNeeded": "true",  # must be a real boolean
            "thoughtNumber": 1,
            "totalThoughts": 3,
        }
        with pytest.raises(ValueError, match="Invalid nextThoughtNeeded"):
            thinking_server.validate_thought_data(payload)
class TestSequentialThinkingToolFunction:
    """Tests for the Sequential Thinking tool function."""

    def test_tool_function_basic(self) -> None:
        """The tool function returns a well-formed response envelope."""
        from codegen_lab.sequentialthinking import sequentialthinking

        result = sequentialthinking(
            thought="Test thought",
            nextThoughtNeeded=True,
            thoughtNumber=1,
            totalThoughts=3
        )
        assert isinstance(result, dict)
        assert "content" in result
        content = result["content"]
        assert isinstance(content, list)
        assert len(content) == 1
        assert content[0]["type"] == "text"

    def test_tool_function_with_context(self) -> None:
        """Revision parameters are accepted by the tool function."""
        from codegen_lab.sequentialthinking import sequentialthinking

        result = sequentialthinking(
            thought="Test thought with context",
            nextThoughtNeeded=True,
            thoughtNumber=1,
            totalThoughts=3,
            isRevision=True,
            revisesThought=1
        )
        assert isinstance(result, dict)
        assert "content" in result

    def test_tool_function_with_optional_params(self) -> None:
        """Branching parameters are accepted by the tool function."""
        from codegen_lab.sequentialthinking import sequentialthinking

        result = sequentialthinking(
            thought="Test thought with optional params",
            nextThoughtNeeded=True,
            thoughtNumber=1,
            totalThoughts=3,
            branchFromThought=1,
            branchId="test-branch",
            needsMoreThoughts=True
        )
        assert isinstance(result, dict)
        assert "content" in result
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main(["-xvs", __file__])
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment