@bguiz
Created March 10, 2025 09:46
#!/usr/bin/env node
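// LangGraph human-in-the-loop demo (variant 1):
// the agent proposes tool calls, an 'approve' node pauses the run with
// interrupt(), and the resumed value routes either to the 'tools' node
// (approved) or to a dedicated 'reject' node (denied).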
import { ToolNode } from '@langchain/langgraph/prebuilt';
import {
  StateGraph,
  MessagesAnnotation,
  MemorySaver,
  START,
  END,
  interrupt,
  Command,
} from '@langchain/langgraph';
import { createInstance as llmCreateInstance } from './api/openrouter-openai.js';
import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { tool } from '@langchain/core/tools';
import { z } from 'zod';
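// NOTE: './api/openrouter-openai.js' is not included in this gist.
// A minimal sketch of what createInstance could look like (an assumption,
// not the author's implementation), using ChatOpenAI pointed at OpenRouter:
//
//   import { ChatOpenAI } from '@langchain/openai';
//   export function createInstance() {
//     return new ChatOpenAI({
//       model: 'openai/gpt-4o-mini', // any tool-calling model on OpenRouter
//       apiKey: process.env.OPENROUTER_API_KEY,
//       configuration: { baseURL: 'https://openrouter.ai/api/v1' },
//     });
//   }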
/**
 * PLACEHOLDER TOOLS
 */
const cmdFooTool = tool(async function (inputs) {
  console.log('===TOOL CMD_FOO===');
  return inputs.name;
}, {
  name: 'CMD_FOO',
  description: 'Invoke when you want to do a Foo.',
  schema: z.object({
    name: z.string().describe('Any string'),
  }),
});
const cmdBarTool = tool(async function (inputs) {
  console.log('===TOOL QRY_BAR===');
  return inputs.name;
}, {
  name: 'QRY_BAR',
  description: 'Invoke when you want to query a Bar.',
  schema: z.object({
    name: z.string().describe('Any string'),
  }),
});
const tools = [cmdFooTool, cmdBarTool];
const llm = llmCreateInstance();
const llmWithTools = llm.bindTools(tools);
const toolsNode = new ToolNode(tools);
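// Agent node: call the tool-bound LLM with the running message history;
// the response may contain tool_calls that still need human approval.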
async function agentNode(state) {
  console.log('===AGENT NODE===');
  const response = await llmWithTools.invoke(state.messages);
  console.log('=AGENT RESPONSE=',
    '\ncontent:', response.content,
    '\ntool_calls:', response.tool_calls.map((toolCall) => (toolCall.name)));
  return { messages: [response] };
}
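// Reject node: record the human's refusal as an AIMessage with no
// tool_calls, so the history explains why nothing was executed.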
async function rejectNode(state) {
  console.log('===REJECT NODE===');
  const rejectionMessage = new AIMessage({
    content: 'Tool calls were rejected by user',
    tool_calls: [],
  });
  return {
    messages: [
      ...state.messages,
      rejectionMessage,
    ],
  };
}
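// Approve node: interrupt() pauses the graph and surfaces the pending tool
// call to the human; when the run is resumed with `new Command({ resume })`,
// this node re-executes from the top and interrupt() returns the resume
// value, which decides whether to route to 'tools' or 'reject'.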
async function approveNode(state) {
  console.log('===APPROVE NODE===');
  const lastMsg = state.messages.at(-1);
  const toolCall = lastMsg.tool_calls.at(-1);
  const interruptMessage = `Please review the following tool invocation:
${toolCall.name} with inputs ${JSON.stringify(toolCall.args, undefined, 2)}
Do you approve (y/N)`;
  console.log('=APPROVE INTERRUPT PRE=');
  const interruptResponse = interrupt(interruptMessage);
  console.log('=APPROVE INTERRUPT POST=');
  const isApproved = (interruptResponse.trim().charAt(0).toLowerCase() === 'y');
  const goto = (isApproved) ? 'tools' : 'reject';
  console.log('=APPROVE RESULT=\n', { isApproved, goto });
  return new Command({ goto });
}
function hasToolCalls(message) {
  return message?.tool_calls?.length > 0;
}
async function agentRouter(state) {
  const lastMsg = state.messages.at(-1);
  if (hasToolCalls(lastMsg)) {
    return 'approve';
  }
  return END;
}
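// Graph wiring: START -> agent; the agent routes conditionally to 'approve'
// or END; 'approve' routes via Command to 'tools' or 'reject' (declared in
// `ends`); 'tools' loops back to the agent, and 'reject' ends the run.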
const workflow = new StateGraph(MessagesAnnotation)
  .addNode('agent', agentNode)
  .addNode('tools', toolsNode)
  .addNode('reject', rejectNode)
  .addNode('approve', approveNode, {
    ends: ['tools', 'reject'],
  })
  .addEdge(START, 'agent')
  .addEdge('tools', 'agent')
  .addEdge('reject', END)
  .addConditionalEdges('agent', agentRouter, ['approve', END]);
const checkpointer = new MemorySaver();
const graph = workflow.compile({
  checkpointer,
});
const graphConfig = {
  configurable: { thread_id: '0x0004' },
};
/**
 * SIMULATE A RUN
 */
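// All invocations share the same thread_id, so the MemorySaver checkpointer
// keeps a single conversation thread and lets `new Command({ resume })`
// continue a run that was paused by interrupt().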
let state;
let agentResult;
let inputText;
let invokeWith;
// step 1: prompt
inputText = 'Pls perform a Foo with name "ASDF".';
console.log('===HUMAN PROMPT (1)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (1)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 2: interrupted in the 'approve' node, human in the loop authorises
inputText = 'yes';
console.log('===HUMAN INTERRUPT RESPONSE (2)===\n', inputText);
invokeWith = new Command({ resume: inputText });
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (2)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 3: prompt
inputText = 'Pls perform a Foo with name "ZXCV".';
console.log('===HUMAN PROMPT (3)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (3)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 4: interrupted in the 'approve' node, human in the loop does not authorise
inputText = 'no';
console.log('===HUMAN INTERRUPT RESPONSE (4)===\n', inputText);
invokeWith = new Command({ resume: inputText });
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (4)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 5: prompt
inputText = 'Pls perform a Foo with name "GHJK".';
console.log('===HUMAN PROMPT (5)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (5)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
#!/usr/bin/env node
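// LangGraph human-in-the-loop demo (variant 2):
// same approve-before-tools flow as the first script, but without a separate
// 'reject' node; the 'approve' node itself records the rejection and ends
// the run.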
import { ToolNode } from '@langchain/langgraph/prebuilt';
import {
  StateGraph,
  MessagesAnnotation,
  MemorySaver,
  START,
  END,
  interrupt,
  Command,
} from '@langchain/langgraph';
import { createInstance as llmCreateInstance } from './api/openrouter-openai.js';
import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { tool } from '@langchain/core/tools';
import { z } from 'zod';
/**
 * PLACEHOLDER TOOLS
 */
const cmdFooTool = tool(async function (inputs) {
  console.log('===TOOL CMD_FOO===');
  return inputs.name;
}, {
  name: 'CMD_FOO',
  description: 'Invoke when you want to do a Foo.',
  schema: z.object({
    name: z.string().describe('Any string'),
  }),
});
const cmdBarTool = tool(async function (inputs) {
  console.log('===TOOL QRY_BAR===');
  return inputs.name;
}, {
  name: 'QRY_BAR',
  description: 'Invoke when you want to query a Bar.',
  schema: z.object({
    name: z.string().describe('Any string'),
  }),
});
const tools = [cmdFooTool, cmdBarTool];
const llm = llmCreateInstance();
const llmWithTools = llm.bindTools(tools);
const toolsNode = new ToolNode(tools);
async function agentNode(state) {
  console.log('===AGENT NODE===');
  const response = await llmWithTools.invoke(state.messages);
  console.log('=AGENT RESPONSE=',
    '\ncontent:', response.content,
    '\ntool_calls:', response.tool_calls.map((toolCall) => (toolCall.name)));
  return { messages: [response] };
}
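// Approve node (variant 2): pause with interrupt(); on resume, either route
// to 'tools', or append a rejection message and end the run in one Command.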
async function approveNode(state) {
  console.log('===APPROVE NODE===');
  const lastMsg = state.messages.at(-1);
  const toolCall = lastMsg.tool_calls.at(-1);
  const interruptMessage = `Please review the following tool invocation:
${toolCall.name} with inputs ${JSON.stringify(toolCall.args, undefined, 2)}
Do you approve (y/N)`;
  console.log('=APPROVE INTERRUPT PRE=');
  const interruptResponse = interrupt(interruptMessage);
  console.log('=APPROVE INTERRUPT POST=');
  const isApproved = (interruptResponse.trim().charAt(0).toLowerCase() === 'y');
  if (isApproved) {
    return new Command({
      goto: 'tools',
    });
  } else {
    // Create a new message for rejection and clear tool calls,
    // so that it does not get passed to the next graph node.
    const rejectionMessage = new AIMessage({
      content: 'Tool calls were rejected by user',
      tool_calls: [],
    });
    // Both the state update and the routing decision must go through a
    // Command; a plain object with a `goto` key would be treated as a state
    // update and would not end the run.
    return new Command({
      update: { messages: [...state.messages, rejectionMessage] },
      goto: END,
    });
  }
}
function hasToolCalls(message) {
  return message?.tool_calls?.length > 0;
}
async function agentRouter(state) {
  const lastMsg = state.messages.at(-1);
  if (hasToolCalls(lastMsg)) {
    return 'approve';
  }
  return END;
}
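// Graph wiring (variant 2): 'approve' can end the run directly, so its
// declared destinations are 'tools' and END and no 'reject' node is needed.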
const workflow = new StateGraph(MessagesAnnotation)
  .addNode('agent', agentNode)
  .addNode('tools', toolsNode)
  .addNode('approve', approveNode, {
    ends: ['tools', END],
  })
  .addEdge(START, 'agent')
  .addEdge('tools', 'agent')
  .addConditionalEdges('agent', agentRouter, ['approve', END]);
const checkpointer = new MemorySaver();
const graph = workflow.compile({
  checkpointer,
});
const graphConfig = {
  configurable: { thread_id: '0x0004' },
};
/**
 * SIMULATE A RUN
 */
let state;
let agentResult;
let inputText;
let invokeWith;
// step 1: prompt
inputText = 'Pls perform a Foo with name "ASDF".';
console.log('===HUMAN PROMPT (1)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (1)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 2: interrupted in the 'approve' node, human in the loop authorises
inputText = 'yes';
console.log('===HUMAN INTERRUPT RESPONSE (2)===\n', inputText);
invokeWith = new Command({ resume: inputText });
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (2)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 3: prompt
inputText = 'Pls perform a Foo with name "ZXCV".';
console.log('===HUMAN PROMPT (3)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (3)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 4: interrupted in the 'approve' node, human in the loop does not authorise
inputText = 'no';
console.log('===HUMAN INTERRUPT RESPONSE (4)===\n', inputText);
invokeWith = new Command({ resume: inputText });
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (4)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);
// step 5: prompt
inputText = 'Pls perform a Foo with name "GHJK".';
console.log('===HUMAN PROMPT (5)===\n', inputText);
invokeWith = { messages: [new HumanMessage(inputText)] };
agentResult = await graph.invoke(invokeWith, graphConfig);
state = await graph.getState(graphConfig);
console.log('===STATE NEXT (5)===\n', state.next);
console.log('=LAST MSG=\n', agentResult.messages.at(-1).content);
console.log('=LAST TOOL CALLS=\n', agentResult.messages.at(-1).tool_calls);