feat: AI Workflow Builder agent (no-changelog) (#17423)

Co-authored-by: cubic-dev-ai[bot] <191113872+cubic-dev-ai[bot]@users.noreply.github.com>
Author: oleg
Date: 2025-07-21 11:18:26 +02:00
Committed by: GitHub
Parent: c0f1867429
Commit: 632b38119b
133 changed files with 18499 additions and 2867 deletions

View File

@@ -29,6 +29,7 @@ component_management:
- component_id: backend_packages
name: Backend
paths:
- packages/@n8n/ai-workflow-builder.ee/**
- packages/@n8n/api-types/**
- packages/@n8n/config/**
- packages/@n8n/client-oauth2/**

View File

@@ -0,0 +1,2 @@
evaluations/results/*
evaluations/nodes.json

View File

@@ -0,0 +1,16 @@
{
"fileExtensions": ["ts"],
"excludeRegExp": [
"node_modules",
"dist",
"\\.test\\.",
"\\.spec\\."
],
"tsConfig": "./tsconfig.json",
"layout": "neato",
"detectiveOptions": {
"ts": {
"skipTypeImports": false
}
}
}

View File

@@ -0,0 +1,62 @@
{
"name": "@n8n/ai-workflow-builder",
"version": "0.13.0",
"scripts": {
"clean": "rimraf dist .turbo",
"typecheck": "tsc --noEmit",
"build": "tsc -p ./tsconfig.build.json && tsc-alias -p tsconfig.build.json",
"format": "biome format --write src",
"format:check": "biome ci src",
"test": "jest",
"test:coverage": "jest --coverage",
"test:watch": "jest --watch",
"lint": "eslint . --quiet",
"lintfix": "eslint . --fix",
"watch": "tsc-watch -p tsconfig.build.json --onCompilationComplete \"tsc-alias -p tsconfig.build.json\"",
"deps:graph": "madge src/index.ts --image deps-graph.svg",
"deps:graph:service": "madge src/ai-workflow-builder-agent.service.ts --image deps-service.svg",
"deps:graph:tools": "madge src/tools/index.ts --image deps-tools.svg",
"deps:circular": "madge src/index.ts --circular",
"deps:report": "madge src/index.ts --json > deps-report.json && echo 'Dependency report saved to deps-report.json'",
"deps:orphans": "madge src/index.ts --orphans",
"deps:all": "pnpm run deps:graph && pnpm run deps:graph:service && pnpm run deps:graph:tools && pnpm run deps:circular && pnpm run deps:report",
"eval": "tsx evaluations/run-evaluation.ts",
"eval:generate": "GENERATE_TEST_CASES=true tsx evaluations/run-evaluation.ts"
},
"main": "dist/index.js",
"module": "src/index.ts",
"types": "dist/index.d.ts",
"files": [
"dist/**/*"
],
"exports": {
".": {
"require": "./dist/index.js",
"import": "./src/index.ts",
"types": "./dist/index.d.ts"
}
},
"dependencies": {
"@langchain/anthropic": "catalog:",
"@langchain/core": "catalog:",
"@langchain/langgraph": "0.2.74",
"@langchain/openai": "catalog:",
"@n8n/backend-common": "workspace:^",
"@n8n/config": "workspace:*",
"@n8n/di": "workspace:*",
"@n8n_io/ai-assistant-sdk": "catalog:",
"langsmith": "^0.3.45",
"n8n-workflow": "workspace:*",
"picocolors": "catalog:",
"zod": "catalog:"
},
"devDependencies": {
"@n8n/typescript-config": "workspace:*",
"@types/cli-progress": "^3.11.5",
"p-limit": "^3.1.0",
"cli-progress": "^3.12.0",
"cli-table3": "^0.6.3",
"jest-mock-extended": "^3.0.4",
"madge": "^8.0.0"
}
}

View File

@@ -0,0 +1,182 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import { MemorySaver } from '@langchain/langgraph';
import { Logger } from '@n8n/backend-common';
import { Service } from '@n8n/di';
import { AiAssistantClient } from '@n8n_io/ai-assistant-sdk';
import { Client } from 'langsmith';
import { INodeTypes } from 'n8n-workflow';
import type { IUser, INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from './errors';
import { anthropicClaudeSonnet4, gpt41mini } from './llm-config';
import { WorkflowBuilderAgent, type ChatPayload } from './workflow-builder-agent';
@Service()
export class AiWorkflowBuilderService {
private parsedNodeTypes: INodeTypeDescription[] = [];
private llmSimpleTask: BaseChatModel | undefined;
private llmComplexTask: BaseChatModel | undefined;
private tracingClient: Client | undefined;
private checkpointer = new MemorySaver();
private agent: WorkflowBuilderAgent | undefined;
constructor(
private readonly nodeTypes: INodeTypes,
private readonly client?: AiAssistantClient,
private readonly logger?: Logger,
) {
this.parsedNodeTypes = this.getNodeTypes();
}
private async setupModels(user?: IUser) {
try {
if (this.llmSimpleTask && this.llmComplexTask) {
return;
}
// If client is provided, use it for API proxy
if (this.client && user) {
const authHeaders = await this.client.generateApiProxyCredentials(user);
// Extract baseUrl from client configuration
const baseUrl = this.client.getApiProxyBaseUrl();
this.llmSimpleTask = await gpt41mini({
baseUrl: baseUrl + '/openai',
// When using the API proxy, the key is populated automatically; we only need to pass a placeholder
apiKey: '-',
headers: {
Authorization: authHeaders.apiKey,
},
});
this.llmComplexTask = await anthropicClaudeSonnet4({
baseUrl: baseUrl + '/anthropic',
apiKey: '-',
headers: {
Authorization: authHeaders.apiKey,
'anthropic-beta': 'prompt-caching-2024-07-31',
},
});
this.tracingClient = new Client({
apiKey: '-',
apiUrl: baseUrl + '/langsmith',
autoBatchTracing: false,
traceBatchConcurrency: 1,
fetchOptions: {
headers: {
Authorization: authHeaders.apiKey,
},
},
});
return;
}
// If no API proxy client is available, fall back to API keys from environment variables
this.llmSimpleTask = await gpt41mini({
apiKey: process.env.N8N_AI_OPENAI_API_KEY ?? '',
});
this.llmComplexTask = await anthropicClaudeSonnet4({
apiKey: process.env.N8N_AI_ANTHROPIC_KEY ?? '',
headers: {
'anthropic-beta': 'prompt-caching-2024-07-31',
},
});
} catch (error) {
const llmError = new LLMServiceError('Failed to setup LLM models', {
cause: error,
tags: {
hasClient: !!this.client,
hasUser: !!user,
},
});
throw llmError;
}
}
private getNodeTypes(): INodeTypeDescription[] {
// These types are ignored because they tend to cause issues when generating workflows
const ignoredTypes = [
'@n8n/n8n-nodes-langchain.toolVectorStore',
'@n8n/n8n-nodes-langchain.documentGithubLoader',
'@n8n/n8n-nodes-langchain.code',
];
const nodeTypesKeys = Object.keys(this.nodeTypes.getKnownTypes());
const nodeTypes = nodeTypesKeys
.filter((nodeType) => !ignoredTypes.includes(nodeType))
.map((nodeName) => {
try {
return { ...this.nodeTypes.getByNameAndVersion(nodeName).description, name: nodeName };
} catch (error) {
this.logger?.error('Error getting node type', {
nodeName,
error: error instanceof Error ? error.message : 'Unknown error',
});
return undefined;
}
})
.filter(
(nodeType): nodeType is INodeTypeDescription =>
nodeType !== undefined && nodeType.hidden !== true,
)
.map((nodeType, _index, nodeTypes: INodeTypeDescription[]) => {
// If the node type is a tool, we need to find the corresponding non-tool node type
// and merge the two node types to get the full node type description.
const isTool = nodeType.name.endsWith('Tool');
if (!isTool) return nodeType;
const nonToolNode = nodeTypes.find((nt) => nt.name === nodeType.name.replace('Tool', ''));
if (!nonToolNode) return nodeType;
return {
...nonToolNode,
...nodeType,
};
});
return nodeTypes;
}
private async getAgent(user?: IUser) {
if (!this.llmComplexTask || !this.llmSimpleTask) {
await this.setupModels(user);
}
if (!this.llmComplexTask || !this.llmSimpleTask) {
throw new LLMServiceError('Failed to initialize LLM models');
}
this.agent ??= new WorkflowBuilderAgent({
parsedNodeTypes: this.parsedNodeTypes,
// We use Sonnet for both simple and complex tasks
llmSimpleTask: this.llmComplexTask,
llmComplexTask: this.llmComplexTask,
logger: this.logger,
checkpointer: this.checkpointer,
tracer: this.tracingClient
? new LangChainTracer({ client: this.tracingClient, projectName: 'n8n-workflow-builder' })
: undefined,
});
return this.agent;
}
async *chat(payload: ChatPayload, user?: IUser) {
const agent = await this.getAgent(user);
for await (const output of agent.chat(payload, user?.id?.toString())) {
yield output;
}
}
async getSessions(workflowId: string | undefined, user?: IUser) {
const agent = await this.getAgent(user);
return await agent.getSessions(workflowId, user?.id?.toString());
}
}
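A minimal consumption sketch of the service above, assuming the caller already has an INodeTypes registry and an IUser; the payload shape is illustrative only.

import type { INodeTypes, IUser } from 'n8n-workflow';
import { AiWorkflowBuilderService } from '@n8n/ai-workflow-builder';

// `nodeTypes` and `user` are assumed to be supplied by the caller (e.g. the CLI package).
async function streamBuilderChat(nodeTypes: INodeTypes, user: IUser) {
  const service = new AiWorkflowBuilderService(nodeTypes);
  const payload = {
    message: 'Build a workflow that posts new RSS items to Slack',
  } as Parameters<AiWorkflowBuilderService['chat']>[0];
  // chat() is an async generator, so each chunk can be forwarded to the client as it arrives.
  for await (const output of service.chat(payload, user)) {
    process.stdout.write(JSON.stringify(output) + '\n');
  }
}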

View File

@@ -0,0 +1,64 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import z from 'zod';
export async function conversationCompactChain(llm: BaseChatModel, messages: BaseMessage[]) {
// Use structured output for consistent summary format
const CompactedSession = z.object({
summary: z.string().describe('A concise summary of the conversation so far'),
key_decisions: z.array(z.string()).describe('List of key decisions and actions taken'),
current_state: z.string().describe('Description of the current workflow state'),
next_steps: z.string().describe('Suggested next steps based on the conversation'),
});
const modelWithStructure = llm.withStructuredOutput(CompactedSession);
// Format messages for summarization
const conversationText = messages
.map((msg) => {
if (msg instanceof HumanMessage) {
// eslint-disable-next-line @typescript-eslint/no-base-to-string, @typescript-eslint/restrict-template-expressions
return `User: ${msg.content}`;
} else if (msg instanceof AIMessage) {
// eslint-disable-next-line @typescript-eslint/no-base-to-string, @typescript-eslint/restrict-template-expressions
return `Assistant: ${msg.content ?? 'Used tools'}`;
}
return '';
})
.filter(Boolean)
.join('\n');
const compactPrompt = `Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
${conversationText}
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`;
const structuredOutput = await modelWithStructure.invoke(compactPrompt);
// Create a new compacted message
const compactedMessage = new AIMessage({
content: `## Previous Conversation Summary
**Summary:** ${structuredOutput.summary}
**Key Decisions:**
${(structuredOutput.key_decisions as string[]).map((d: string) => `- ${d}`).join('\n')}
**Current State:** ${structuredOutput.current_state}
**Next Steps:** ${structuredOutput.next_steps}`,
});
// Keep only the last message (the user's request to compact) plus the summary
const lastUserMessage = messages.slice(-1);
const newMessages = [lastUserMessage[0], compactedMessage];
return {
success: true,
summary: structuredOutput,
newMessages,
messagesRemoved: messages.length - newMessages.length,
};
}
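A minimal usage sketch for this compaction chain, assuming an Anthropic chat model; the model id and import path are placeholders.

import { ChatAnthropic } from '@langchain/anthropic';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { conversationCompactChain } from './chains/conversation-compact';

async function compactHistoryExample() {
  const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514' });
  const history = [
    new HumanMessage('Create a workflow that fetches weather data every morning'),
    new AIMessage('Added a Schedule Trigger connected to an HTTP Request node.'),
    new HumanMessage('Please compact our conversation'),
  ];
  const { newMessages, messagesRemoved } = await conversationCompactChain(llm, history);
  // newMessages holds the latest user message followed by a single AI summary message.
  console.log(`Removed ${messagesRemoved} messages, kept ${newMessages.length}.`);
}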

View File

@@ -0,0 +1,119 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import type { Logger } from 'n8n-workflow';
import { z } from 'zod';
import { LLMServiceError } from '../errors';
import type { ParameterUpdaterOptions } from '../types/config';
import { ParameterUpdatePromptBuilder } from './prompts/prompt-builder';
export const parametersSchema = z
.object({
parameters: z
.object({})
.passthrough()
.describe(
"The complete updated parameters object for the node. This should be a JSON object that matches the node's parameter structure. Include ALL existing parameters plus the requested changes.",
),
})
.describe(
'The complete updated parameters object for the node. Must include only parameters from <node_properties_definition>. For example: { "parameters": { "method": "POST", "url": "https://api.example.com", "sendHeaders": true, "headerParameters": { "parameters": [{ "name": "Content-Type", "value": "application/json" }] } } }',
);
const nodeDefinitionPrompt = `
The node accepts these properties:
<node_properties_definition>
{node_definition}
</node_properties_definition>`;
const workflowContextPrompt = `
<current_workflow_json>
{workflow_json}
</current_workflow_json>
<current_simplified_execution_data>
{execution_data}
</current_simplified_execution_data>
<current_execution_nodes_schemas>
{execution_schema}
</current_execution_nodes_schemas>
<selected_node>
Name: {node_name}
Type: {node_type}
Current Parameters: {current_parameters}
</selected_node>
<requested_changes>
{changes}
</requested_changes>
Based on the requested changes and the node's property definitions, return the complete updated parameters object.`;
/**
* Creates a parameter updater chain with dynamic prompt building
*/
export const createParameterUpdaterChain = (
llm: BaseChatModel,
options: ParameterUpdaterOptions,
logger?: Logger,
) => {
if (typeof llm.withStructuredOutput !== 'function') {
throw new LLMServiceError("LLM doesn't support withStructuredOutput", {
llmModel: llm._llmType(),
});
}
// Build dynamic system prompt based on context
const systemPromptContent = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: options.nodeType,
nodeDefinition: options.nodeDefinition,
requestedChanges: options.requestedChanges,
hasResourceLocatorParams: ParameterUpdatePromptBuilder.hasResourceLocatorParameters(
options.nodeDefinition,
),
});
// Log token estimate for monitoring
const tokenEstimate = ParameterUpdatePromptBuilder.estimateTokens(systemPromptContent);
logger?.debug(`Parameter updater prompt size: ~${tokenEstimate} tokens`);
// Cache system prompt and node definition prompt
const systemPrompt = new SystemMessage({
content: [
{
type: 'text',
text: systemPromptContent,
cache_control: { type: 'ephemeral' },
},
],
});
const nodeDefinitionMessage = ChatPromptTemplate.fromMessages([
[
'human',
[
{
type: 'text',
text: nodeDefinitionPrompt,
cache_control: { type: 'ephemeral' },
},
],
],
]);
// Do not cache workflow context prompt as it is dynamic
const workflowContextMessage = HumanMessagePromptTemplate.fromTemplate(workflowContextPrompt);
const prompt = ChatPromptTemplate.fromMessages([
systemPrompt,
nodeDefinitionMessage,
workflowContextMessage,
]);
const llmWithStructuredOutput = llm.withStructuredOutput(parametersSchema);
const modelWithStructure = prompt.pipe(llmWithStructuredOutput);
return modelWithStructure;
};
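A minimal invocation sketch for this chain factory; the import path, model id and option cast are assumptions, and the invoke() variables simply mirror the two prompt templates defined above.

import { ChatOpenAI } from '@langchain/openai';
import type { INodeTypeDescription } from 'n8n-workflow';
import { createParameterUpdaterChain } from './chains/parameter-updater';

async function updateHttpMethod(nodeDefinition: INodeTypeDescription) {
  const llm = new ChatOpenAI({ model: 'gpt-4.1-mini' });
  const chain = createParameterUpdaterChain(llm, {
    nodeType: 'n8n-nodes-base.httpRequest',
    nodeDefinition,
    requestedChanges: ['Change the method to POST'],
  } as Parameters<typeof createParameterUpdaterChain>[1]);
  // Values below are placeholders for the prompt template variables.
  const result = await chain.invoke({
    node_definition: JSON.stringify(nodeDefinition.properties),
    workflow_json: '{}',
    execution_data: '{}',
    execution_schema: '{}',
    node_name: 'HTTP Request',
    node_type: 'n8n-nodes-base.httpRequest',
    current_parameters: '{ "url": "https://api.example.com", "method": "GET" }',
    changes: 'Change the method to POST',
  });
  return result.parameters; // the complete parameters object described by parametersSchema
}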

View File

@@ -3,9 +3,10 @@ import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
import { LLMServiceError } from '../errors';
export const plannerPrompt = new SystemMessage(
`You are a Workflow Planner for n8n, a platform that helps users automate processes across different services and APIs.
@@ -85,7 +86,7 @@ const chatPrompt = ChatPromptTemplate.fromMessages([
export const plannerChain = (llm: BaseChatModel) => {
if (!llm.bindTools) {
throw new OperationalError("LLM doesn't support binding tools");
throw new LLMServiceError("LLM doesn't support binding tools", { llmModel: llm._llmType() });
}
return chatPrompt

View File

@@ -0,0 +1,9 @@
export const COMMON_PATTERNS = `
## Common Parameter Update Patterns
### HTTP Request Node Updates
- URL: Set directly or use expressions
- Method: GET, POST, PUT, DELETE, etc.
- Headers: Add/update in headerParameters.parameters array
- Body: Update bodyParameters.parameters for POST/PUT
- Authentication: Update authentication settings`;

View File

@@ -0,0 +1,24 @@
export const CORE_INSTRUCTIONS = `You are an expert n8n workflow architect who updates node parameters based on natural language instructions.
## Your Task
Update the parameters of an existing n8n node based on the requested changes. Return the COMPLETE parameters object with both modified and unmodified parameters. Only modify the parameters that are explicitly mentioned in the changes, preserving all other existing parameters exactly as they are.
## Reference Information
You will receive:
1. The original user workflow request
2. The current workflow JSON
3. The selected node's current configuration (id, name, type, parameters)
4. The node type's parameter definitions
5. Natural language changes to apply
## Parameter Update Guidelines
1. START WITH CURRENT: If current parameters is empty {}, start with an empty object and add the requested parameters
2. PRESERVE EXISTING VALUES: Only modify parameters mentioned in the requested changes
3. MAINTAIN STRUCTURE: Keep the exact parameter structure required by the node type
4. CHECK FOR RESOURCELOCATOR: If a parameter is type 'resourceLocator' in the node definition, it MUST use the ResourceLocator structure with __rl, mode, and value fields
5. USE PROPER EXPRESSIONS: Follow n8n expression syntax when referencing other nodes
6. VALIDATE TYPES: Ensure parameter values match their expected types
7. HANDLE NESTED PARAMETERS: Correctly update nested structures like headers, conditions, etc.
8. SIMPLE VALUES: For simple parameter updates like "Set X to Y", directly set the parameter without unnecessary nesting
9. GENERATE IDS: When adding new items to arrays (like assignments, headers, etc.), generate unique IDs using a simple pattern like "id-1", "id-2", etc.
10. TOOL NODE DETECTION: Check if node type ends with "Tool" to determine if $fromAI expressions are available`;

View File

@@ -0,0 +1,11 @@
export const EXPRESSION_RULES = `
## CRITICAL: Correctly Formatting n8n Expressions
When using expressions to reference data from other nodes:
- ALWAYS use the format: \`={{ $('Node Name').item.json.field }}\`
- NEVER omit the equals sign before the double curly braces
- ALWAYS use DOUBLE curly braces, never single
- NEVER use emojis or special characters inside expressions as they will break the expression
- INCORRECT: \`{ $('Node Name').item.json.field }\` (missing =, single braces)
- INCORRECT: \`{{ $('Node Name').item.json.field }}\` (missing =)
- INCORRECT: \`={{ $('👍 Node').item.json.field }}\` (contains emoji)
- CORRECT: \`={{ $('Previous Node').item.json.field }}\``;

View File

@@ -0,0 +1,3 @@
export const OUTPUT_FORMAT = `
## Output Format
Return ONLY the complete updated parameters object that matches the node's parameter structure. Include ALL parameters, both modified and unmodified.`;

View File

@@ -0,0 +1,75 @@
export const RESOURCE_LOCATOR_EXAMPLES = `
### ResourceLocator Examples
#### Example 1: Slack Node - Channel by ID
Current Parameters:
{
"select": "channel",
"channelId": {
"__rl": true,
"value": "",
"mode": "list"
},
"otherOptions": {}
}
Requested Changes: Send to channel C0122KQ70S7E
Expected Output:
{
"select": "channel",
"channelId": {
"__rl": true,
"mode": "id",
"value": "C0122KQ70S7E"
},
"otherOptions": {}
}
#### Example 2: Google Drive Node - File by URL
Current Parameters:
{
"operation": "download",
"fileId": {
"__rl": true,
"value": "",
"mode": "list"
}
}
Requested Changes: Use file https://drive.google.com/file/d/1ABC123XYZ/view
Expected Output:
{
"operation": "download",
"fileId": {
"__rl": true,
"mode": "url",
"value": "https://drive.google.com/file/d/1ABC123XYZ/view"
}
}
#### Example 3: Notion Node - Page ID from Expression
Current Parameters:
{
"resource": "databasePage",
"operation": "get",
"pageId": {
"__rl": true,
"value": "hardcoded-page-id",
"mode": "id"
}
}
Requested Changes: Use page ID from the previous node's output
Expected Output:
{
"resource": "databasePage",
"operation": "get",
"pageId": {
"__rl": true,
"mode": "id",
"value": "={{ $('Previous Node').item.json.pageId }}"
}
}`;

View File

@@ -0,0 +1,67 @@
export const TOOL_NODE_EXAMPLES = `
### Tool Node Examples
#### Example 1: Gmail Tool - Send Email with AI
Current Parameters: {}
Requested Changes: Let AI determine recipient, subject, and message
Expected Output:
{
"sendTo": "={{ $fromAI('to') }}",
"subject": "={{ $fromAI('subject') }}",
"message": "={{ $fromAI('message_html') }}",
"options": {}
}
#### Example 2: Google Calendar Tool - Filter by Date
Current Parameters:
{
"operation": "getAll",
"calendar": {
"__rl": true,
"value": "primary",
"mode": "list"
}
}
Requested Changes: Let AI determine date range for filtering
Expected Output:
{
"operation": "getAll",
"calendar": {
"__rl": true,
"value": "primary",
"mode": "list"
},
"timeMin": "={{ $fromAI('After', '', 'string') }}",
"timeMax": "={{ $fromAI('Before', '', 'string') }}"
}
#### Example 3: Slack Tool - Send Message
Current Parameters:
{
"resource": "message"
}
Requested Changes: Let AI determine channel and message content
Expected Output:
{
"resource": "message",
"channelId": "={{ $fromAI('channel') }}",
"messageText": "={{ $fromAI('message') }}"
}
#### Example 4: Tool Node with Mixed Content
Current Parameters:
{
"sendTo": "admin@company.com"
}
Requested Changes: Keep admin email but let AI add additional recipients and determine subject
Expected Output:
{
"sendTo": "=admin@company.com, {{ $fromAI('additional_recipients') }}",
"subject": "={{ $fromAI('subject') }} - Automated Report"
}`;

View File

@@ -0,0 +1,134 @@
export const IF_NODE_EXAMPLES = `
### IF Node Examples
#### Example 1: Simple String Condition
Current Parameters: {}
Requested Changes: Check if order status equals "pending"
Expected Output:
{
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"id": "id-1",
"leftValue": "={{ $('Previous Node').item.json.orderStatus }}",
"rightValue": "pending",
"operator": {
"type": "string",
"operation": "equals"
}
}
],
"combinator": "and"
}
}
#### Example 2: Check if Field Exists
Current Parameters: {}
Requested Changes: Check if email field exists in the data
Expected Output:
{
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"id": "id-1",
"leftValue": "={{ $('Previous Node').item.json.email }}",
"operator": {
"type": "string",
"operation": "exists"
}
}
],
"combinator": "and"
}
}
#### Example 3: Multiple Conditions with AND
Current Parameters: {}
Requested Changes: Check if status is active AND score is 50 or higher
Expected Output:
{
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"id": "id-1",
"leftValue": "={{ $('Set').item.json.status }}",
"rightValue": "active",
"operator": {
"type": "string",
"operation": "equals"
}
},
{
"id": "id-2",
"leftValue": "={{ $('Set').item.json.score }}",
"rightValue": "50",
"operator": {
"type": "number",
"operation": "gte"
}
}
],
"combinator": "and"
}
}
#### Example 4: IF Node - Complex Multi-Type Conditions
Current Parameters: {}
Requested Changes:
- Check if email is not empty AND verified is true AND permissions array contains "write"
Expected Output:
{
"conditions": {
"options": {
"caseSensitive": true,
"leftValue": "",
"typeValidation": "strict"
},
"conditions": [
{
"id": "id-1",
"leftValue": "={{ $('Set').item.json.email }}",
"operator": {
"type": "string",
"operation": "notEmpty"
}
},
{
"id": "id-2",
"leftValue": "={{ $('Set').item.json.verified }}",
"operator": {
"type": "boolean",
"operation": "true"
}
},
{
"id": "id-3",
"leftValue": "={{ $('Set').item.json.permissions }}",
"rightValue": "write",
"operator": {
"type": "array",
"operation": "contains"
}
}
],
"combinator": "and"
}
}
`;

View File

@@ -0,0 +1,144 @@
export const SET_NODE_EXAMPLES = `
### Set Node Examples
#### Example 1: Simple String Assignment
Current Parameters: {}
Requested Changes: Set message to "Hello World"
Expected Output:
{
"assignments": {
"assignments": [
{
"id": "id-1",
"name": "message",
"value": "Hello World",
"type": "string"
}
]
},
"options": {}
}
#### Example 2: Multiple Type Assignments
Current Parameters: {}
Requested Changes:
- Set productName to "Widget"
- Set price to 19.99
- Set inStock to true
- Set categories to electronics and gadgets
Expected Output:
{
"assignments": {
"assignments": [
{
"id": "id-1",
"name": "productName",
"value": "Widget",
"type": "string"
},
{
"id": "id-2",
"name": "price",
"value": 19.99,
"type": "number"
},
{
"id": "id-3",
"name": "inStock",
"value": true,
"type": "boolean"
},
{
"id": "id-4",
"name": "categories",
"value": "[\\"electronics\\", \\"gadgets\\"]",
"type": "array"
}
]
},
"options": {}
}
#### Example 3: Expression-Based Assignments
Current Parameters: {}
Requested Changes:
- Set userId from HTTP Request node
- Calculate totalPrice from quantity and unit price
Expected Output:
{
"assignments": {
"assignments": [
{
"id": "id-1",
"name": "userId",
"value": "={{ $('HTTP Request').item.json.id }}",
"type": "string"
},
{
"id": "id-2",
"name": "totalPrice",
"value": "={{ $('Set').item.json.quantity * $('Set').item.json.unitPrice }}",
"type": "number"
}
]
},
"options": {}
}
#### Example 4: Set Node - Complex Object and Array Creation
Current Parameters:
{
"assignments": {
"assignments": [
{
"id": "existing-1",
"name": "orderId",
"value": "12345",
"type": "string"
}
]
},
"options": {}
}
Requested Changes:
- Keep orderId
- Add customer object with name and email from previous nodes
- Add items array from JSON string
- Set processed timestamp
Expected Output:
{
"assignments": {
"assignments": [
{
"id": "existing-1",
"name": "orderId",
"value": "12345",
"type": "string"
},
{
"id": "id-2",
"name": "customer",
"value": "={{ JSON.stringify({ \\"name\\": $('Form').item.json.customerName, \\"email\\": $('Form').item.json.customerEmail }) }}",
"type": "object"
},
{
"id": "id-3",
"name": "items",
"value": "={{ $('HTTP Request').item.json.itemsJson }}",
"type": "array"
},
{
"id": "id-4",
"name": "processedAt",
"value": "={{ $now.toISO() }}",
"type": "string"
}
]
},
"options": {}
}
`;

View File

@@ -0,0 +1,47 @@
export const SIMPLE_UPDATE_EXAMPLES = `
## Examples of Parameter Updates
### Example 1: Update HTTP Request URL
Change: "Set the URL to call the weather API for London"
Current parameters: { "url": "https://api.example.com", "method": "GET" }
Updated parameters: { "url": "https://api.openweathermap.org/data/2.5/weather?q=London", "method": "GET" }
### Example 2: Add a header
Change: "Add an API key header with value from credentials"
Current parameters: { "url": "...", "sendHeaders": false }
Updated parameters: {
"url": "...",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "X-API-Key",
"value": "={{ $credentials.apiKey }}"
}
]
}
}
### Example 3: Update condition
Change: "Check if temperature is above 25 degrees"
Current parameters: { "conditions": { "conditions": [] } }
Updated parameters: {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Weather Node').item.json.main.temp }}",
"rightValue": 25,
"operator": {
"type": "number",
"operation": "gt"
}
}
],
"combinator": "and"
}
}`;

View File

@@ -0,0 +1,109 @@
export const HTTP_REQUEST_GUIDE = `
### HTTP Request Node Updates
#### Common Parameters
- **url**: The endpoint URL (can use expressions)
- **method**: GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS
- **authentication**: Type of auth (none, genericCredentialType, etc.)
- **sendHeaders**: Boolean to enable custom headers
- **headerParameters**: Array of header key-value pairs
- **sendBody**: Boolean to enable request body (for POST/PUT/PATCH)
- **bodyParameters**: Array of body parameters or raw body content
- **contentType**: json, form, raw, etc.
- **options**: Additional options like timeout, proxy, etc.
#### Header Structure
{
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "Header-Name",
"value": "Header Value or {{ expression }}"
}
]
}
}
#### Body Structure (JSON)
{
"sendBody": true,
"contentType": "json",
"bodyParameters": {
"parameters": [
{
"name": "fieldName",
"value": "fieldValue or {{ expression }}"
}
]
}
}
#### Authentication Options
- **none**: No authentication
- **genericCredentialType**: Use stored credentials
- **predefinedCredentialType**: Use specific credential type
- Can also set custom auth headers
#### Common Patterns
1. **Adding API Key Header**:
- Enable sendHeaders
- Add header with name "X-API-Key" or "Authorization"
2. **Setting Request Body**:
- Enable sendBody
- Set contentType (usually "json")
- Add parameters to bodyParameters.parameters array
3. **Dynamic URLs**:
- Can use expressions: "=https://api.example.com/{{ $('Set').item.json.endpoint }}"
- Can reference previous node data
4. **Query Parameters**:
- Can be part of URL or set in options.queryParameters
#### Example: HTTP Request with Headers and Body
Current Parameters:
{
"method": "GET",
"url": "https://api.example.com/data"
}
Requested Changes:
- Change to POST method
- Add API key header
- Add JSON body with user ID and status
Expected Output:
{
"method": "POST",
"url": "https://api.example.com/data",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "X-API-Key",
"value": "={{ $credentials.apiKey }}"
},
{
"name": "Content-Type",
"value": "application/json"
}
]
},
"sendBody": true,
"contentType": "json",
"bodyParameters": {
"parameters": [
{
"name": "userId",
"value": "={{ $('Previous Node').item.json.id }}"
},
{
"name": "status",
"value": "active"
}
]
},
"options": {}
}`;

View File

@@ -0,0 +1,154 @@
export const IF_NODE_GUIDE = `
### IF Node Updates - Comprehensive Guide
The IF node uses a complex filter structure for conditional logic. Understanding the correct operator format is crucial.
#### IF Node Structure
\`\`\`json
{
"conditions": {
"options": {
"caseSensitive": false, // For string comparisons
"leftValue": "", // Optional default left value
"typeValidation": "loose" // "strict" or "loose"
},
"conditions": [
{
"id": "unique-id", // Optional, auto-generated
"leftValue": "={{ $('Node').item.json.field }}",
"rightValue": "value", // Can be expression or literal
"operator": {
"type": "string|number|boolean|dateTime|array|object",
"operation": "specific-operation"
}
}
],
"combinator": "and" // "and" or "or"
}
}
\`\`\`
#### Complete Operator Reference
##### String Operators
- **exists**: Check if value exists (singleValue: true, no rightValue needed)
\`{ "type": "string", "operation": "exists" }\`
- **notExists**: Check if value doesn't exist (singleValue: true)
\`{ "type": "string", "operation": "notExists" }\`
- **empty**: Check if string is empty (singleValue: true)
\`{ "type": "string", "operation": "empty" }\`
- **notEmpty**: Check if string is not empty (singleValue: true)
\`{ "type": "string", "operation": "notEmpty" }\`
- **equals**: Exact match
\`{ "type": "string", "operation": "equals" }\`
- **notEquals**: Not equal
\`{ "type": "string", "operation": "notEquals" }\`
- **contains**: Contains substring
\`{ "type": "string", "operation": "contains" }\`
- **notContains**: Doesn't contain substring
\`{ "type": "string", "operation": "notContains" }\`
- **startsWith**: Starts with string
\`{ "type": "string", "operation": "startsWith" }\`
- **notStartsWith**: Doesn't start with
\`{ "type": "string", "operation": "notStartsWith" }\`
- **endsWith**: Ends with string
\`{ "type": "string", "operation": "endsWith" }\`
- **notEndsWith**: Doesn't end with
\`{ "type": "string", "operation": "notEndsWith" }\`
- **regex**: Matches regex pattern
\`{ "type": "string", "operation": "regex" }\`
- **notRegex**: Doesn't match regex
\`{ "type": "string", "operation": "notRegex" }\`
##### Number Operators
- **exists**: Check if number exists (singleValue: true)
\`{ "type": "number", "operation": "exists" }\`
- **notExists**: Check if number doesn't exist (singleValue: true)
\`{ "type": "number", "operation": "notExists" }\`
- **equals**: Equal to
\`{ "type": "number", "operation": "equals" }\`
- **notEquals**: Not equal to
\`{ "type": "number", "operation": "notEquals" }\`
- **gt**: Greater than
\`{ "type": "number", "operation": "gt" }\`
- **lt**: Less than
\`{ "type": "number", "operation": "lt" }\`
- **gte**: Greater than or equal
\`{ "type": "number", "operation": "gte" }\`
- **lte**: Less than or equal
\`{ "type": "number", "operation": "lte" }\`
##### DateTime Operators
- **exists**: Check if date exists (singleValue: true)
\`{ "type": "dateTime", "operation": "exists" }\`
- **notExists**: Check if date doesn't exist (singleValue: true)
\`{ "type": "dateTime", "operation": "notExists" }\`
- **equals**: Same date/time
\`{ "type": "dateTime", "operation": "equals" }\`
- **notEquals**: Different date/time
\`{ "type": "dateTime", "operation": "notEquals" }\`
- **after**: After date
\`{ "type": "dateTime", "operation": "after" }\`
- **before**: Before date
\`{ "type": "dateTime", "operation": "before" }\`
- **afterOrEquals**: After or same date
\`{ "type": "dateTime", "operation": "afterOrEquals" }\`
- **beforeOrEquals**: Before or same date
\`{ "type": "dateTime", "operation": "beforeOrEquals" }\`
##### Boolean Operators
- **exists**: Check if boolean exists (singleValue: true)
\`{ "type": "boolean", "operation": "exists" }\`
- **notExists**: Check if boolean doesn't exist (singleValue: true)
\`{ "type": "boolean", "operation": "notExists" }\`
- **true**: Is true (singleValue: true)
\`{ "type": "boolean", "operation": "true" }\`
- **false**: Is false (singleValue: true)
\`{ "type": "boolean", "operation": "false" }\`
- **equals**: Equal to boolean value
\`{ "type": "boolean", "operation": "equals" }\`
- **notEquals**: Not equal to boolean value
\`{ "type": "boolean", "operation": "notEquals" }\`
##### Array Operators
- **exists**: Check if array exists (singleValue: true)
\`{ "type": "array", "operation": "exists" }\`
- **notExists**: Check if array doesn't exist (singleValue: true)
\`{ "type": "array", "operation": "notExists" }\`
- **empty**: Array is empty (singleValue: true)
\`{ "type": "array", "operation": "empty" }\`
- **notEmpty**: Array is not empty (singleValue: true)
\`{ "type": "array", "operation": "notEmpty" }\`
- **contains**: Array contains value
\`{ "type": "array", "operation": "contains" }\`
- **notContains**: Array doesn't contain value
\`{ "type": "array", "operation": "notContains" }\`
- **lengthEquals**: Array length equals
\`{ "type": "array", "operation": "lengthEquals" }\`
- **lengthNotEquals**: Array length not equals
\`{ "type": "array", "operation": "lengthNotEquals" }\`
- **lengthGt**: Array length greater than
\`{ "type": "array", "operation": "lengthGt" }\`
- **lengthLt**: Array length less than
\`{ "type": "array", "operation": "lengthLt" }\`
- **lengthGte**: Array length greater or equal
\`{ "type": "array", "operation": "lengthGte" }\`
- **lengthLte**: Array length less or equal
\`{ "type": "array", "operation": "lengthLte" }\`
##### Object Operators
- **exists**: Check if object exists (singleValue: true)
\`{ "type": "object", "operation": "exists" }\`
- **notExists**: Check if object doesn't exist (singleValue: true)
\`{ "type": "object", "operation": "notExists" }\`
- **empty**: Object is empty (singleValue: true)
\`{ "type": "object", "operation": "empty" }\`
- **notEmpty**: Object is not empty (singleValue: true)
\`{ "type": "object", "operation": "notEmpty" }\`
#### Important Notes:
1. **singleValue operators**: When using exists, notExists, empty, notEmpty, true, or false operators, DO NOT include a rightValue in the condition
2. **Expression values**: Both leftValue and rightValue can be expressions using \`={{ ... }}\` syntax
3. **Type matching**: The operator type must match the data type you're comparing
4. **Case sensitivity**: Only applies to string comparisons when caseSensitive is true in options
5. **Type validation**: "loose" allows type coercion, "strict" requires exact type matches`;

View File

@@ -0,0 +1,87 @@
export const SET_NODE_GUIDE = `
### Set Node Updates - Comprehensive Type Handling Guide
The Set node uses assignments to create or modify data fields. Each assignment has a specific type that determines how the value is formatted and processed.
#### Assignment Structure
\`\`\`json
{
"id": "unique-id",
"name": "field_name",
"value": "field_value", // Format depends on type
"type": "string|number|boolean|array|object"
}
\`\`\`
**CRITICAL**: ALWAYS use "value" field for ALL types. NEVER use type-specific fields like "stringValue", "numberValue", "booleanValue", etc. The field is ALWAYS named "value" regardless of the type.
#### Type-Specific Value Formatting
##### String Type
- **Format**: Direct string value or expression
- **Examples**:
- Literal: \`"Hello World"\`
- Expression: \`"={{ $('Previous Node').item.json.message }}"\`
- With embedded expressions: \`"=Order #{{ $('Set').item.json.orderId }} processed"\`
- **Use when**: Text data, IDs, names, messages, dates as strings
##### Number Type
- **Format**: Direct numeric value (NOT as a string)
- **Examples**:
- Integer: \`123\`
- Decimal: \`45.67\`
- Negative: \`-100\`
- Expression: \`"={{ $('HTTP Request').item.json.count }}"\`
- **CRITICAL**: Use actual numbers, not strings: \`123\` not \`"123"\`
- **Use when**: Quantities, prices, scores, numeric calculations
##### Boolean Type
- **Format**: Direct boolean value (NOT as a string)
- **Examples**:
- True: \`true\`
- False: \`false\`
- Expression: \`"={{ $('IF').item.json.isActive }}"\`
- **CRITICAL**: Use actual booleans, not strings: \`true\` not \`"true"\`
- **CRITICAL**: The field name is "value", NOT "booleanValue"
- **Use when**: Flags, toggles, yes/no values, active/inactive states
##### Array Type
- **Format**: JSON stringified array
- **Examples**:
- Simple array: \`"[1, 2, 3]"\`
- String array: \`"[\\"apple\\", \\"banana\\", \\"orange\\"]"\`
- Mixed array: \`"[\\"item1\\", 123, true]"\`
- Expression: \`"={{ JSON.stringify($('Previous Node').item.json.items) }}"\`
- **CRITICAL**: Arrays must be JSON stringified
- **Use when**: Lists, collections, multiple values
##### Object Type
- **Format**: JSON stringified object
- **Examples**:
- Simple object: \`"{ \\"name\\": \\"John\\", \\"age\\": 30 }"\`
- Nested object: \`"{ \\"user\\": { \\"id\\": 123, \\"role\\": \\"admin\\" } }"\`
- Expression: \`"={{ JSON.stringify($('Set').item.json.userData) }}"\`
- **CRITICAL**: Objects must be JSON stringified with escaped quotes
- **Use when**: Complex data structures, grouped properties
#### Important Type Selection Rules
1. **Analyze the requested data type**:
- "Set count to 5" → number type with value: \`5\`
- "Set message to hello" → string type with value: \`"hello"\`
- "Set active to true" → boolean type with value: \`true\`
- "Set tags to apple, banana" → array type with value: \`"[\\"apple\\", \\"banana\\"]"\`
2. **Expression handling**:
- All types can use expressions with \`"={{ ... }}"\`
- For arrays/objects from expressions, use \`JSON.stringify()\`
3. **Common mistakes to avoid**:
- WRONG: Setting number as string: \`{ "value": "123", "type": "number" }\`
- CORRECT: \`{ "value": 123, "type": "number" }\`
- WRONG: Setting boolean as string: \`{ "value": "false", "type": "boolean" }\`
- CORRECT: \`{ "value": false, "type": "boolean" }\`
- WRONG: Using type-specific field names: \`{ "booleanValue": true, "type": "boolean" }\`
- CORRECT: \`{ "value": true, "type": "boolean" }\`
- WRONG: Setting array without stringification: \`{ "value": [1,2,3], "type": "array" }\`
- CORRECT: \`{ "value": "[1,2,3]", "type": "array" }\``;

View File

@@ -0,0 +1,69 @@
export const TOOL_NODES_GUIDE = `
## CRITICAL: $fromAI Expression Support for Tool Nodes
Tool nodes (nodes ending with "Tool" like Gmail Tool, Google Calendar Tool, etc.) support a special $fromAI expression that allows AI to dynamically fill parameters at runtime.
### When to Use $fromAI
- ONLY available in tool nodes (node types ending with "Tool")
- Use when the AI should determine the value based on context
- Ideal for parameters that vary based on user input or conversation context
### $fromAI Syntax
\`={{ $fromAI('key', 'description', 'type', defaultValue) }}\`
### Parameters
- key: Unique identifier (1-64 chars, alphanumeric/underscore/hyphen)
- description: Optional description for the AI (use empty string '' if not needed)
- type: 'string' | 'number' | 'boolean' | 'json' (defaults to 'string')
- defaultValue: Optional fallback value
### Tool Node Examples
#### Gmail Tool - Sending Email
{
"sendTo": "={{ $fromAI('to') }}",
"subject": "={{ $fromAI('subject') }}",
"message": "={{ $fromAI('message_html') }}"
}
#### Google Calendar Tool - Filtering Events
{
"timeMin": "={{ $fromAI('After', '', 'string') }}",
"timeMax": "={{ $fromAI('Before', '', 'string') }}"
}
### Mixed Usage Examples
You can combine $fromAI with regular text:
- "Subject: {{ $fromAI('subject') }} - Automated"
- "Dear {{ $fromAI('recipientName', 'Customer name', 'string', 'Customer') }},"
### Important Rules
1. ONLY use $fromAI in tool nodes (check if node type ends with "Tool")
2. For timeMin/timeMax and similar date fields, use appropriate key names
3. The AI will fill these values based on context during execution
4. Don't use $fromAI in regular nodes like Set, IF, HTTP Request, etc.
## Tool Node Parameter Guidelines
### Identifying Tool Nodes
1. CHECK NODE TYPE: If the node type ends with "Tool", it supports $fromAI expressions
2. COMMON TOOL NODES:
- Gmail Tool (gmailTool): to, subject, message → use $fromAI
- Google Calendar Tool (googleCalendarTool): timeMin, timeMax → use $fromAI
- Slack Tool (slackTool): channel, message → use $fromAI
- Microsoft Teams Tool: channel, message → use $fromAI
- Telegram Tool: chatId, text → use $fromAI
- Other communication/document tools: content fields → use $fromAI
### When to Use $fromAI in Tool Nodes
1. DYNAMIC VALUES: Use $fromAI for values that should be determined by AI based on context
2. USER INPUT FIELDS: Recipients, subjects, messages, date ranges
3. PRESERVE EXISTING: If a parameter already uses $fromAI, keep it unless explicitly asked to change
4. DATE/TIME FIELDS: Use descriptive key names for clarity
### Tool Node Parameter Patterns
- Email recipients: "={{ $fromAI('to') }}"
- Email subjects: "={{ $fromAI('subject') }}"
- Message content: "={{ $fromAI('message_html') }}" or "={{ $fromAI('message') }}"
- Date ranges: "={{ $fromAI('After', '', 'string') }}"
- Channel IDs: "={{ $fromAI('channel') }}"`;

View File

@@ -0,0 +1,91 @@
export const RESOURCE_LOCATOR_GUIDE = `
## IMPORTANT: ResourceLocator Parameter Handling
ResourceLocator parameters are special fields used for selecting resources like Slack channels, Google Drive files, Notion pages, etc. They MUST have a specific structure:
### Required ResourceLocator Structure:
\`\`\`json
{
"__rl": true,
"mode": "id" | "url" | "list" | "name",
"value": "the-actual-value"
}
\`\`\`
### Mode Detection Guidelines:
- Use mode "url" when the value is a URL (starts with http:// or https://)
- Use mode "id" when the value looks like an ID (alphanumeric string)
- Use mode "name" when the value has a prefix like # (Slack channels) or @ (users)
- Use mode "list" when referencing a dropdown selection (rarely needed in updates)
### ResourceLocator Examples:
#### Example 1: Slack Channel by ID
Parameter name: channelId
Change: "Set channel to C0122KQ70S7E"
Output:
\`\`\`json
{
"channelId": {
"__rl": true,
"mode": "id",
"value": "C0122KQ70S7E"
}
}
\`\`\`
#### Example 2: Google Drive File by URL
Parameter name: fileId
Change: "Use file https://drive.google.com/file/d/1Nvdl7bEfDW33cKQuwfItPhIk479--WYY/view"
Output:
\`\`\`json
{
"fileId": {
"__rl": true,
"mode": "url",
"value": "https://drive.google.com/file/d/1Nvdl7bEfDW33cKQuwfItPhIk479--WYY/view"
}
}
\`\`\`
#### Example 3: Notion Page by ID
Parameter name: pageId
Change: "Set page ID to 123e4567-e89b-12d3"
Output:
\`\`\`json
{
"pageId": {
"__rl": true,
"mode": "id",
"value": "123e4567-e89b-12d3"
}
}
\`\`\`
#### Example 4: Slack Channel by Name
Parameter name: channelId
Change: "Send to #general channel"
Output:
\`\`\`json
{
"channelId": {
"__rl": true,
"mode": "name",
"value": "#general"
}
}
\`\`\`
#### Example 5: Using Expression with ResourceLocator
Parameter name: channelId
Change: "Use channel ID from previous node"
Output:
\`\`\`json
{
"channelId": {
"__rl": true,
"mode": "id",
"value": "={{ $('Previous Node').item.json.channelId }}"
}
}
\`\`\``;

View File

@@ -0,0 +1,18 @@
export const TEXT_FIELDS_GUIDE = `
## Text Field Expression Formatting
### PREFERRED METHOD: Embedding expressions directly within text
\`\`\`
"text": "=ALERT: It is currently {{ $('Weather Node').item.json.weather }} in {{ $('Weather Node').item.json.city }}!"
\`\`\`
### Alternative method: Using string concatenation (use only when needed)
\`\`\`
"text": "={{ 'ALERT: It is currently ' + $('Weather Node').item.json.weather + ' in ' + $('Weather Node').item.json.city + '!' }}"
\`\`\`
### Key Points:
- Use the embedded expression format when mixing static text with dynamic values
- The entire string must start with = when using expressions
- In the embedded format, each expression uses double curly braces {{ }} and the leading = appears once at the start of the string
- In the concatenation format, the entire value is wrapped in a single ={{ }} expression`;

View File

@@ -0,0 +1,184 @@
import type { INodeTypeDescription, INodeProperties } from 'n8n-workflow';
import { COMMON_PATTERNS } from './base/common-patterns';
import { CORE_INSTRUCTIONS } from './base/core-instructions';
import { EXPRESSION_RULES } from './base/expression-rules';
import { OUTPUT_FORMAT } from './base/output-format';
import { RESOURCE_LOCATOR_EXAMPLES } from './examples/advanced/resource-locator-examples';
import { TOOL_NODE_EXAMPLES } from './examples/advanced/tool-node-examples';
import { IF_NODE_EXAMPLES } from './examples/basic/if-node-examples';
import { SET_NODE_EXAMPLES } from './examples/basic/set-node-examples';
import { SIMPLE_UPDATE_EXAMPLES } from './examples/basic/simple-updates';
import { HTTP_REQUEST_GUIDE } from './node-types/http-request';
import { IF_NODE_GUIDE } from './node-types/if-node';
import { SET_NODE_GUIDE } from './node-types/set-node';
import { TOOL_NODES_GUIDE } from './node-types/tool-nodes';
import { RESOURCE_LOCATOR_GUIDE } from './parameter-types/resource-locator';
import { TEXT_FIELDS_GUIDE } from './parameter-types/text-fields';
import {
DEFAULT_PROMPT_CONFIG,
getNodeTypeCategory,
mentionsResourceKeywords,
} from './prompt-config';
import type { PromptBuilderContext } from '../../types/config';
export class ParameterUpdatePromptBuilder {
/**
* Builds a dynamic system prompt based on the context
*/
static buildSystemPrompt(context: PromptBuilderContext): string {
const options = context.options ?? {};
const sections: string[] = [];
// Always include base sections
sections.push(CORE_INSTRUCTIONS);
sections.push(EXPRESSION_RULES);
// Add node-type specific guides
if (this.isSetNode(context.nodeType)) {
sections.push(SET_NODE_GUIDE);
} else if (this.isIfNode(context.nodeType)) {
sections.push(IF_NODE_GUIDE);
} else if (this.isHttpRequestNode(context.nodeType)) {
sections.push(HTTP_REQUEST_GUIDE);
}
// Add tool node guide if applicable
if (this.isToolNode(context.nodeType)) {
sections.push(TOOL_NODES_GUIDE);
}
// Add resource locator guide if needed
if (context.hasResourceLocatorParams || this.needsResourceLocatorGuide(context)) {
sections.push(RESOURCE_LOCATOR_GUIDE);
}
// Add text field guide if dealing with text parameters
if (this.hasTextFields(context.nodeDefinition)) {
sections.push(TEXT_FIELDS_GUIDE);
}
// Add common patterns
sections.push(COMMON_PATTERNS);
// Add relevant examples if enabled
if (options.includeExamples !== false) {
const examples = this.selectRelevantExamples(context);
if (examples.length > 0) {
sections.push('\n## Relevant Examples');
sections.push(...examples);
}
}
// Always include output format at the end
sections.push(OUTPUT_FORMAT);
const finalPrompt = sections.join('\n');
return finalPrompt;
}
/**
* Checks if node is a Set node
*/
private static isSetNode(nodeType: string): boolean {
const category = getNodeTypeCategory(nodeType);
return category === 'set';
}
/**
* Checks if node is an IF node
*/
private static isIfNode(nodeType: string): boolean {
const category = getNodeTypeCategory(nodeType);
return category === 'if';
}
/**
* Checks if node is an HTTP Request node
*/
private static isHttpRequestNode(nodeType: string): boolean {
const category = getNodeTypeCategory(nodeType);
return category === 'httpRequest';
}
/**
* Checks if node is a tool node (supports $fromAI)
*/
private static isToolNode(nodeType: string): boolean {
const category = getNodeTypeCategory(nodeType);
return category === 'tool';
}
/**
* Checks if any parameters are of type resourceLocator
*/
private static needsResourceLocatorGuide(context: PromptBuilderContext): boolean {
return mentionsResourceKeywords(context.requestedChanges, context.config);
}
/**
* Checks if node has text/string fields that might use expressions
*/
private static hasTextFields(nodeDefinition: INodeTypeDescription): boolean {
if (!nodeDefinition.properties) return false;
return nodeDefinition.properties.some(
(prop) => prop.type === 'string' && prop.typeOptions?.multipleValues !== true,
);
}
/**
* Selects most relevant examples based on context
*/
private static selectRelevantExamples(context: PromptBuilderContext): string[] {
const examples: string[] = [];
const config = context.config ?? DEFAULT_PROMPT_CONFIG;
const maxExamples = context.options?.maxExamples ?? config.maxExamples;
// Priority order for example selection
if (this.isToolNode(context.nodeType)) {
examples.push(TOOL_NODE_EXAMPLES);
} else if (this.isSetNode(context.nodeType)) {
examples.push(SET_NODE_EXAMPLES);
} else if (this.isIfNode(context.nodeType)) {
examples.push(IF_NODE_EXAMPLES);
}
// Add resource locator examples if needed
if (context.hasResourceLocatorParams) {
examples.push(RESOURCE_LOCATOR_EXAMPLES);
}
// Add simple examples if we have room
if (examples.length === 0) {
examples.push(SIMPLE_UPDATE_EXAMPLES);
}
// Limit to max examples
return examples.slice(0, maxExamples);
}
/**
* Analyzes node definition to determine if it has resource locator parameters
*/
static hasResourceLocatorParameters(nodeDefinition: INodeTypeDescription): boolean {
if (!nodeDefinition.properties) return false;
const checkProperties = (properties: INodeProperties[]): boolean => {
for (const prop of properties) {
if (prop.type === 'resourceLocator' || prop.type === 'fixedCollection') return true;
}
return false;
};
return checkProperties(nodeDefinition.properties);
}
/**
* Get token estimate for the built prompt
*/
static estimateTokens(prompt: string): number {
// Rough estimate: 1 token ≈ 4 characters
return Math.ceil(prompt.length / 4);
}
}
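A brief sketch of calling the builder, assuming a Set node description is available; the import path is a placeholder.

import type { INodeTypeDescription } from 'n8n-workflow';
import { ParameterUpdatePromptBuilder } from './chains/prompts/prompt-builder';

function buildSetNodePrompt(setNodeDescription: INodeTypeDescription) {
  const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
    nodeType: 'n8n-nodes-base.set',
    nodeDefinition: setNodeDescription,
    requestedChanges: ['Set message to "Hello World"'],
    hasResourceLocatorParams:
      ParameterUpdatePromptBuilder.hasResourceLocatorParameters(setNodeDescription),
  });
  // Rough size check against the ~3000 token budget defined in prompt-config.
  return { prompt, estimatedTokens: ParameterUpdatePromptBuilder.estimateTokens(prompt) };
}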

View File

@@ -0,0 +1,74 @@
import type { NodePromptConfig } from '../../types/config';
export const DEFAULT_PROMPT_CONFIG: NodePromptConfig = {
nodeTypePatterns: {
set: ['n8n-nodes-base.set', 'set'],
if: ['n8n-nodes-base.if', 'if', 'filter'],
httpRequest: ['n8n-nodes-base.httpRequest', 'httprequest', 'webhook', 'n8n-nodes-base.webhook'],
tool: ['Tool', '.tool'],
},
parameterKeywords: {
resourceLocator: [
'channel',
'file',
'page',
'document',
'sheet',
'folder',
'database',
'board',
'list',
'space',
],
textExpressions: ['message', 'text', 'content', 'body', 'description', 'title', 'subject'],
},
maxExamples: 3,
targetTokenBudget: 3000,
};
/**
* Get node type category for a given node type
*/
export function getNodeTypeCategory(
nodeType: string,
config: NodePromptConfig = DEFAULT_PROMPT_CONFIG,
): string | null {
const lowerType = nodeType.toLowerCase();
for (const [category, patterns] of Object.entries(config.nodeTypePatterns)) {
if (patterns.some((pattern) => lowerType.includes(pattern.toLowerCase()))) {
return category;
}
}
// Special check for tool nodes
if (nodeType.endsWith('Tool') || nodeType.includes('.tool')) {
return 'tool';
}
return null;
}
/**
* Check if changes mention resource-related keywords
*/
export function mentionsResourceKeywords(
changes: string[],
config: NodePromptConfig = DEFAULT_PROMPT_CONFIG,
): boolean {
const changesText = changes.join(' ').toLowerCase();
return config.parameterKeywords.resourceLocator.some((keyword) => changesText.includes(keyword));
}
/**
* Check if changes mention text/expression-related keywords
*/
export function mentionsTextKeywords(
changes: string[],
config: NodePromptConfig = DEFAULT_PROMPT_CONFIG,
): boolean {
const changesText = changes.join(' ').toLowerCase();
return config.parameterKeywords.textExpressions.some((keyword) => changesText.includes(keyword));
}
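A few illustrative calls against the defaults above; the import path is assumed and the commented results follow from DEFAULT_PROMPT_CONFIG.

import { getNodeTypeCategory, mentionsResourceKeywords } from './chains/prompts/prompt-config';

getNodeTypeCategory('n8n-nodes-base.httpRequest'); // 'httpRequest'
getNodeTypeCategory('gmailTool'); // 'tool' (matches the 'Tool' pattern)
getNodeTypeCategory('n8n-nodes-base.merge'); // null, no pattern applies
mentionsResourceKeywords(['Send to channel #general']); // true, 'channel' is a resource keyword
mentionsResourceKeywords(['Change the method to POST']); // false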

View File

@@ -0,0 +1,177 @@
import type { INodeTypeDescription } from 'n8n-workflow';
import { ParameterUpdatePromptBuilder } from '../prompt-builder';
// Mock node type definition
const mockSetNodeDefinition: INodeTypeDescription = {
displayName: 'Set',
name: 'n8n-nodes-base.set',
group: ['transform'],
version: 1,
description: 'Set values',
defaults: { name: 'Set' },
inputs: ['main'],
outputs: ['main'],
properties: [
{
displayName: 'Assignments',
name: 'assignments',
type: 'fixedCollection',
default: {},
typeOptions: { multipleValues: true },
options: [],
},
],
};
const mockIfNodeDefinition: INodeTypeDescription = {
displayName: 'IF',
name: 'n8n-nodes-base.if',
group: ['transform'],
version: 1,
description: 'Conditional logic',
defaults: { name: 'IF' },
inputs: ['main'],
outputs: ['main', 'main'],
properties: [
{
displayName: 'Conditions',
name: 'conditions',
type: 'filter',
default: {},
},
],
};
const mockToolNodeDefinition: INodeTypeDescription = {
displayName: 'Gmail Tool',
name: 'gmailTool',
group: ['output'],
version: 1,
description: 'Send emails via Gmail',
defaults: { name: 'Gmail Tool' },
inputs: ['main'],
outputs: ['main'],
properties: [
{
displayName: 'To',
name: 'sendTo',
type: 'string',
default: '',
},
{
displayName: 'Subject',
name: 'subject',
type: 'string',
default: '',
},
],
};
describe('ParameterUpdatePromptBuilder', () => {
describe('buildSystemPrompt', () => {
it('should include Set node guide for Set nodes', () => {
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: mockSetNodeDefinition,
requestedChanges: ['Set message to Hello'],
});
expect(prompt).toContain('Set Node Updates - Comprehensive Type Handling Guide');
expect(prompt).toContain('Assignment Structure');
});
it('should include IF node guide for IF nodes', () => {
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.if',
nodeDefinition: mockIfNodeDefinition,
requestedChanges: ['Check if status equals active'],
});
expect(prompt).toContain('IF Node Updates - Comprehensive Guide');
expect(prompt).toContain('Complete Operator Reference');
});
it('should include tool node guide for tool nodes', () => {
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'gmailTool',
nodeDefinition: mockToolNodeDefinition,
requestedChanges: ['Send email to user'],
});
expect(prompt).toContain('$fromAI Expression Support for Tool Nodes');
expect(prompt).toContain('Gmail Tool');
});
it('should include resource locator guide when keywords are mentioned', () => {
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.slack',
nodeDefinition: {} as INodeTypeDescription,
requestedChanges: ['Send to channel #general'],
});
expect(prompt).toContain('ResourceLocator Parameter Handling');
});
it('should estimate tokens correctly', () => {
const testString = 'a'.repeat(400); // 400 characters
const estimate = ParameterUpdatePromptBuilder.estimateTokens(testString);
expect(estimate).toBe(100); // 400 / 4 = 100
});
it('should respect includeExamples option', () => {
const promptWithExamples = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: mockSetNodeDefinition,
requestedChanges: ['Set value'],
options: { includeExamples: true },
});
const promptWithoutExamples = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: mockSetNodeDefinition,
requestedChanges: ['Set value'],
options: { includeExamples: false },
});
expect(promptWithExamples.length).toBeGreaterThan(promptWithoutExamples.length);
expect(promptWithoutExamples).not.toContain('Relevant Examples');
});
it('should keep prompt size reasonable', () => {
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.httpRequest',
nodeDefinition: {} as INodeTypeDescription,
requestedChanges: ['Add API key header', 'Set body parameters'],
});
const tokens = ParameterUpdatePromptBuilder.estimateTokens(prompt);
expect(tokens).toBeLessThan(3500); // Should stay under budget
expect(tokens).toBeGreaterThan(1000); // Should have substantial content
});
});
describe('hasResourceLocatorParameters', () => {
it('should detect resource locator parameters', () => {
const nodeWithResourceLocator: INodeTypeDescription = {
...mockSetNodeDefinition,
properties: [
{
displayName: 'Channel',
name: 'channelId',
type: 'resourceLocator',
default: { mode: 'list', value: '' },
},
],
};
const hasResourceLocator =
ParameterUpdatePromptBuilder.hasResourceLocatorParameters(nodeWithResourceLocator);
expect(hasResourceLocator).toBe(true);
// The Set node mock uses a fixedCollection property, which also triggers the resource locator guide
const fixedCollectionResult =
ParameterUpdatePromptBuilder.hasResourceLocatorParameters(mockSetNodeDefinition);
expect(fixedCollectionResult).toBe(true);
});
});
});

View File

@@ -0,0 +1,162 @@
import { OperationalError, UnexpectedError } from 'n8n-workflow';
import type { OperationalErrorOptions, UnexpectedErrorOptions } from 'n8n-workflow';
/**
* Base error class for AI Workflow Builder specific errors
*/
export abstract class AiWorkflowBuilderError extends Error {
constructor(message: string, options?: ErrorOptions) {
super(message, options);
this.name = this.constructor.name;
}
}
/**
* Error thrown when a node is not found in the workflow
*/
export class NodeNotFoundError extends OperationalError {
constructor(nodeId: string, nodeType?: string, options?: OperationalErrorOptions) {
super(`Node with ID "${nodeId}" not found in workflow`, {
...options,
tags: {
...options?.tags,
nodeId,
nodeType,
},
shouldReport: false,
});
}
}
/**
* Error thrown when a node type is not found in the available node types
*/
export class NodeTypeNotFoundError extends OperationalError {
constructor(nodeType: string, options?: OperationalErrorOptions) {
super(`Node type "${nodeType}" not found`, {
...options,
tags: {
...options?.tags,
nodeType,
},
shouldReport: false,
});
}
}
/**
* Error thrown when there's an issue with node connections
*/
export class ConnectionError extends OperationalError {
constructor(
message: string,
options?: OperationalErrorOptions & {
fromNodeId?: string;
toNodeId?: string;
connectionType?: string;
},
) {
super(message, {
...options,
tags: {
...options?.tags,
fromNodeId: options?.fromNodeId,
toNodeId: options?.toNodeId,
connectionType: options?.connectionType,
},
shouldReport: false,
});
}
}
/**
* Error thrown when the LLM service fails
*/
export class LLMServiceError extends OperationalError {
constructor(
message: string,
options?: OperationalErrorOptions & { llmModel?: string; statusCode?: number },
) {
super(message, {
...options,
tags: {
...options?.tags,
llmModel: options?.llmModel,
statusCode: options?.statusCode,
},
shouldReport: true,
});
}
}
/**
* Error thrown when validation fails
*/
export class ValidationError extends OperationalError {
constructor(
message: string,
options?: OperationalErrorOptions & { field?: string; value?: unknown },
) {
super(message, {
...options,
tags: {
...options?.tags,
field: options?.field,
},
extra: {
...options?.extra,
value: options?.value,
},
shouldReport: false,
});
}
}
/**
* Error thrown when parameter update fails
*/
export class ParameterUpdateError extends OperationalError {
constructor(
message: string,
options?: OperationalErrorOptions & { nodeId?: string; nodeType: string; parameter?: string },
) {
super(message, {
...options,
tags: {
...options?.tags,
nodeId: options?.nodeId,
nodeType: options?.nodeType,
parameter: options?.parameter,
},
shouldReport: false,
});
}
}
/**
* Error thrown when workflow state is invalid
*/
export class WorkflowStateError extends UnexpectedError {
constructor(message: string, options?: UnexpectedErrorOptions) {
super(message, {
...options,
shouldReport: true,
});
}
}
/**
* Error thrown when tool execution fails unexpectedly
*/
export class ToolExecutionError extends UnexpectedError {
constructor(message: string, options?: UnexpectedErrorOptions & { toolName?: string }) {
super(message, {
...options,
shouldReport: true,
tags: {
...options?.tags,
toolName: options?.toolName,
},
});
}
}
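
For illustration, a minimal guard shows how these operational errors carry their structured tags; the helper below is hypothetical and not part of this commit:

// Hypothetical caller showing how tags.nodeType travels with the error.
// shouldReport stays false, so this expected failure is surfaced to the user but not reported.
function requireKnownNodeType(nodeType: string, knownTypes: string[]): void {
  if (!knownTypes.includes(nodeType)) {
    throw new NodeTypeNotFoundError(nodeType);
  }
}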

View File

@@ -1,3 +1,3 @@
export * from './ai-workflow-builder.service';
export * from './ai-workflow-builder-agent.service';
export * from './types';
export * from './workflow-state';

View File

@@ -1,10 +1,11 @@
type LLMConfig = {
// Provider-specific configuration for the LLM factories below (distinct from the package's LLMConfig type)
interface LLMProviderConfig {
apiKey: string;
baseUrl?: string;
headers?: Record<string, string>;
};
}
export const o4mini = async (config: LLMConfig) => {
export const o4mini = async (config: LLMProviderConfig) => {
const { ChatOpenAI } = await import('@langchain/openai');
return new ChatOpenAI({
model: 'o4-mini-2025-04-16',
@@ -16,12 +17,13 @@ export const o4mini = async (config: LLMConfig) => {
});
};
export const gpt41mini = async (config: LLMConfig) => {
export const gpt41mini = async (config: LLMProviderConfig) => {
const { ChatOpenAI } = await import('@langchain/openai');
return new ChatOpenAI({
model: 'gpt-4.1-mini-2025-04-14',
apiKey: config.apiKey,
temperature: 0,
maxTokens: -1,
configuration: {
baseURL: config.baseUrl,
defaultHeaders: config.headers,
@@ -29,10 +31,24 @@ export const gpt41mini = async (config: LLMConfig) => {
});
};
export const anthropicClaude37Sonnet = async (config: LLMConfig) => {
export const gpt41 = async (config: LLMProviderConfig) => {
const { ChatOpenAI } = await import('@langchain/openai');
return new ChatOpenAI({
model: 'gpt-4.1-2025-04-14',
apiKey: config.apiKey,
temperature: 0.3,
maxTokens: -1,
configuration: {
baseURL: config.baseUrl,
defaultHeaders: config.headers,
},
});
};
export const anthropicClaudeSonnet4 = async (config: LLMProviderConfig) => {
const { ChatAnthropic } = await import('@langchain/anthropic');
return new ChatAnthropic({
model: 'claude-3-7-sonnet-20250219',
model: 'claude-sonnet-4-20250514',
apiKey: config.apiKey,
temperature: 0,
maxTokens: 16000,

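A usage sketch for the provider factories in this file; the wrapper function below is an assumption, and the API keys are supplied by the caller:

// Hypothetical helper combining the factories above.
async function createBuilderModels(anthropicApiKey: string, openAiApiKey: string) {
  const primary = await anthropicClaudeSonnet4({ apiKey: anthropicApiKey });
  const utility = await gpt41mini({ apiKey: openAiApiKey });
  return { primary, utility };
}
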
View File

@@ -0,0 +1,197 @@
import { tool } from '@langchain/core/tools';
import type { INode, INodeParameters, INodeTypeDescription } from 'n8n-workflow';
import { z } from 'zod';
import { NodeTypeNotFoundError, ValidationError } from '../errors';
import { createNodeInstance, generateUniqueName } from './utils/node-creation.utils';
import { calculateNodePosition } from './utils/node-positioning.utils';
import { isSubNode } from '../utils/node-helpers';
import { createProgressReporter } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import { getCurrentWorkflow, addNodeToWorkflow, getWorkflowState } from './helpers/state';
import { findNodeType } from './helpers/validation';
import type { AddedNode } from '../types/nodes';
import type { AddNodeOutput, ToolError } from '../types/tools';
/**
* Schema for node creation input
*/
export const nodeCreationSchema = z.object({
nodeType: z.string().describe('The type of node to add (e.g., n8n-nodes-base.httpRequest)'),
name: z
.string()
.describe('A descriptive name for the node that clearly indicates its purpose in the workflow'),
connectionParametersReasoning: z
.string()
.describe(
'REQUIRED: Explain your reasoning about connection parameters. Consider: Does this node have dynamic inputs/outputs? Does it need mode/operation parameters? For example: "Vector Store has dynamic inputs based on mode, so I need to set mode:insert for document input" or "HTTP Request has static inputs, so no special parameters needed"',
),
connectionParameters: z
.object({})
.passthrough()
.describe(
'Parameters that affect node connections (e.g., mode: "insert" for Vector Store). Pass an empty object {} if no connection parameters are needed. Only connection-affecting parameters like mode, operation, resource, action, etc. are allowed.',
),
});
/**
* Create a new node with proper positioning and naming
*/
function createNode(
nodeType: INodeTypeDescription,
customName: string,
existingNodes: INode[],
nodeTypes: INodeTypeDescription[],
connectionParameters?: INodeParameters,
): INode {
// Generate unique name
const baseName = customName ?? nodeType.defaults?.name ?? nodeType.displayName;
const uniqueName = generateUniqueName(baseName, existingNodes);
// Calculate position
const position = calculateNodePosition(existingNodes, isSubNode(nodeType), nodeTypes);
// Create the node instance with connection parameters
return createNodeInstance(nodeType, uniqueName, position, connectionParameters);
}
/**
* Build the response message for added node
*/
function buildResponseMessage(addedNode: AddedNode, nodeTypes: INodeTypeDescription[]): string {
const nodeType = nodeTypes.find((nt) => nt.name === addedNode.type);
const nodeTypeInfo = nodeType && isSubNode(nodeType) ? ' (sub-node)' : '';
return `Successfully added "${addedNode.name}" (${addedNode.displayName ?? addedNode.type})${nodeTypeInfo} with ID ${addedNode.id}`;
}
/**
* Factory function to create the add node tool
*/
export function createAddNodeTool(nodeTypes: INodeTypeDescription[]) {
return tool(
async (input, config) => {
const reporter = createProgressReporter(config, 'add_nodes');
try {
// Validate input using Zod schema
const validatedInput = nodeCreationSchema.parse(input);
const { nodeType, name, connectionParametersReasoning, connectionParameters } =
validatedInput;
// Report tool start
reporter.start(validatedInput);
// Get current state
const state = getWorkflowState();
const workflow = getCurrentWorkflow(state);
// Report progress with reasoning
reporter.progress(`Adding ${name} (${connectionParametersReasoning})`);
// Find the node type
const nodeTypeDesc = findNodeType(nodeType, nodeTypes);
if (!nodeTypeDesc) {
const nodeError = new NodeTypeNotFoundError(nodeType);
const error = {
message: nodeError.message,
code: 'NODE_TYPE_NOT_FOUND',
details: { nodeType },
};
reporter.error(error);
return createErrorResponse(config, error);
}
// Create the new node
const newNode = createNode(
nodeTypeDesc,
name,
workflow.nodes, // Use current workflow nodes
nodeTypes,
connectionParameters as INodeParameters,
);
// Build node info
const addedNodeInfo: AddedNode = {
id: newNode.id,
name: newNode.name,
type: newNode.type,
displayName: nodeTypeDesc.displayName,
position: newNode.position,
parameters: newNode.parameters,
};
// Build success message
const message = buildResponseMessage(addedNodeInfo, nodeTypes);
// Report completion
const output: AddNodeOutput = {
addedNode: addedNodeInfo,
message,
};
reporter.complete(output);
// Return success with state updates - single node
const stateUpdates = addNodeToWorkflow(newNode);
return createSuccessResponse(config, message, stateUpdates);
} catch (error) {
// Handle validation or unexpected errors
let toolError: ToolError;
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid input parameters', {
field: error.errors[0]?.path.join('.'),
value: error.errors[0]?.message,
});
toolError = {
message: validationError.message,
code: 'VALIDATION_ERROR',
details: error.errors,
};
} else {
toolError = {
message: error instanceof Error ? error.message : 'Unknown error occurred',
code: 'EXECUTION_ERROR',
};
}
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'add_nodes',
description: `Add a node to the workflow canvas. Each node represents a specific action or operation (e.g., HTTP request, data transformation, database query). Always provide descriptive names that explain what the node does (e.g., "Get Customer Data", "Filter Active Users", "Send Email Notification"). The tool handles automatic positioning. Use this tool after searching for available node types to ensure they exist.
To add multiple nodes, call this tool multiple times in parallel.
CRITICAL: You MUST provide:
1. connectionParametersReasoning - Explain why you're choosing specific connection parameters or using {}
2. connectionParameters - The actual parameters (use {} for nodes without special needs)
IMPORTANT: DO NOT rely on default values! Always explicitly set connection-affecting parameters when they exist.
REASONING EXAMPLES:
- "Vector Store has dynamic inputs that change based on mode parameter, setting mode:insert to accept document inputs"
- "HTTP Request has static inputs/outputs, no connection parameters needed"
- "Document Loader needs textSplittingMode:custom to accept text splitter connections"
- "AI Agent has dynamic inputs, setting hasOutputParser:true to enable output parser connections"
- "Set node has standard main connections only, using empty parameters"
CONNECTION PARAMETERS (NEVER rely on defaults - always set explicitly):
- AI Agent (@n8n/n8n-nodes-langchain.agent):
- For output parser support: { hasOutputParser: true }
- Without output parser: { hasOutputParser: false }
- Vector Store (@n8n/n8n-nodes-langchain.vectorStoreInMemory):
- For document input: { mode: "insert" }
- For querying: { mode: "retrieve" }
- For AI tool use: { mode: "retrieve-as-tool" }
- Document Loader (@n8n/n8n-nodes-langchain.documentDefaultDataLoader):
- For text splitter input: { textSplittingMode: "custom" }
- For built-in splitting: { textSplittingMode: "simple" }
- Regular nodes (HTTP Request, Set, Code, etc.): {}
Think through the connectionParametersReasoning FIRST, then set connectionParameters based on your reasoning. If a parameter affects connections, SET IT EXPLICITLY.`,
schema: nodeCreationSchema,
},
);
}
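
For illustration, an input payload that satisfies nodeCreationSchema above might look like this; the node type and values are illustrative, and in practice the LLM produces this object as the tool call arguments:

// Illustrative payload only, reusing the schema and zod import from this file.
const exampleAddNodeInput: z.infer<typeof nodeCreationSchema> = {
  nodeType: '@n8n/n8n-nodes-langchain.vectorStoreInMemory',
  name: 'Store Product Docs',
  connectionParametersReasoning:
    'Vector Store has dynamic inputs based on mode, so mode: "insert" is set to accept document input',
  connectionParameters: { mode: 'insert' },
};
nodeCreationSchema.parse(exampleAddNodeInput); // passthrough() allows the extra "mode" key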

View File

@@ -0,0 +1,319 @@
import { tool } from '@langchain/core/tools';
import type { Logger } from '@n8n/backend-common';
import { type INodeTypeDescription } from 'n8n-workflow';
import { z } from 'zod';
import {
ConnectionError,
NodeNotFoundError,
NodeTypeNotFoundError,
ValidationError,
} from '../errors';
import type { SimpleWorkflow } from '../types/workflow';
import { createProgressReporter, reportProgress } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import { getCurrentWorkflow, getWorkflowState, updateWorkflowConnections } from './helpers/state';
import { validateNodeExists } from './helpers/validation';
import {
validateConnection,
formatConnectionMessage,
inferConnectionType,
} from './utils/connection.utils';
import type { ConnectNodesOutput } from '../types/tools';
/**
* Schema for node connection
*/
export const nodeConnectionSchema = z.object({
sourceNodeId: z
.string()
.describe(
'The UUID of the source node. For ai_* connections (ai_languageModel, ai_tool, etc.), this MUST be the sub-node (e.g., OpenAI Chat Model). For main connections, this is the node producing the output',
),
targetNodeId: z
.string()
.describe(
'The UUID of the target node. For ai_* connections, this MUST be the main node that accepts the sub-node (e.g., AI Agent, Basic LLM Chain). For main connections, this is the node receiving the input',
),
sourceOutputIndex: z
.number()
.optional()
.describe('The index of the output to connect from (default: 0)'),
targetInputIndex: z
.number()
.optional()
.describe('The index of the input to connect to (default: 0)'),
});
/**
* Factory function to create the connect nodes tool
*/
export function createConnectNodesTool(nodeTypes: INodeTypeDescription[], logger?: Logger) {
return tool(
// eslint-disable-next-line complexity
(input, config) => {
const reporter = createProgressReporter(config, 'connect_nodes');
try {
// Validate input using Zod schema
const validatedInput = nodeConnectionSchema.parse(input);
// Report tool start
reporter.start(validatedInput);
// Get current state
const state = getWorkflowState();
const workflow = getCurrentWorkflow(state);
// Report progress
reportProgress(reporter, 'Finding nodes to connect...');
// Find source and target nodes
let matchedSourceNode = validateNodeExists(validatedInput.sourceNodeId, workflow.nodes);
let matchedTargetNode = validateNodeExists(validatedInput.targetNodeId, workflow.nodes);
// Check if both nodes exist
if (!matchedSourceNode || !matchedTargetNode) {
const missingNodeId = !matchedSourceNode
? validatedInput.sourceNodeId
: validatedInput.targetNodeId;
const nodeError = new NodeNotFoundError(missingNodeId);
const error = {
message: nodeError.message,
code: 'NODES_NOT_FOUND',
details: {
sourceNodeId: validatedInput.sourceNodeId,
targetNodeId: validatedInput.targetNodeId,
foundSource: !!matchedSourceNode,
foundTarget: !!matchedTargetNode,
},
};
reporter.error(error);
return createErrorResponse(config, error);
}
// Find node type descriptions
const sourceNodeType = nodeTypes.find((nt) => nt.name === matchedSourceNode!.type);
const targetNodeType = nodeTypes.find((nt) => nt.name === matchedTargetNode!.type);
if (!sourceNodeType || !targetNodeType) {
const missingType = !sourceNodeType ? matchedSourceNode.type : matchedTargetNode.type;
const typeError = new NodeTypeNotFoundError(missingType);
const error = {
message: typeError.message,
code: 'NODE_TYPE_NOT_FOUND',
details: {
sourceType: matchedSourceNode.type,
targetType: matchedTargetNode.type,
},
};
reporter.error(error);
return createErrorResponse(config, error);
}
// Determine connection type
reportProgress(reporter, 'Inferring connection type...');
logger?.debug('\n=== Connect Nodes Tool ===');
logger?.debug(
`Attempting to connect: ${matchedSourceNode.name} -> ${matchedTargetNode.name}`,
);
const inferResult = inferConnectionType(
matchedSourceNode,
matchedTargetNode,
sourceNodeType,
targetNodeType,
);
if (inferResult.error) {
const connectionError = new ConnectionError(inferResult.error, {
fromNodeId: matchedSourceNode.id,
toNodeId: matchedTargetNode.id,
});
const error = {
message: connectionError.message,
code: 'CONNECTION_TYPE_INFERENCE_ERROR',
details: {
sourceNode: matchedSourceNode.name,
targetNode: matchedTargetNode.name,
possibleTypes: inferResult.possibleTypes,
},
};
reporter.error(error);
return createErrorResponse(config, error);
}
if (!inferResult.connectionType) {
const error = {
message: 'Could not infer connection type',
code: 'CONNECTION_TYPE_INFERENCE_FAILED',
details: {
sourceNode: matchedSourceNode.name,
targetNode: matchedTargetNode.name,
},
};
reporter.error(error);
return createErrorResponse(config, error);
}
const connectionType = inferResult.connectionType;
const inferredSwap = inferResult.requiresSwap ?? false;
// If swap is required from inference, swap the nodes
if (inferredSwap) {
logger?.debug('Swapping nodes based on inference result');
const temp = matchedSourceNode;
matchedSourceNode = matchedTargetNode;
matchedTargetNode = temp;
}
reportProgress(
reporter,
`Inferred connection type: ${connectionType}${inferredSwap ? ' (swapped nodes)' : ''}`,
);
logger?.debug(
`Final connection: ${matchedSourceNode.name} -> ${matchedTargetNode.name} (${connectionType})\n`,
);
// Report progress
reportProgress(
reporter,
`Connecting ${matchedSourceNode.name} to ${matchedTargetNode.name}...`,
);
// Validate connection and check if nodes need to be swapped
const validation = validateConnection(
matchedSourceNode,
matchedTargetNode,
connectionType,
nodeTypes,
);
if (!validation.valid) {
const connectionError = new ConnectionError(validation.error ?? 'Invalid connection', {
fromNodeId: matchedSourceNode.id,
toNodeId: matchedTargetNode.id,
});
const error = {
message: connectionError.message,
code: 'INVALID_CONNECTION',
details: {
sourceNode: matchedSourceNode.name,
targetNode: matchedTargetNode.name,
connectionType,
},
};
reporter.error(error);
return createErrorResponse(config, error);
}
// Use potentially swapped nodes
const actualSourceNode = validation.swappedSource ?? matchedSourceNode;
const actualTargetNode = validation.swappedTarget ?? matchedTargetNode;
// Track if nodes were swapped either during inference or validation
const swapped = inferredSwap || !!validation.shouldSwap;
// Create only the new connection (not the full connections object)
// This is important for parallel execution - each tool only returns its own connection
const sourceIndex = validatedInput.sourceOutputIndex ?? 0;
const targetIndex = validatedInput.targetInputIndex ?? 0;
const newConnection: SimpleWorkflow['connections'] = {
[actualSourceNode.name]: {
[connectionType]: Array(sourceIndex + 1)
.fill(null)
.map((_, i) =>
i === sourceIndex
? [
{
node: actualTargetNode.name,
type: connectionType,
index: targetIndex,
},
]
: [],
),
},
};
// Build success message
const message = formatConnectionMessage(
actualSourceNode.name,
actualTargetNode.name,
connectionType,
swapped,
);
// Report completion
const output: ConnectNodesOutput = {
sourceNode: actualSourceNode.name,
targetNode: actualTargetNode.name,
connectionType,
swapped,
message,
found: {
sourceNode: true,
targetNode: true,
},
};
reporter.complete(output);
// Return success with state updates
const stateUpdates = updateWorkflowConnections(newConnection);
return createSuccessResponse(config, message, stateUpdates);
} catch (error) {
// Handle validation or unexpected errors
let toolError;
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid connection parameters', {
field: error.errors[0]?.path.join('.'),
value: error.errors[0]?.message,
});
toolError = {
message: validationError.message,
code: 'VALIDATION_ERROR',
details: error.errors,
};
} else {
toolError = {
message: error instanceof Error ? error.message : 'Unknown error occurred',
code: 'EXECUTION_ERROR',
};
}
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'connect_nodes',
description: `Connect two nodes in the workflow. The tool automatically determines the connection type based on node capabilities and ensures correct connection direction.
UNDERSTANDING CONNECTIONS:
- SOURCE NODE: The node that PRODUCES output/provides capability
- TARGET NODE: The node that RECEIVES input/uses capability
- Flow direction: Source → Target
AUTOMATIC CONNECTION TYPE DETECTION:
- The tool analyzes the nodes' inputs and outputs to determine the appropriate connection type
- If multiple connection types are possible, the tool will provide an error with the available options
- The connection type is determined by matching compatible input/output types between nodes
For ai_* connections (ai_languageModel, ai_tool, ai_memory, ai_embedding, etc.):
- Sub-nodes are ALWAYS the source (they provide capabilities)
- Main nodes are ALWAYS the target (they use capabilities)
- The tool will AUTO-CORRECT if you specify them backwards
CONNECTION EXAMPLES:
- OpenAI Chat Model → AI Agent (detects ai_languageModel)
- Calculator Tool → AI Agent (detects ai_tool)
- Simple Memory → Basic LLM Chain (detects ai_memory)
- Embeddings OpenAI → Vector Store (detects ai_embedding)
- Document Loader → Embeddings OpenAI (detects ai_document)
- HTTP Request → Set (detects main)`,
schema: nodeConnectionSchema,
},
);
}
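
As a sketch, a valid input for this tool; the UUIDs are placeholders, and both indices default to 0 when omitted:

// Illustrative tool-call arguments: the sub-node is the source, the AI Agent is the target.
const exampleConnectInput: z.infer<typeof nodeConnectionSchema> = {
  sourceNodeId: '0a1b2c3d-0000-4000-8000-000000000001', // e.g. OpenAI Chat Model
  targetNodeId: '0a1b2c3d-0000-4000-8000-000000000002', // e.g. AI Agent
  sourceOutputIndex: 0,
  targetInputIndex: 0,
};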

View File

@@ -0,0 +1,215 @@
import type { INodeTypeDescription, NodeConnectionType } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';
import type { NodeSearchResult } from '../../types/nodes';
/**
* Scoring weights for different match types
*/
export const SCORE_WEIGHTS = {
NAME_CONTAINS: 10,
DISPLAY_NAME_CONTAINS: 8,
DESCRIPTION_CONTAINS: 5,
ALIAS_CONTAINS: 8,
NAME_EXACT: 20,
DISPLAY_NAME_EXACT: 15,
CONNECTION_EXACT: 100,
CONNECTION_IN_EXPRESSION: 50,
} as const;
/**
* Pure business logic for searching nodes
* Separated from tool infrastructure for better testability
*/
export class NodeSearchEngine {
constructor(private readonly nodeTypes: INodeTypeDescription[]) {}
/**
* Search nodes by name, display name, or description
* @param query - The search query string
* @param limit - Maximum number of results to return
* @returns Array of matching nodes sorted by relevance
*/
searchByName(query: string, limit: number = 20): NodeSearchResult[] {
const normalizedQuery = query.toLowerCase();
const results: NodeSearchResult[] = [];
for (const nodeType of this.nodeTypes) {
try {
const score = this.calculateNameScore(nodeType, normalizedQuery);
if (score > 0) {
results.push(this.createSearchResult(nodeType, score));
}
} catch {
// Skip node types that throw during scoring (e.g., malformed definitions)
}
}
return this.sortAndLimit(results, limit);
}
/**
* Search for sub-nodes that output a specific connection type
* @param connectionType - The connection type to search for
* @param limit - Maximum number of results
* @param nameFilter - Optional name filter
* @returns Array of matching sub-nodes
*/
searchByConnectionType(
connectionType: NodeConnectionType,
limit: number = 20,
nameFilter?: string,
): NodeSearchResult[] {
const results: NodeSearchResult[] = [];
const normalizedFilter = nameFilter?.toLowerCase();
for (const nodeType of this.nodeTypes) {
try {
const connectionScore = this.getConnectionScore(nodeType, connectionType);
if (connectionScore > 0) {
// Apply name filter if provided
const nameScore = normalizedFilter
? this.calculateNameScore(nodeType, normalizedFilter)
: 0;
if (!normalizedFilter || nameScore > 0) {
const totalScore = connectionScore + nameScore;
results.push(this.createSearchResult(nodeType, totalScore));
}
}
} catch {
// Skip node types that throw during scoring (e.g., malformed definitions)
}
}
return this.sortAndLimit(results, limit);
}
/**
* Format search results for tool output
* @param result - Single search result
* @returns XML-formatted string
*/
formatResult(result: NodeSearchResult): string {
return `
<node>
<node_name>${result.name}</node_name>
<node_description>${result.description}</node_description>
<node_inputs>${typeof result.inputs === 'object' ? JSON.stringify(result.inputs) : result.inputs}</node_inputs>
<node_outputs>${typeof result.outputs === 'object' ? JSON.stringify(result.outputs) : result.outputs}</node_outputs>
</node>`;
}
/**
* Calculate score based on name matches
* @param nodeType - Node type to score
* @param normalizedQuery - Lowercase search query
* @returns Numeric score
*/
private calculateNameScore(nodeType: INodeTypeDescription, normalizedQuery: string): number {
let score = 0;
// Check name match
if (nodeType.name.toLowerCase().includes(normalizedQuery)) {
score += SCORE_WEIGHTS.NAME_CONTAINS;
}
// Check display name match
if (nodeType.displayName.toLowerCase().includes(normalizedQuery)) {
score += SCORE_WEIGHTS.DISPLAY_NAME_CONTAINS;
}
// Check description match
if (nodeType.description?.toLowerCase().includes(normalizedQuery)) {
score += SCORE_WEIGHTS.DESCRIPTION_CONTAINS;
}
// Check alias match
if (nodeType.codex?.alias?.some((alias) => alias.toLowerCase().includes(normalizedQuery))) {
score += SCORE_WEIGHTS.ALIAS_CONTAINS;
}
// Check exact matches (boost score)
if (nodeType.name.toLowerCase() === normalizedQuery) {
score += SCORE_WEIGHTS.NAME_EXACT;
}
if (nodeType.displayName.toLowerCase() === normalizedQuery) {
score += SCORE_WEIGHTS.DISPLAY_NAME_EXACT;
}
return score;
}
/**
* Check if a node has a specific connection type in outputs
* @param nodeType - Node type to check
* @param connectionType - Connection type to look for
* @returns Score indicating match quality
*/
private getConnectionScore(
nodeType: INodeTypeDescription,
connectionType: NodeConnectionType,
): number {
const outputs = nodeType.outputs;
if (Array.isArray(outputs)) {
// Direct array match
if (outputs.includes(connectionType)) {
return SCORE_WEIGHTS.CONNECTION_EXACT;
}
} else if (typeof outputs === 'string') {
// Expression string - check if it contains the connection type
if (outputs.includes(connectionType)) {
return SCORE_WEIGHTS.CONNECTION_IN_EXPRESSION;
}
}
return 0;
}
/**
* Create a search result object
* @param nodeType - Node type description
* @param score - Calculated score
* @returns Search result object
*/
private createSearchResult(nodeType: INodeTypeDescription, score: number): NodeSearchResult {
return {
name: nodeType.name,
displayName: nodeType.displayName,
description: nodeType.description ?? 'No description available',
inputs: nodeType.inputs,
outputs: nodeType.outputs,
score,
};
}
/**
* Sort and limit search results
* @param results - Array of results
* @param limit - Maximum number to return
* @returns Sorted and limited results
*/
private sortAndLimit(results: NodeSearchResult[], limit: number): NodeSearchResult[] {
return results.sort((a, b) => b.score - a.score).slice(0, limit);
}
/**
* Validate if a connection type is an AI connection type
* @param connectionType - Connection type to validate
* @returns True if it's an AI connection type
*/
static isAiConnectionType(connectionType: string): boolean {
return connectionType.startsWith('ai_');
}
/**
* Get all available AI connection types
* @returns Array of AI connection types
*/
static getAiConnectionTypes(): NodeConnectionType[] {
return Object.values(NodeConnectionTypes).filter((type) =>
NodeSearchEngine.isAiConnectionType(type),
) as NodeConnectionType[];
}
}
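
A usage sketch; the caller function is hypothetical, and nodeTypes would be the same INodeTypeDescription[] the tools in this package receive:

// Hypothetical caller combining name search, connection-type search, and XML formatting.
function searchExample(nodeTypes: INodeTypeDescription[]): string {
  const engine = new NodeSearchEngine(nodeTypes);
  const httpNodes = engine.searchByName('http', 5);
  const calculatorTools = engine.searchByConnectionType(NodeConnectionTypes.AiTool, 5, 'calculator');
  return [...httpNodes, ...calculatorTools].map((result) => engine.formatResult(result)).join('');
}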

View File

@@ -0,0 +1,440 @@
import {
NodeConnectionTypes,
type INodeTypeDescription,
type NodeConnectionType,
} from 'n8n-workflow';
import { createNodeType } from '../../../../test/test-utils';
import { NodeSearchEngine, SCORE_WEIGHTS } from '../node-search-engine';
describe('NodeSearchEngine', () => {
let searchEngine: NodeSearchEngine;
let nodeTypes: INodeTypeDescription[];
beforeEach(() => {
// Create a diverse set of test node types
nodeTypes = [
createNodeType({
name: 'n8n-nodes-base.httpRequest',
displayName: 'HTTP Request',
description: 'Makes HTTP requests to external services',
group: ['input'],
}),
createNodeType({
name: 'n8n-nodes-base.code',
displayName: 'Code',
description: 'Run custom JavaScript code',
group: ['transform'],
}),
createNodeType({
name: 'n8n-nodes-base.webhook',
displayName: 'Webhook',
description: 'Starts workflow on webhook call',
group: ['trigger'],
inputs: [],
outputs: ['main'],
}),
createNodeType({
name: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
displayName: 'OpenAI Chat Model',
description: 'Language model from OpenAI',
group: ['output'],
inputs: [],
outputs: ['ai_languageModel'],
}),
createNodeType({
name: '@n8n/n8n-nodes-langchain.toolCalculator',
displayName: 'Calculator Tool',
description: 'Perform mathematical calculations',
group: ['output'],
inputs: [],
outputs: ['ai_tool'],
}),
createNodeType({
name: '@n8n/n8n-nodes-langchain.vectorStoreMemory',
displayName: 'Vector Store Memory',
description: 'Store and retrieve embeddings',
group: ['output'],
inputs: [],
outputs: ['ai_memory'],
}),
createNodeType({
name: 'n8n-nodes-base.httpBin',
displayName: 'HTTP Bin',
description: 'Test HTTP requests with httpbin.org',
group: ['input'],
codex: {
alias: ['httpbin', 'request bin'],
},
}),
];
searchEngine = new NodeSearchEngine(nodeTypes);
});
describe('searchByName', () => {
it('should find nodes by exact name match', () => {
const results = searchEngine.searchByName('Code');
expect(results).toHaveLength(1);
expect(results[0].name).toBe('n8n-nodes-base.code');
expect(results[0].displayName).toBe('Code');
// Should have display name exact match, display name contains, name contains (code is in the name),
// and description contains ("Run custom JavaScript code")
const expectedScore =
SCORE_WEIGHTS.DISPLAY_NAME_EXACT +
SCORE_WEIGHTS.DISPLAY_NAME_CONTAINS +
SCORE_WEIGHTS.NAME_CONTAINS +
SCORE_WEIGHTS.DESCRIPTION_CONTAINS;
expect(results[0].score).toBe(expectedScore);
});
it('should find nodes by partial name match', () => {
const results = searchEngine.searchByName('http');
expect(results.length).toBe(2);
const httpRequestNode = results.find((r) => r.name === 'n8n-nodes-base.httpRequest');
const httpBinNode = results.find((r) => r.name === 'n8n-nodes-base.httpBin');
expect(httpRequestNode).toBeDefined();
expect(httpBinNode).toBeDefined();
});
it('should find nodes by description match', () => {
const results = searchEngine.searchByName('javascript');
expect(results).toHaveLength(1);
expect(results[0].name).toBe('n8n-nodes-base.code');
expect(results[0].score).toBe(SCORE_WEIGHTS.DESCRIPTION_CONTAINS);
});
it('should find nodes by alias match', () => {
const results = searchEngine.searchByName('httpbin');
expect(results).toHaveLength(1);
expect(results[0].name).toBe('n8n-nodes-base.httpBin');
expect(results[0].score).toBeGreaterThanOrEqual(SCORE_WEIGHTS.ALIAS_CONTAINS);
});
it('should handle case-insensitive search', () => {
const resultsLower = searchEngine.searchByName('webhook');
const resultsUpper = searchEngine.searchByName('WEBHOOK');
const resultsMixed = searchEngine.searchByName('WebHook');
expect(resultsLower).toHaveLength(1);
expect(resultsUpper).toHaveLength(1);
expect(resultsMixed).toHaveLength(1);
expect(resultsLower[0].name).toBe(resultsUpper[0].name);
expect(resultsLower[0].name).toBe(resultsMixed[0].name);
});
it('should return empty array for no matches', () => {
const results = searchEngine.searchByName('nonexistent');
expect(results).toEqual([]);
});
it('should respect limit parameter', () => {
const results = searchEngine.searchByName('a', 2);
expect(results.length).toBeLessThanOrEqual(2);
});
it('should combine scores for multiple matches', () => {
const results = searchEngine.searchByName('request');
// HTTP Request should have highest score (name + display name + description)
expect(results[0].name).toBe('n8n-nodes-base.httpRequest');
expect(results[0].score).toBe(
SCORE_WEIGHTS.NAME_CONTAINS +
SCORE_WEIGHTS.DISPLAY_NAME_CONTAINS +
SCORE_WEIGHTS.DESCRIPTION_CONTAINS,
);
});
it('should handle nodes without description', () => {
const nodeWithoutDesc = createNodeType({
name: 'test.node',
displayName: 'Test Node',
description: undefined,
});
const engine = new NodeSearchEngine([nodeWithoutDesc]);
const results = engine.searchByName('test');
expect(results).toHaveLength(1);
expect(results[0].description).toBe('No description available');
});
it('should handle malformed node types gracefully', () => {
// Add a node that might cause errors
const malformedNode = {
name: null as unknown as string,
displayName: undefined as unknown as string,
} as INodeTypeDescription;
const engineWithMalformed = new NodeSearchEngine([...nodeTypes, malformedNode]);
// Should not throw and should return valid results
expect(() => engineWithMalformed.searchByName('http')).not.toThrow();
const results = engineWithMalformed.searchByName('http');
expect(results.length).toBeGreaterThan(0);
});
});
describe('searchByConnectionType', () => {
it('should find nodes with exact connection type match', () => {
const results = searchEngine.searchByConnectionType(NodeConnectionTypes.AiTool);
expect(results).toHaveLength(1);
expect(results[0].name).toBe('@n8n/n8n-nodes-langchain.toolCalculator');
expect(results[0].score).toBe(SCORE_WEIGHTS.CONNECTION_EXACT);
});
it('should find multiple nodes with same connection type', () => {
// Add another AI tool node
const anotherTool = createNodeType({
name: '@n8n/n8n-nodes-langchain.toolCode',
displayName: 'Code Tool',
outputs: ['ai_tool'],
});
const engine = new NodeSearchEngine([...nodeTypes, anotherTool]);
const results = engine.searchByConnectionType(NodeConnectionTypes.AiTool);
expect(results).toHaveLength(2);
expect(results.map((r) => r.name)).toContain('@n8n/n8n-nodes-langchain.toolCalculator');
expect(results.map((r) => r.name)).toContain('@n8n/n8n-nodes-langchain.toolCode');
});
it('should apply name filter when provided', () => {
const results = searchEngine.searchByConnectionType(
NodeConnectionTypes.AiLanguageModel,
20,
'openai',
);
expect(results).toHaveLength(1);
expect(results[0].name).toBe('@n8n/n8n-nodes-langchain.lmChatOpenAi');
});
it('should exclude nodes that do not match name filter', () => {
const results = searchEngine.searchByConnectionType(
NodeConnectionTypes.AiLanguageModel,
20,
'anthropic',
);
expect(results).toHaveLength(0);
});
it('should handle expression-based outputs', () => {
const expressionNode = createNodeType({
name: 'test.expression',
displayName: 'Expression Node',
outputs: '={{ $parameter.mode === "tool" ? "ai_tool" : "main" }}',
});
const engine = new NodeSearchEngine([...nodeTypes, expressionNode]);
const results = engine.searchByConnectionType(NodeConnectionTypes.AiTool);
// Should find the expression node with lower score
const expressionResult = results.find((r) => r.name === 'test.expression');
expect(expressionResult).toBeDefined();
expect(expressionResult!.score).toBe(SCORE_WEIGHTS.CONNECTION_IN_EXPRESSION);
});
it('should sort by score with name filter boost', () => {
// Add nodes with different match qualities
const exactMatch = createNodeType({
name: 'test.exact',
displayName: 'Calculator Exact',
outputs: ['ai_tool'],
});
const expressionMatch = createNodeType({
name: 'test.expression',
displayName: 'Something Else',
outputs: '={{ "ai_tool" }}',
});
const engine = new NodeSearchEngine([...nodeTypes, exactMatch, expressionMatch]);
const results = engine.searchByConnectionType(NodeConnectionTypes.AiTool, 20, 'calculator');
// Both Calculator nodes should appear (both have 'calculator' in name)
// Expression match should not appear (no name match)
const names = results.map((r) => r.name);
expect(names).toContain('test.exact');
expect(names).toContain('@n8n/n8n-nodes-langchain.toolCalculator');
expect(results.find((r) => r.name === 'test.expression')).toBeUndefined();
// Both should have positive scores (exact connection match plus a name match)
const exactScore = results.find((r) => r.name === 'test.exact')?.score;
const calculatorScore = results.find(
(r) => r.name === '@n8n/n8n-nodes-langchain.toolCalculator',
)?.score;
expect(exactScore).toBeDefined();
expect(calculatorScore).toBeDefined();
expect(exactScore).toBeGreaterThan(0);
expect(calculatorScore).toBeGreaterThan(0);
});
it('should return empty array for non-existent connection type', () => {
const results = searchEngine.searchByConnectionType('ai_nonexistent' as NodeConnectionType);
expect(results).toEqual([]);
});
it('should respect limit parameter', () => {
// Add many nodes with same connection type
const manyNodes = Array.from({ length: 10 }, (_, i) =>
createNodeType({
name: `test.tool${i}`,
displayName: `Tool ${i}`,
outputs: ['ai_tool'],
}),
);
const engine = new NodeSearchEngine([...nodeTypes, ...manyNodes]);
const results = engine.searchByConnectionType(NodeConnectionTypes.AiTool, 5);
expect(results).toHaveLength(5);
});
});
describe('formatResult', () => {
it('should format search result as XML', () => {
const result = {
name: 'test.node',
displayName: 'Test Node',
description: 'Test description',
inputs: ['main'] as NodeConnectionType[],
outputs: ['main'] as NodeConnectionType[],
score: 100,
};
const formatted = searchEngine.formatResult(result);
expect(formatted).toContain('<node>');
expect(formatted).toContain('<node_name>test.node</node_name>');
expect(formatted).toContain('<node_description>Test description</node_description>');
expect(formatted).toContain('</node>');
});
it('should handle array inputs/outputs', () => {
const result = {
name: 'test.node',
displayName: 'Test Node',
description: 'Test',
inputs: ['main', 'ai_tool'] as NodeConnectionType[],
outputs: ['main', 'main'] as NodeConnectionType[],
score: 50,
};
const formatted = searchEngine.formatResult(result);
expect(formatted).toContain('<node_inputs>["main","ai_tool"]</node_inputs>');
expect(formatted).toContain('<node_outputs>["main","main"]</node_outputs>');
});
it('should handle string inputs/outputs', () => {
const result = {
name: 'test.node',
displayName: 'Test Node',
description: 'Test',
inputs: '={{ $parameter.inputs }}' as `={{${string}}}`,
outputs: '={{ $parameter.outputs }}' as `={{${string}}}`,
score: 50,
};
const formatted = searchEngine.formatResult(result);
expect(formatted).toContain('<node_inputs>={{ $parameter.inputs }}</node_inputs>');
expect(formatted).toContain('<node_outputs>={{ $parameter.outputs }}</node_outputs>');
});
it('should handle complex object inputs/outputs', () => {
const result = {
name: 'test.node',
displayName: 'Test Node',
description: 'Test',
inputs: [
{ type: NodeConnectionTypes.Main },
{ type: NodeConnectionTypes.AiTool, required: false },
],
outputs: [{ type: NodeConnectionTypes.Main }],
score: 50,
};
const formatted = searchEngine.formatResult(result);
expect(formatted).toContain(
'<node_inputs>[{"type":"main"},{"type":"ai_tool","required":false}]</node_inputs>',
);
expect(formatted).toContain('<node_outputs>[{"type":"main"}]</node_outputs>');
});
});
describe('static methods', () => {
it('should identify AI connection types', () => {
expect(NodeSearchEngine.isAiConnectionType('ai_tool')).toBe(true);
expect(NodeSearchEngine.isAiConnectionType('ai_languageModel')).toBe(true);
expect(NodeSearchEngine.isAiConnectionType('main')).toBe(false);
expect(NodeSearchEngine.isAiConnectionType('Main')).toBe(false);
});
it('should get all AI connection types', () => {
const aiTypes = NodeSearchEngine.getAiConnectionTypes();
expect(aiTypes).toContain(NodeConnectionTypes.AiTool);
expect(aiTypes).toContain(NodeConnectionTypes.AiLanguageModel);
expect(aiTypes).toContain(NodeConnectionTypes.AiMemory);
expect(aiTypes).not.toContain(NodeConnectionTypes.Main);
// All should start with 'ai_'
aiTypes.forEach((type) => {
expect(type).toMatch(/^ai_/);
});
});
});
describe('edge cases and error handling', () => {
it('should handle empty node types array', () => {
const emptyEngine = new NodeSearchEngine([]);
expect(emptyEngine.searchByName('test')).toEqual([]);
expect(emptyEngine.searchByConnectionType(NodeConnectionTypes.AiTool)).toEqual([]);
});
it('should handle undefined/null in node properties', () => {
const nodeWithNulls = createNodeType({
name: 'test.nulls',
displayName: 'Null Test',
description: null as unknown as string,
codex: {
alias: null as unknown as string[],
},
});
const engine = new NodeSearchEngine([nodeWithNulls]);
expect(() => engine.searchByName('null')).not.toThrow();
const results = engine.searchByName('null');
expect(results).toHaveLength(1);
});
it('should handle very long queries', () => {
const longQuery = 'a'.repeat(1000);
expect(() => searchEngine.searchByName(longQuery)).not.toThrow();
expect(() =>
searchEngine.searchByConnectionType(NodeConnectionTypes.AiTool, 20, longQuery),
).not.toThrow();
});
it('should handle special characters in queries', () => {
const specialChars = ['(', ')', '[', ']', '{', '}', '.', '*', '+', '?', '^', '$', '|', '\\'];
specialChars.forEach((char) => {
expect(() => searchEngine.searchByName(char)).not.toThrow();
});
});
});
});

View File

@@ -0,0 +1,11 @@
// Progress helpers
export * from './progress';
// Response helpers
export * from './response';
// Validation helpers
export * from './validation';
// State helpers
export * from './state';

View File

@@ -0,0 +1,156 @@
import type { ToolRunnableConfig } from '@langchain/core/tools';
import type { LangGraphRunnableConfig } from '@langchain/langgraph';
import type {
ToolProgressMessage,
ToolError,
ProgressReporter,
BatchReporter,
} from '../../types/tools';
/**
* Create a progress reporter for a tool execution
*/
export function createProgressReporter<TToolName extends string = string>(
config: ToolRunnableConfig & LangGraphRunnableConfig,
toolName: TToolName,
): ProgressReporter {
const toolCallId = config.toolCall?.id;
const emit = (message: ToolProgressMessage<TToolName>): void => {
config.writer?.(message);
};
const start = <T>(input: T): void => {
emit({
type: 'tool',
toolName,
toolCallId,
status: 'running',
updates: [
{
type: 'input',
data: input as Record<string, unknown>,
},
],
});
};
const progress = (message: string, data?: Record<string, unknown>): void => {
emit({
type: 'tool',
toolName,
toolCallId,
status: 'running',
updates: [
{
type: 'progress',
data: data ?? { message },
},
],
});
};
const complete = <T>(output: T): void => {
emit({
type: 'tool',
toolName,
toolCallId,
status: 'completed',
updates: [
{
type: 'output',
data: output as Record<string, unknown>,
},
],
});
};
const error = (error: ToolError): void => {
emit({
type: 'tool',
toolName,
toolCallId,
status: 'error',
updates: [
{
type: 'error',
data: {
message: error.message,
code: error.code,
details: error.details,
},
},
],
});
};
const createBatchReporter = (scope: string): BatchReporter => {
let currentIndex = 0;
let totalItems = 0;
return {
init: (total: number) => {
totalItems = total;
currentIndex = 0;
},
next: (itemDescription: string) => {
currentIndex++;
progress(`${scope}: Processing item ${currentIndex} of ${totalItems}: ${itemDescription}`);
},
complete: () => {
progress(`${scope}: Completed all ${totalItems} items`);
},
};
};
return {
start,
progress,
complete,
error,
createBatchReporter,
};
}
/**
* Helper function to report start of tool execution
*/
export function reportStart<T>(reporter: ProgressReporter, input: T): void {
reporter.start(input);
}
/**
* Helper function to report progress during tool execution
*/
export function reportProgress(
reporter: ProgressReporter,
message: string,
data?: Record<string, unknown>,
): void {
reporter.progress(message, data);
}
/**
* Helper function to report successful completion
*/
export function reportComplete<T>(reporter: ProgressReporter, output: T): void {
reporter.complete(output);
}
/**
* Helper function to report error during execution
*/
export function reportError(reporter: ProgressReporter, error: ToolError): void {
reporter.error(error);
}
/**
* Create a batch progress reporter for multi-item operations
*/
export function createBatchProgressReporter(
reporter: ProgressReporter,
scope: string,
): BatchReporter {
return reporter.createBatchReporter(scope);
}
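
A sketch of the reporter lifecycle inside a tool callback; the surrounding function and its input values are hypothetical, only the helpers above are real:

// Hypothetical tool body: start -> batched progress -> complete, all streamed via config.writer.
function exampleToolBody(config: ToolRunnableConfig & LangGraphRunnableConfig) {
  const reporter = createProgressReporter(config, 'search_nodes');
  reporter.start({ queries: [{ queryType: 'name', query: 'http' }] });
  const batch = reporter.createBatchReporter('Searching nodes');
  batch.init(1);
  batch.next('name search for "http"');
  batch.complete();
  reporter.complete({ totalResults: 5 });
}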

View File

@@ -0,0 +1,49 @@
import { ToolMessage } from '@langchain/core/messages';
import type { ToolRunnableConfig } from '@langchain/core/tools';
import { Command } from '@langchain/langgraph';
import type { ToolError } from '../../types/tools';
import type { StateUpdater } from '../../types/utils';
import type { WorkflowState } from '../../workflow-state';
/**
* Create a success response with optional state updates
*/
export function createSuccessResponse<TState = typeof WorkflowState.State>(
config: ToolRunnableConfig,
message: string,
stateUpdates?: StateUpdater<TState>,
): Command {
const toolCallId = config.toolCall?.id as string;
const messages = [
new ToolMessage({
content: message,
tool_call_id: toolCallId,
}),
];
const update = { messages };
if (stateUpdates) {
Object.assign(update, stateUpdates);
}
return new Command({ update });
}
/**
* Create an error response
*/
export function createErrorResponse(config: ToolRunnableConfig, error: ToolError): Command {
const toolCallId = config.toolCall?.id as string;
const messages = [
new ToolMessage({
content: `Error: ${error.message}`,
tool_call_id: toolCallId,
}),
];
return new Command({ update: { messages } });
}

View File

@@ -0,0 +1,114 @@
import { getCurrentTaskInput } from '@langchain/langgraph';
import type { INode, IConnection } from 'n8n-workflow';
import type { SimpleWorkflow } from '../../types/workflow';
import type { WorkflowState } from '../../workflow-state';
/**
* Get the current workflow from state in a type-safe manner
*/
export function getCurrentWorkflow(state: typeof WorkflowState.State): SimpleWorkflow {
return state.workflowJSON;
}
export function getWorkflowState(): typeof WorkflowState.State {
return getCurrentTaskInput();
}
/**
* Get the current workflow from task input
*/
export function getCurrentWorkflowFromTaskInput(): SimpleWorkflow {
const state = getWorkflowState();
return getCurrentWorkflow(state);
}
/**
* Create a state update for workflow connections
*/
export function updateWorkflowConnections(
connections: SimpleWorkflow['connections'],
): Partial<typeof WorkflowState.State> {
// Return an operation to merge connections (not replace them)
return {
workflowOperations: [{ type: 'mergeConnections', connections }],
};
}
/**
* Add a node to the workflow state
*/
export function addNodeToWorkflow(node: INode): Partial<typeof WorkflowState.State> {
return addNodesToWorkflow([node]);
}
/**
* Add multiple nodes to the workflow state
*/
export function addNodesToWorkflow(nodes: INode[]): Partial<typeof WorkflowState.State> {
// Return an operation to add nodes
return {
workflowOperations: [{ type: 'addNodes', nodes }],
};
}
/**
* Remove a node from the workflow state
*/
export function removeNodeFromWorkflow(nodeId: string): Partial<typeof WorkflowState.State> {
// Return an operation to remove nodes
return {
workflowOperations: [{ type: 'removeNode', nodeIds: [nodeId] }],
};
}
/**
* Remove multiple nodes from the workflow state
*/
export function removeNodesFromWorkflow(nodeIds: string[]): Partial<typeof WorkflowState.State> {
// Return an operation to remove nodes
return {
workflowOperations: [{ type: 'removeNode', nodeIds }],
};
}
/**
* Update a node in the workflow state
*/
export function updateNodeInWorkflow(
state: typeof WorkflowState.State,
nodeId: string,
updates: Partial<INode>,
): Partial<typeof WorkflowState.State> {
const existingNode = state.workflowJSON.nodes.find((n) => n.id === nodeId);
if (!existingNode) {
return {};
}
// Return an operation to update the node
return {
workflowOperations: [{ type: 'updateNode', nodeId, updates }],
};
}
/**
* Add a connection to the workflow state
*/
export function addConnectionToWorkflow(
sourceNodeId: string,
_targetNodeId: string,
connection: IConnection,
): Partial<typeof WorkflowState.State> {
return {
workflowOperations: [
{
type: 'mergeConnections',
connections: {
[sourceNodeId]: {
main: [[connection]],
},
},
},
],
};
}
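
These helpers return operation descriptors rather than mutating the workflow directly; a sketch of the shapes, with an illustrative INode literal:

// Illustrative node; any realistic INode works here.
const fetchUsersNode: INode = {
  id: 'a1b2c3',
  name: 'Fetch Users',
  type: 'n8n-nodes-base.httpRequest',
  typeVersion: 4,
  position: [260, 300],
  parameters: {},
};
const addUpdate = addNodeToWorkflow(fetchUsersNode);
// => { workflowOperations: [{ type: 'addNodes', nodes: [fetchUsersNode] }] }
const removeUpdate = removeNodesFromWorkflow(['a1b2c3']);
// => { workflowOperations: [{ type: 'removeNode', nodeIds: ['a1b2c3'] }] }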

View File

@@ -0,0 +1,114 @@
import type { INode, INodeTypeDescription } from 'n8n-workflow';
import {
ConnectionError,
NodeNotFoundError,
NodeTypeNotFoundError,
ValidationError,
} from '../../errors';
import type { ToolError } from '../../types/tools';
import type { SimpleWorkflow } from '../../types/workflow';
/**
* Validate that a node exists in the workflow
*/
export function validateNodeExists(nodeId: string, nodes: INode[]): INode | null {
return nodes.find((n) => n.id === nodeId) ?? null;
}
/**
* Find a node by name (case-insensitive)
*/
export function findNodeByName(nodeName: string, nodes: INode[]): INode | null {
return nodes.find((n) => n.name.toLowerCase() === nodeName.toLowerCase()) ?? null;
}
/**
* Find a node by ID or name
*/
export function findNodeByIdOrName(nodeIdentifier: string, nodes: INode[]): INode | null {
// First try exact ID match
const byId = validateNodeExists(nodeIdentifier, nodes);
if (byId) return byId;
// Then try name match
return findNodeByName(nodeIdentifier, nodes);
}
/**
* Find a node type by name
*/
export function findNodeType(
nodeTypeName: string,
nodeTypes: INodeTypeDescription[],
): INodeTypeDescription | null {
return nodeTypes.find((nt) => nt.name === nodeTypeName) ?? null;
}
/**
* Validate that a connection is possible between two nodes
*/
export function validateConnection(sourceNode: INode, targetNode: INode): ToolError | null {
// Check if source and target are the same
if (sourceNode.id === targetNode.id) {
const error = new ConnectionError('Cannot connect a node to itself', {
fromNodeId: sourceNode.id,
toNodeId: targetNode.id,
});
return {
message: error.message,
code: 'SELF_CONNECTION',
details: { sourceId: sourceNode.id, targetId: targetNode.id },
};
}
return null;
}
/**
* Create a validation error
*/
export function createValidationError(
message: string,
code: string,
details?: Record<string, string>,
): ToolError {
// Create the appropriate error instance for better tracking
const error = new ValidationError(message, { tags: { code, ...details } });
return {
message: error.message,
code,
details,
};
}
/**
* Create a node not found error
*/
export function createNodeNotFoundError(nodeIdentifier: string): ToolError {
const error = new NodeNotFoundError(nodeIdentifier);
return {
message: error.message,
code: 'NODE_NOT_FOUND',
details: { nodeIdentifier },
};
}
/**
* Create a node type not found error
*/
export function createNodeTypeNotFoundError(nodeTypeName: string): ToolError {
const error = new NodeTypeNotFoundError(nodeTypeName);
return {
message: error.message,
code: 'NODE_TYPE_NOT_FOUND',
details: { nodeTypeName },
};
}
/**
* Check if a workflow has nodes
*/
export function hasNodes(workflow: SimpleWorkflow): boolean {
return workflow.nodes.length > 0;
}
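
A small sketch of combining these lookups; the caller function and identifier are hypothetical:

// Hypothetical caller resolving a node reference that may be an ID or a display name.
function resolveNodeOrError(identifier: string, workflow: SimpleWorkflow) {
  const node = findNodeByIdOrName(identifier, workflow.nodes);
  return node ?? createNodeNotFoundError(identifier);
}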

View File

@@ -0,0 +1,197 @@
import { tool } from '@langchain/core/tools';
import type { INodeTypeDescription } from 'n8n-workflow';
import { z } from 'zod';
import { ValidationError, ToolExecutionError } from '../errors';
import { createProgressReporter, reportProgress } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import { findNodeType, createNodeTypeNotFoundError } from './helpers/validation';
import type { NodeDetails } from '../types/nodes';
import type { NodeDetailsOutput } from '../types/tools';
/**
* Schema for node details tool input
*/
const nodeDetailsSchema = z.object({
nodeName: z.string().describe('The exact node type name (e.g., n8n-nodes-base.httpRequest)'),
withParameters: z
.boolean()
.optional()
.default(false)
.describe('Whether to include node parameters in the output'),
withConnections: z
.boolean()
.optional()
.default(true)
.describe('Whether to include node supported connections in the output'),
});
/**
* Format node inputs
*/
function formatInputs(inputs: INodeTypeDescription['inputs']): string {
if (!inputs || inputs.length === 0) {
return '<inputs>none</inputs>';
}
if (typeof inputs === 'string') {
return `<input>${inputs}</input>`;
}
const formattedInputs = inputs.map((input) => {
if (typeof input === 'string') {
return `<input>${input}</input>`;
}
return `<input>${JSON.stringify(input)}</input>`;
});
return formattedInputs.join('\n');
}
/**
* Format node outputs
*/
function formatOutputs(outputs: INodeTypeDescription['outputs']): string {
if (!outputs || outputs.length === 0) {
return '<outputs>none</outputs>';
}
if (typeof outputs === 'string') {
return `<output>${outputs}</output>`;
}
const formattedOutputs = outputs.map((output) => {
if (typeof output === 'string') {
return `<output>${output}</output>`;
}
return `<output>${JSON.stringify(output)}</output>`;
});
return formattedOutputs.join('\n');
}
/**
* Format node details into a structured message
*/
function formatNodeDetails(
details: NodeDetails,
withParameters: boolean = false,
withConnections: boolean = true,
): string {
const parts: string[] = [];
// Basic details
parts.push('<node_details>');
parts.push(`<name>${details.name}</name>`);
parts.push(`<display_name>${details.displayName}</display_name>`);
parts.push(`<description>${details.description}</description>`);
if (details.subtitle) {
parts.push(`<subtitle>${details.subtitle}</subtitle>`);
}
// Parameters
if (withParameters && details.properties.length > 0) {
const stringifiedProperties = JSON.stringify(details.properties, null, 2);
parts.push(`<properties>
${stringifiedProperties.length > 1000 ? stringifiedProperties.slice(0, 1000) + '... Rest of properties omitted' : stringifiedProperties}
</properties>`);
}
// Connections
if (withConnections) {
parts.push('<connections>');
parts.push(formatInputs(details.inputs));
parts.push(formatOutputs(details.outputs));
parts.push('</connections>');
}
parts.push('</node_details>');
return parts.join('\n');
}
/**
* Helper to extract node details from a node type description
*/
function extractNodeDetails(nodeType: INodeTypeDescription): NodeDetails {
return {
name: nodeType.name,
displayName: nodeType.displayName,
description: nodeType.description,
properties: nodeType.properties,
subtitle: nodeType.subtitle,
inputs: nodeType.inputs,
outputs: nodeType.outputs,
};
}
/**
* Factory function to create the node details tool
*/
export function createNodeDetailsTool(nodeTypes: INodeTypeDescription[]) {
return tool(
(input: unknown, config) => {
const reporter = createProgressReporter(config, 'get_node_details');
try {
// Validate input using Zod schema
const validatedInput = nodeDetailsSchema.parse(input);
const { nodeName, withParameters, withConnections } = validatedInput;
// Report tool start
reporter.start(validatedInput);
// Report progress
reportProgress(reporter, `Looking up details for ${nodeName}...`);
// Find the node type
const nodeType = findNodeType(nodeName, nodeTypes);
if (!nodeType) {
const error = createNodeTypeNotFoundError(nodeName);
reporter.error(error);
return createErrorResponse(config, error);
}
// Extract node details
const details = extractNodeDetails(nodeType);
// Format the output message
const message = formatNodeDetails(details, withParameters, withConnections);
// Report completion
const output: NodeDetailsOutput = {
details,
found: true,
message,
};
reporter.complete(output);
// Return success response
return createSuccessResponse(config, message);
} catch (error) {
// Handle validation or unexpected errors
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid input parameters', {
extra: { errors: error.errors },
});
reporter.error(validationError);
return createErrorResponse(config, validationError);
}
const toolError = new ToolExecutionError(
error instanceof Error ? error.message : 'Unknown error occurred',
{
toolName: 'get_node_details',
cause: error instanceof Error ? error : undefined,
},
);
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'get_node_details',
description:
'Get detailed information about a specific n8n node type including properties and available connections. Use this before adding nodes to understand their input/output structure.',
schema: nodeDetailsSchema,
},
);
}
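
For illustration, a tool-call input matching nodeDetailsSchema above; the node name is just an example:

// Illustrative arguments: include parameters and keep the connection summary (its default is true).
const exampleDetailsInput: z.infer<typeof nodeDetailsSchema> = {
  nodeName: 'n8n-nodes-base.httpRequest',
  withParameters: true,
  withConnections: true,
};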

View File

@@ -0,0 +1,213 @@
import { tool } from '@langchain/core/tools';
import { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
import { z } from 'zod';
import { ValidationError, ToolExecutionError } from '../errors';
import { NodeSearchEngine } from './engines/node-search-engine';
import { createProgressReporter, createBatchProgressReporter } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import type { NodeSearchResult } from '../types/nodes';
import type { NodeSearchOutput } from '../types/tools';
/**
* Search query schema - simplified for better LLM compatibility
*/
const searchQuerySchema = z.object({
queryType: z.enum(['name', 'subNodeSearch']).describe('Type of search to perform'),
query: z.string().optional().describe('Search term to filter results'),
connectionType: z
.nativeEnum(NodeConnectionTypes)
.optional()
.describe('For subNodeSearch: connection type like ai_languageModel, ai_tool, etc.'),
});
/**
* Main schema for node search tool
*/
const nodeSearchSchema = z.object({
queries: z
.array(searchQuerySchema)
.min(1)
.describe('Array of search queries to find different types of nodes'),
});
/**
* Inferred types from schemas
*/
type SearchQuery = z.infer<typeof searchQuerySchema>;
const SEARCH_LIMIT = 5;
/**
* Process a single search query
*/
function processQuery(
query: SearchQuery,
searchEngine: NodeSearchEngine,
): { searchResults: NodeSearchResult[]; searchIdentifier: string } {
if (query.queryType === 'name') {
// Name-based search
const searchTerm = query.query;
if (!searchTerm) {
return {
searchResults: [],
searchIdentifier: '',
};
}
const searchResults = searchEngine.searchByName(searchTerm, SEARCH_LIMIT);
return {
searchResults,
searchIdentifier: searchTerm,
};
} else {
// Sub-node search by connection type
const connectionType = query.connectionType;
if (!connectionType) {
return {
searchResults: [],
searchIdentifier: '',
};
}
const searchResults = searchEngine.searchByConnectionType(
connectionType,
SEARCH_LIMIT,
query.query,
);
const searchIdentifier = query.query
? `sub-nodes with ${connectionType} output matching "${query.query}"`
: `sub-nodes with ${connectionType} output`;
return {
searchResults,
searchIdentifier,
};
}
}
/**
* Build the response message from search results
*/
function buildResponseMessage(
results: NodeSearchOutput['results'],
nodeTypes: INodeTypeDescription[],
): string {
const searchEngine = new NodeSearchEngine(nodeTypes);
let responseContent = '';
for (const { query, results: searchResults } of results) {
if (responseContent) responseContent += '\n\n';
if (searchResults.length === 0) {
responseContent += `No nodes found matching "${query}"`;
} else {
responseContent += `Found ${searchResults.length} nodes matching "${query}":${searchResults
.map((node) => searchEngine.formatResult(node))
.join('')}`;
}
}
return responseContent;
}
/**
* Factory function to create the node search tool
*/
export function createNodeSearchTool(nodeTypes: INodeTypeDescription[]) {
return tool(
(input: unknown, config) => {
const reporter = createProgressReporter(config, 'search_nodes');
try {
// Validate input using Zod schema
const validatedInput = nodeSearchSchema.parse(input);
const { queries } = validatedInput;
// Report tool start
reporter.start(validatedInput);
const allResults: NodeSearchOutput['results'] = [];
// Create search engine instance
const searchEngine = new NodeSearchEngine(nodeTypes);
// Create batch reporter for progress tracking
const batchReporter = createBatchProgressReporter(reporter, 'Searching nodes');
batchReporter.init(queries.length);
// Process each query
for (const searchQuery of queries) {
const { searchResults, searchIdentifier } = processQuery(searchQuery, searchEngine);
// Report progress
batchReporter.next(searchIdentifier);
// Add to results
allResults.push({
query: searchIdentifier,
results: searchResults,
});
}
// Complete batch reporting
batchReporter.complete();
// Build response message
const responseMessage = buildResponseMessage(allResults, nodeTypes);
// Report completion
const output: NodeSearchOutput = {
results: allResults,
totalResults: allResults.reduce((sum, r) => sum + r.results.length, 0),
message: responseMessage,
};
reporter.complete(output);
// Return success response
return createSuccessResponse(config, responseMessage);
} catch (error) {
// Handle validation or unexpected errors
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid input parameters', {
extra: { errors: error.errors },
});
reporter.error(validationError);
return createErrorResponse(config, validationError);
}
const toolError = new ToolExecutionError(
error instanceof Error ? error.message : 'Unknown error occurred',
{
toolName: 'search_nodes',
cause: error instanceof Error ? error : undefined,
},
);
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'search_nodes',
description: `Search for n8n nodes by name or find sub-nodes that output specific connection types. Use this before adding nodes to find the correct node types.
Search modes:
1. Name search (default): Search nodes by name/description
Example: { queryType: "name", query: "http" }
2. Sub-node search: Find sub-nodes that output specific AI connection types
Example: { queryType: "subNodeSearch", connectionType: NodeConnectionTypes.AiTool }
With optional query filter: { queryType: "subNodeSearch", connectionType: NodeConnectionTypes.AiTool, query: "calculator" }
This finds sub-nodes (like "Calculator Tool") that can be connected to nodes accepting that connection type
Common AI connection types for sub-node search:
- NodeConnectionTypes.AiLanguageModel (finds LLM provider sub-nodes like "OpenAI Chat Model")
- NodeConnectionTypes.AiTool (finds tool sub-nodes like "Calculator Tool", "Code Tool")
- NodeConnectionTypes.AiMemory (finds memory sub-nodes like "Window Buffer Memory")
- NodeConnectionTypes.AiEmbedding (finds embedding sub-nodes like "Embeddings OpenAI")
- NodeConnectionTypes.AiVectorStore (finds vector store sub-nodes)
- NodeConnectionTypes.AiDocument (finds document loader sub-nodes)
- NodeConnectionTypes.AiTextSplitter (finds text splitter sub-nodes)
You can search for multiple different criteria at once by providing an array of queries.`,
schema: nodeSearchSchema,
},
);
}
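
For completeness, a hedged sketch of a mixed search call against the factory above (query values and import path are illustrative assumptions):

import { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
import { createNodeSearchTool } from './node-search.tool'; // path assumed

async function nodeSearchExample(nodeTypes: INodeTypeDescription[]) {
  const searchTool = createNodeSearchTool(nodeTypes);

  // One name-based query and one sub-node query, resolved in a single tool call.
  return await searchTool.invoke(
    {
      queries: [
        { queryType: 'name', query: 'http' },
        {
          queryType: 'subNodeSearch',
          connectionType: NodeConnectionTypes.AiTool,
          query: 'calculator',
        },
      ],
    },
    { runName: 'search_nodes-example' },
  );
}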

View File

@@ -0,0 +1,391 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
const systemPrompt = `You are an AI assistant specialized in creating and editing n8n workflows. Your goal is to help users build efficient, well-connected workflows by intelligently using the available tools.
<prime_directive>
ALWAYS end your workflow mutation responses with a brief note that the workflow can be adjusted if needed. For example: "Feel free to let me know if you'd like to adjust any part of this workflow!" This is mandatory for all workflow mutation responses.
</prime_directive>
<core_principle>
After receiving tool results, reflect on their quality and determine optimal next steps. Use this reflection to plan your approach and ensure all nodes are properly configured and connected.
</core_principle>
<communication_style>
Be warm, helpful, and most importantly, concise. Focus on actionable information.
- Lead with what was accomplished
- Highlight only critical configuration needs
- Provide clear next steps
- Save detailed explanations for when users ask
- One emoji per section maximum
</communication_style>
<tool_execution_strategy>
For maximum efficiency, invoke all relevant tools simultaneously when performing independent operations. This significantly reduces wait time and improves user experience.
Parallel execution guidelines:
- ALL tools support parallel execution, including add_nodes
- Information gathering: Call search_nodes and get_node_details in parallel for multiple node types
- Node creation: Add multiple nodes by calling add_nodes multiple times in parallel
- Parameter updates: Update different nodes' parameters simultaneously
- Connection creation: Connect multiple node pairs simultaneously
The system's operations processor ensures state consistency across all parallel operations.
</tool_execution_strategy>
<workflow_creation_sequence>
Follow this proven sequence for creating robust workflows:
1. **Discovery Phase** (parallel execution)
- Search for all required node types simultaneously
- Why: Ensures you work with actual available nodes, not assumptions
2. **Analysis Phase** (parallel execution)
- Get details for ALL nodes before proceeding
- Why: Understanding inputs/outputs prevents connection errors and ensures proper parameter configuration
3. **Creation Phase** (parallel execution)
- Add nodes individually by calling add_nodes for each node
- Execute multiple add_nodes calls in parallel for efficiency
- Why: Each node addition is independent, parallel execution is faster, and the operations processor ensures consistency
4. **Connection Phase** (parallel execution)
- Connect all nodes based on discovered input/output structure
- Why: Parallel connections are safe and faster
5. **Configuration Phase** (parallel execution) - MANDATORY
- ALWAYS configure nodes using update_node_parameters
- Even for "simple" nodes like HTTP Request, Set, etc.
- Configure all nodes in parallel for efficiency
   - Pay special attention to parameters that control node behavior (dataType, mode, operation)
   - Why: Unconfigured nodes will fail at runtime, and defaults are unreliable
<parallel_node_creation_example>
Example: Creating and configuring a workflow (complete process):
Step 1 - Add nodes in parallel:
- add_nodes({{ nodeType: "n8n-nodes-base.httpRequest", name: "Fetch Data", ... }})
- add_nodes({{ nodeType: "n8n-nodes-base.set", name: "Transform Data", ... }})
Step 2 - Connect nodes:
- connect_nodes({{ sourceNodeId: "Fetch Data", targetNodeId: "Transform Data" }})
Step 3 - Configure ALL nodes in parallel (MANDATORY):
- update_node_parameters({{ nodeId: "Fetch Data", instructions: ["Set URL to https://api.example.com/users", "Set method to GET"] }})
- update_node_parameters({{ nodeId: "Transform Data", instructions: ["Add field status with value 'processed'", "Add field timestamp with current date"] }})
</parallel_node_creation_example>
</workflow_creation_sequence>
<connection_parameters_rules>
Every node addition requires both reasoning and parameters. Each add_nodes call adds a single node.
This two-step process ensures proper connections:
<reasoning_first>
Always determine connectionParametersReasoning before setting connectionParameters. Ask yourself:
- Does this node have dynamic inputs/outputs?
- Which parameters affect the connection structure?
- What mode or operation changes the available connections?
</reasoning_first>
<parameter_examples>
Static nodes (standard inputs/outputs):
- HTTP Request, Set, Code: reasoning="Static inputs/outputs", parameters={{}}
Dynamic nodes (parameter-dependent connections):
- AI Agent with parser: reasoning="hasOutputParser creates additional input for schema", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Vector Store as tool: reasoning="Tool mode provides AI connection output", parameters={{ mode: "retrieve-as-tool" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Document Loader binary: reasoning="Binary mode for processing files instead of JSON", parameters={{ dataType: "binary" }}
</parameter_examples>
</connection_parameters_rules>
<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
<main_connections>
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
</main_connections>
<ai_connections>
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
Why: Sub-nodes enhance main nodes with their capabilities
</ai_connections>
<rag_workflow_pattern>
CRITICAL: For RAG (Retrieval-Augmented Generation) workflows, follow this specific pattern:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
- The Vector Store receives the actual data through its main input
AI capability connections:
- Document Loader → Vector Store [ai_document] - provides document processing
- Embeddings → Vector Store [ai_embedding] - provides embedding generation
- Text Splitter → Document Loader [ai_textSplitter] - provides text chunking
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is NOT a data processor in the main flow
- Document Loader is an AI sub-node that gives Vector Store or Summarization Chain the ability to process documents
Example RAG workflow:
1. Schedule Trigger → HTTP Request (download PDF)
2. HTTP Request → Vector Store (main data flow)
3. Token Splitter → Document Loader [ai_textSplitter]
4. Document Loader → Vector Store [ai_document]
5. OpenAI Embeddings → Vector Store [ai_embedding]
Why: Vector Store needs three things: data (main input), document processing capability (Document Loader), and embedding capability (Embeddings)
</rag_workflow_pattern>
</node_connections_understanding>
<node_defaults_warning>
⚠️ CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES ⚠️
Default values are a common source of runtime failures. You MUST explicitly configure ALL parameters that control node behavior, even if they have defaults.
Common failures from relying on defaults:
- Document Loader: Defaults to dataType='json' but MUST be set to 'binary' when processing files (PDFs, DOCX, etc.)
- Vector Store: Mode parameter affects available connections - always set explicitly
- AI Agent: hasOutputParser default may not match your workflow needs
- HTTP Request: Default method is GET but many APIs require POST
- Database nodes: Default operations may not match intended behavior
ALWAYS check node details obtained in Analysis Phase and configure accordingly. Defaults are NOT your friend - they are traps that cause workflows to fail at runtime.
</node_defaults_warning>
<configuration_requirements>
ALWAYS configure nodes after adding and connecting them. This is NOT optional.
Use update_node_parameters for EVERY node that processes data:
- HTTP Request: MUST set URL, method, headers
- Set: MUST define fields to set
- Code: MUST provide the code to execute
- AI nodes: MUST configure prompts and models
- Database nodes: MUST set queries
- Trigger nodes: MUST define schedules/conditions
- Tool nodes: Use $fromAI expressions for dynamic values based on context (recipients, subjects, messages, dates)
- Document Loader: MUST set dataType parameter - 'json' for JSON data, 'binary' for files (PDF, DOCX, etc.)
- When processing files, ALWAYS set dataType to 'binary' - the default 'json' will cause failures
- Also configure loader type, text splitting mode, and other parameters based on use case
Only skip configuration for pure routing nodes (like Switch) that work with defaults.
Configure multiple nodes in parallel:
- update_node_parameters({{ nodeId: "httpRequest1", instructions: ["Set URL to https://api.example.com/data", "Add header Authorization: Bearer token"] }})
- update_node_parameters({{ nodeId: "set1", instructions: ["Add field 'processed' with value true", "Add field 'timestamp' with current date"] }})
- update_node_parameters({{ nodeId: "code1", instructions: ["Parse JSON input", "Extract and return user emails array"] }})
- update_node_parameters({{ nodeId: "gmailTool1", instructions: ["Set sendTo to ={{ $fromAI('to') }}", "Set subject to \${{ $fromAI('subject') }}", "Set message to =\${{ $fromAI('message_html') }}"] }})
- update_node_parameters({{ nodeId: "documentLoader1", instructions: ["Set dataType to 'binary' for processing PDF files", "Set loader to 'pdfLoader'", "Enable splitPages option"] }})
Why: Unconfigured nodes WILL fail at runtime
</configuration_requirements>
<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
Why: Purpose-built parsers are more reliable and handle edge cases better than custom code.
Use Code nodes only for:
- Simple string manipulations
- Already structured data (JSON, CSV)
- Custom business logic beyond parsing
</data_parsing_strategy>
<fromAI_expressions>
## CRITICAL: $fromAI Expression Support for Tool Nodes
Tool nodes (nodes ending with "Tool" like Gmail Tool, Google Calendar Tool, etc.) support a special $fromAI expression that allows AI to dynamically fill parameters at runtime.
### When to Use $fromAI
- ONLY available in tool nodes (node types ending with "Tool")
- Use when the AI should determine the value based on context
- Ideal for parameters that vary based on user input or conversation context
### $fromAI Syntax
\`={{ $fromAI('key', 'description', 'type', defaultValue) }}\`
### Parameters
- key: Unique identifier (1-64 chars, alphanumeric/underscore/hyphen)
- description: Optional description for the AI (use empty string '' if not needed)
- type: 'string' | 'number' | 'boolean' | 'json' (defaults to 'string')
- defaultValue: Optional fallback value
### Tool Node Examples
#### Gmail Tool - Sending Email
{{
"sendTo": "={{ $fromAI('to') }}",
"subject": "={{ $fromAI('subject') }}",
"message": "={{ $fromAI('message_html') }}"
}}
#### Google Calendar Tool - Filtering Events
{{
"timeMin": "={{ $fromAI('After', '', 'string') }}",
"timeMax": "={{ $fromAI('Before', '', 'string') }}"
}}
### Mixed Usage Examples
You can combine $fromAI with regular text:
- "Subject: {{ $fromAI('subject') }} - Automated"
- "Dear {{ $fromAI('recipientName', 'Customer name', 'string', 'Customer') }}, "
### Important Rules
1. ONLY use $fromAI in tool nodes (check if node type ends with "Tool")
2. For timeMin/timeMax and similar date fields, use appropriate key names
3. The AI will fill these values based on context during execution
4. Don't use $fromAI in regular nodes like Set, IF, HTTP Request, etc.
## Tool Node Parameter Guidelines
### Identifying Tool Nodes
1. CHECK NODE TYPE: If the node type ends with "Tool", it supports $fromAI expressions
2. COMMON TOOL NODES:
- Gmail Tool (gmailTool): to, subject, message → use $fromAI
- Google Calendar Tool (googleCalendarTool): timeMin, timeMax → use $fromAI
- Slack Tool (slackTool): channel, message → use $fromAI
- Microsoft Teams Tool: channel, message → use $fromAI
- Telegram Tool: chatId, text → use $fromAI
- Other communication/document tools: content fields → use $fromAI
### When to Use $fromAI in Tool Nodes
1. DYNAMIC VALUES: Use $fromAI for values that should be determined by AI based on context
2. USER INPUT FIELDS: Recipients, subjects, messages, date ranges
3. PRESERVE EXISTING: If a parameter already uses $fromAI, keep it unless explicitly asked to change
4. DATE/TIME FIELDS: Use descriptive key names for clarity
### Tool Node Parameter Patterns
- Email recipients: "={{ $fromAI('to') }}"
- Email subjects: "={{ $fromAI('subject') }}"
- Message content: "={{ $fromAI('message_html') }}" or "={{ $fromAI('message') }}"
- Date ranges: "={{ $fromAI('After', '', 'string') }}"
- Channel IDs: "={{ $fromAI('channel') }}"
</fromAI_expressions>
<proactive_design>
Anticipate workflow needs and suggest enhancements:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
- Split In Batches for large dataset processing
Why: Proactive suggestions create more robust, production-ready workflows
</proactive_design>
<parameter_updates>
When modifying existing nodes:
- Use update_node_parameters with natural language instructions
- Update multiple nodes in parallel for efficiency
- The tool preserves existing parameters while applying changes
- For tool nodes, use $fromAI expressions for dynamic values: "Set recipient to ={{ $fromAI('to') }}"
- For regular nodes, use static values or expressions: "Set URL to https://api.example.com"
- Proceed directly with updates when you have the needed information
</parameter_updates>
<handling_uncertainty>
When unsure about specific values:
- Add nodes and connections confidently
- For uncertain parameters, use update_node_parameters with clear placeholders
- For tool nodes with dynamic values, use $fromAI expressions instead of placeholders
- Always mention what needs user input in your response
Example for regular nodes:
update_node_parameters({{
nodeId: "httpRequest1",
instructions: ["Set URL to YOUR_API_ENDPOINT", "Add your authentication headers"]
}})
Example for tool nodes:
update_node_parameters({{
nodeId: "gmailTool1",
instructions: ["Set sendTo to {{ $fromAI('to') }}", "Set subject to {{ $fromAI('subject') }}"]
}})
Then tell the user: "I've set up the Gmail Tool node with dynamic AI parameters - it will automatically determine recipients and subjects based on context."
</handling_uncertainty>`;
const responsePatterns = `
<response_patterns>
After completing workflow tasks, follow this structure:
1. **Brief Summary** (1-2 sentences)
State what was created/modified without listing every parameter
2. **Key Requirements** (if any)
- Credentials needed
- Parameters the user should verify
- Any manual configuration required
3. **How to Use** (when relevant)
Quick steps to get started
4. **Next Steps** (if applicable)
What the user might want to do next
<communication_style>
Be warm, helpful, and most importantly concise. Focus on actionable information.
- Lead with what was accomplished
- Provide clear next steps
- Highlight only critical configuration needs
- Be warm and encouraging without excessive enthusiasm
- Use emojis sparingly (1-2 max per response)
- Focus on what the user needs to know
- Expand details only when asked
- End with a brief note that the workflow can be adjusted if needed
</communication_style>
</response_patterns>
`;
const currentWorkflowJson = `
<current_workflow_json>
{workflowJSON}
</current_workflow_json>`;
const currentExecutionData = `
<current_simplified_execution_data>
{executionData}
</current_simplified_execution_data>`;
const currentExecutionNodesSchemas = `
<current_execution_nodes_schemas>
{executionSchema}
</current_execution_nodes_schemas>`;
export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text: systemPrompt,
cache_control: { type: 'ephemeral' },
},
{
type: 'text',
text: currentWorkflowJson,
},
{
type: 'text',
text: currentExecutionData,
},
{
type: 'text',
text: currentExecutionNodesSchemas,
},
{
type: 'text',
text: responsePatterns,
cache_control: { type: 'ephemeral' },
},
],
],
['placeholder', '{messages}'],
]);
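
A small sketch of how the exported prompt could be rendered outside the agent; the import path and message content are assumptions, while the variable names match the placeholders defined above:

import { HumanMessage } from '@langchain/core/messages';
import { mainAgentPrompt } from './prompts'; // path assumed

async function renderPromptExample() {
  // workflowJSON, executionData and executionSchema fill the template constants above;
  // `messages` feeds the trailing placeholder with the conversation so far.
  return await mainAgentPrompt.formatMessages({
    workflowJSON: JSON.stringify({ nodes: [], connections: {} }, null, 2),
    executionData: 'No execution data available',
    executionSchema: 'No execution schema available',
    messages: [new HumanMessage('Build a workflow that fetches RSS items every hour')],
  });
}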

View File

@@ -0,0 +1,155 @@
import { tool } from '@langchain/core/tools';
import type { Logger } from '@n8n/backend-common';
import type { IConnections } from 'n8n-workflow';
import { z } from 'zod';
import { ValidationError, ToolExecutionError } from '../errors';
import { createProgressReporter, reportProgress } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import { getCurrentWorkflow, getWorkflowState, removeNodeFromWorkflow } from './helpers/state';
import { validateNodeExists, createNodeNotFoundError } from './helpers/validation';
import type { RemoveNodeOutput } from '../types/tools';
/**
* Schema for the remove node tool
*/
const removeNodeSchema = z.object({
nodeId: z.string().describe('The ID of the node to remove from the workflow'),
});
/**
* Count connections that will be removed for a node
*/
function countNodeConnections(nodeId: string, connections: IConnections): number {
let count = 0;
// Count outgoing connections
if (connections[nodeId]) {
for (const connectionType of Object.values(connections[nodeId])) {
if (Array.isArray(connectionType)) {
for (const outputs of connectionType) {
if (Array.isArray(outputs)) {
count += outputs.length;
}
}
}
}
}
// Count incoming connections
for (const [_sourceNodeId, nodeConnections] of Object.entries(connections)) {
for (const outputs of Object.values(nodeConnections)) {
if (Array.isArray(outputs)) {
for (const outputConnections of outputs) {
if (Array.isArray(outputConnections)) {
count += outputConnections.filter((conn) => conn.node === nodeId).length;
}
}
}
}
}
return count;
}
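// Illustrative shape of the IConnections map traversed above (hypothetical values, not taken
// from this change). Connections are keyed by the source node identifier, then by connection
// type, then grouped per output index:
//
//   {
//     node1: { main: [[{ node: 'node2', type: 'main', index: 0 }]] },
//     node3: { ai_tool: [[{ node: 'node2', type: 'ai_tool', index: 0 }]] },
//   }
//
// For nodeId 'node2' the function above would count 2 incoming and 0 outgoing connections.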
/**
* Build the response message for the removed node
*/
function buildResponseMessage(
nodeName: string,
nodeType: string,
connectionsRemoved: number,
): string {
const parts: string[] = [`Successfully removed node "${nodeName}" (${nodeType})`];
if (connectionsRemoved > 0) {
parts.push(`Removed ${connectionsRemoved} connection${connectionsRemoved > 1 ? 's' : ''}`);
}
return parts.join('\n');
}
/**
* Factory function to create the remove node tool
*/
export function createRemoveNodeTool(_logger?: Logger) {
return tool(
(input, config) => {
const reporter = createProgressReporter(config, 'remove_node');
try {
// Validate input using Zod schema
const validatedInput = removeNodeSchema.parse(input);
const { nodeId } = validatedInput;
// Report tool start
reporter.start(validatedInput);
// Get current state
const state = getWorkflowState();
const workflow = getCurrentWorkflow(state);
// Report progress
reportProgress(reporter, `Removing node ${nodeId}`);
// Find the node to remove
const nodeToRemove = validateNodeExists(nodeId, workflow.nodes);
if (!nodeToRemove) {
const error = createNodeNotFoundError(nodeId);
reporter.error(error);
return createErrorResponse(config, error);
}
// Count connections that will be removed
const connectionsRemoved = countNodeConnections(nodeId, workflow.connections);
// Build success message
const message = buildResponseMessage(
nodeToRemove.name,
nodeToRemove.type,
connectionsRemoved,
);
// Report completion
const output: RemoveNodeOutput = {
removedNodeId: nodeId,
removedNodeName: nodeToRemove.name,
removedNodeType: nodeToRemove.type,
connectionsRemoved,
message,
};
reporter.complete(output);
// Return success with state updates
const stateUpdates = removeNodeFromWorkflow(nodeId);
return createSuccessResponse(config, message, stateUpdates);
} catch (error) {
// Handle validation or unexpected errors
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid input parameters', {
extra: { errors: error.errors },
});
reporter.error(validationError);
return createErrorResponse(config, validationError);
}
const toolError = new ToolExecutionError(
error instanceof Error ? error.message : 'Unknown error occurred',
{
toolName: 'remove_node',
cause: error instanceof Error ? error : undefined,
},
);
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'remove_node',
description:
'Remove a node from the workflow by its ID. This will also remove all connections to and from the node. Use this tool when you need to delete a node that is no longer needed in the workflow.',
schema: removeNodeSchema,
},
);
}
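
A hedged invocation sketch for the tool above. Note that getWorkflowState relies on LangGraph's getCurrentTaskInput, so a call like this only works inside a running graph task; the tool tests in this change mock getCurrentTaskInput for the same reason. The node ID and import path are illustrative:

import { createRemoveNodeTool } from './remove-node.tool'; // path assumed

async function removeNodeExample() {
  const removeNodeTool = createRemoveNodeTool();

  // On success the returned Command carries state updates that drop the node and its connections.
  return await removeNodeTool.invoke(
    { nodeId: 'node-123' },
    { runName: 'remove_node-example' },
  );
}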

View File

@@ -0,0 +1,301 @@
import { getCurrentTaskInput } from '@langchain/langgraph';
import type { INodeTypeDescription, INode } from 'n8n-workflow';
import {
createNode,
createWorkflow,
nodeTypes,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
setupWorkflowState,
expectToolSuccess,
expectToolError,
expectNodeAdded,
buildAddNodeInput,
REASONING,
type ParsedToolContent,
} from '../../../test/test-utils';
import { createAddNodeTool } from '../add-node.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
// Transform the Command params to match what the test expects
content: JSON.stringify(params),
})),
}));
// Mock crypto module
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
jest.mock('crypto', () => ({
...jest.requireActual('crypto'),
randomUUID: jest.fn().mockReturnValue('test-uuid-123'),
}));
describe('AddNodeTool', () => {
let nodeTypesList: INodeTypeDescription[];
let addNodeTool: ReturnType<typeof createAddNodeTool>;
const mockGetCurrentTaskInput = getCurrentTaskInput as jest.MockedFunction<
typeof getCurrentTaskInput
>;
beforeEach(() => {
jest.clearAllMocks();
nodeTypesList = [nodeTypes.code, nodeTypes.httpRequest, nodeTypes.webhook, nodeTypes.agent];
addNodeTool = createAddNodeTool(nodeTypesList);
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should add a node with custom name', async () => {
const existingWorkflow = createWorkflow([createNode({ id: 'existing', name: 'Code' })]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfigWithWriter('add_nodes', 'test-call-1');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.code',
name: 'Process Data',
connectionParametersReasoning: REASONING.STATIC_NODE,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeAdded(content, {
name: 'Process Data',
type: 'n8n-nodes-base.code',
parameters: {},
});
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode?.id).toBeDefined();
expect(typeof addedNode?.id).toBe('string');
expect(addedNode?.position).toEqual(expect.any(Array));
expect(addedNode?.position?.length).toBe(2);
expectToolSuccess(content, 'Successfully added "Process Data"');
expect(mockConfig.writer).toHaveBeenCalled();
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
expect(startMessage?.updates[0]?.data).toMatchObject({
nodeType: 'n8n-nodes-base.code',
name: 'Process Data',
});
const progressMessage = findProgressMessage(progressCalls, 'running', 'progress');
expect(progressMessage).toBeDefined();
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
});
it('should generate unique name when no custom name provided', async () => {
const existingWorkflow = createWorkflow([createNode({ id: 'existing', name: 'Code' })]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('add_nodes', 'test-call-1b');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.code',
name: 'Code',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expect(content.update.workflowOperations?.[0]?.nodes?.[0]?.name).toBe('Code1');
});
it('should handle connection parameters for AI nodes', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('add_nodes', 'test-call-2');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: '@n8n/n8n-nodes-langchain.agent',
name: 'AI Assistant',
connectionParametersReasoning:
REASONING.DYNAMIC_AI_NODE + ', setting hasOutputParser:true',
connectionParameters: { hasOutputParser: true },
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeAdded(content, {
name: 'AI Assistant',
type: '@n8n/n8n-nodes-langchain.agent',
parameters: { hasOutputParser: true },
});
});
it('should handle validation errors', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('add_nodes', 'test-call-3');
try {
await addNodeTool.invoke(
{
nodeType: 'n8n-nodes-base.code',
} as Parameters<typeof addNodeTool.invoke>[0],
mockConfig,
);
expect(true).toBe(false); // force failure: invoke should have thrown a schema validation error
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle unknown node type', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('add_nodes', 'test-call-4');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.unknown',
name: 'Unknown Node',
connectionParametersReasoning: 'Testing unknown node',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node type "n8n-nodes-base.unknown" not found');
});
it('should calculate correct position for nodes', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', position: [100, 100] }),
createNode({ id: 'node2', position: [300, 100] }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('add_nodes', 'test-call-5');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.httpRequest',
name: 'Fetch Data',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode?.position).toBeDefined();
expect(addedNode?.position?.[0]).toBeGreaterThan(300);
});
it('should handle webhook nodes with special properties', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('add_nodes', 'test-call-6');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.webhook',
name: 'Incoming Webhook',
connectionParametersReasoning: REASONING.WEBHOOK_NODE,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0] as INode & {
webhookId?: string;
};
expect(addedNode?.type).toBe('n8n-nodes-base.webhook');
expect(addedNode?.webhookId).toBeDefined();
expect(typeof addedNode?.webhookId).toBe('string');
});
it('should use custom name instead of default name', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('add_nodes', 'test-call-7');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.code',
name: 'My Custom Code Node',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode?.name).toBe('My Custom Code Node');
});
it('should generate unique names when adding multiple nodes of same type', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code' }),
createNode({ id: 'node2', name: 'Code1' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('add_nodes', 'test-call-8');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: 'n8n-nodes-base.code',
name: 'Code',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode?.name).toBe('Code2');
});
it('should handle sub-nodes positioning differently', async () => {
const existingWorkflow = createWorkflow([createNode({ id: 'node1', position: [100, 100] })]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('add_nodes', 'test-call-9');
const result = await addNodeTool.invoke(
buildAddNodeInput({
nodeType: '@n8n/n8n-nodes-langchain.agent',
name: 'AI Agent',
connectionParametersReasoning: 'Agent node for AI processing',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode?.position?.[1]).toBeGreaterThan(100);
});
});
});

View File

@@ -0,0 +1,400 @@
import { getCurrentTaskInput } from '@langchain/langgraph';
import type { IConnections, INodeTypeDescription } from 'n8n-workflow';
import {
createNode,
createWorkflow,
nodeTypes,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
setupWorkflowState,
expectToolSuccess,
expectToolError,
expectWorkflowOperation,
buildConnectNodesInput,
type ParsedToolContent,
createNodeType,
} from '../../../test/test-utils';
import { createConnectNodesTool } from '../connect-nodes.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
describe('ConnectNodesTool', () => {
let nodeTypesList: INodeTypeDescription[];
let connectNodesTool: ReturnType<typeof createConnectNodesTool>;
const mockGetCurrentTaskInput = getCurrentTaskInput as jest.MockedFunction<
typeof getCurrentTaskInput
>;
beforeEach(() => {
jest.clearAllMocks();
nodeTypesList = [nodeTypes.code, nodeTypes.httpRequest, nodeTypes.webhook, nodeTypes.agent];
connectNodesTool = createConnectNodesTool(nodeTypesList);
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should connect two nodes with main connection', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
createNode({ id: 'node2', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfigWithWriter('connect_nodes', 'test-call-1');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'node1',
targetNodeId: 'node2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectWorkflowOperation(content, 'mergeConnections');
const connections = content.update.workflowOperations?.[0]?.connections as IConnections;
expect(connections).toBeDefined();
expect(connections?.['Code']).toBeDefined();
expect(connections?.['Code']?.main).toBeDefined();
expect(connections?.['Code']?.main?.[0]).toEqual([
{
node: 'HTTP Request',
type: 'main',
index: 0,
},
]);
expectToolSuccess(content, 'Connected: Code → HTTP Request (main)');
// Check progress messages
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
});
it('should auto-swap nodes when AI sub-node is specified as target', async () => {
// Create node types with proper AI connections
const agentNodeType = createNodeType({
displayName: 'AI Agent',
name: '@n8n/n8n-nodes-langchain.agent',
group: ['output'],
inputs: ['main', 'ai_tool'],
outputs: ['main'],
});
const toolNodeType = createNodeType({
displayName: 'Calculator Tool',
name: '@n8n/n8n-nodes-langchain.toolCalculator',
group: ['output'],
inputs: [],
outputs: ['ai_tool'],
});
// Update node types list
nodeTypesList = [nodeTypes.code, nodeTypes.httpRequest, agentNodeType, toolNodeType];
connectNodesTool = createConnectNodesTool(nodeTypesList);
const existingWorkflow = createWorkflow([
createNode({ id: 'agent1', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent' }),
createNode({
id: 'tool1',
name: 'Calculator',
type: '@n8n/n8n-nodes-langchain.toolCalculator',
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-2');
const result = await connectNodesTool.invoke(
buildConnectNodesInput({
// Intentionally backwards - tool should be source, agent should be target
sourceNodeId: 'agent1',
targetNodeId: 'tool1',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// The tool should auto-swap and connect Calculator (source) to AI Agent (target)
expectWorkflowOperation(content, 'mergeConnections');
const connections = content.update.workflowOperations?.[0]?.connections as IConnections;
expect(connections).toBeDefined();
expect(connections?.['Calculator']).toBeDefined();
expect(connections?.['Calculator']?.ai_tool?.[0]).toEqual([
{
node: 'AI Agent',
type: 'ai_tool',
index: 0,
},
]);
// Check for swapped message pattern
expectToolSuccess(content, /Auto-corrected connection: Calculator \(ai_tool\) → AI Agent/);
});
it('should connect AI sub-nodes with proper connection type', async () => {
// Update the agent node type to accept ai_languageModel input
const agentNodeType = createNodeType({
displayName: 'AI Agent',
name: '@n8n/n8n-nodes-langchain.agent',
group: ['output'],
inputs: ['main', 'ai_languageModel'],
outputs: ['main'],
});
// Add language model node type
const languageModelNodeType = createNodeType({
displayName: 'OpenAI Chat Model',
name: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
group: ['output'],
inputs: [],
outputs: ['ai_languageModel'],
});
// Replace the agent node type in the list
nodeTypesList = nodeTypesList.filter((nt) => nt.name !== '@n8n/n8n-nodes-langchain.agent');
nodeTypesList.push(agentNodeType, languageModelNodeType);
connectNodesTool = createConnectNodesTool(nodeTypesList);
const existingWorkflow = createWorkflow([
createNode({
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
}),
createNode({
id: 'model1',
name: 'OpenAI Model',
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-3');
const result = await connectNodesTool.invoke(
buildConnectNodesInput({
sourceNodeId: 'model1',
targetNodeId: 'agent1',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectWorkflowOperation(content, 'mergeConnections');
const connections = content.update.workflowOperations?.[0]?.connections as IConnections;
expect(connections).toBeDefined();
expect(connections?.['OpenAI Model']).toBeDefined();
expect(connections?.['OpenAI Model']?.ai_languageModel).toBeDefined();
expect(connections?.['OpenAI Model']?.ai_languageModel?.[0]).toEqual([
{
node: 'AI Agent',
type: 'ai_languageModel',
index: 0,
},
]);
expectToolSuccess(content, 'Connected: OpenAI Model → AI Agent (ai_languageModel)');
});
it('should handle custom source and target indices', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Multi Output', type: 'n8n-nodes-base.code' }),
createNode({ id: 'node2', name: 'Multi Input', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-4');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'node1',
targetNodeId: 'node2',
sourceOutputIndex: 1,
targetInputIndex: 2,
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const connections = content.update.workflowOperations?.[0]?.connections as IConnections;
expect(connections?.['Multi Output']).toBeDefined();
expect(connections?.['Multi Output']?.main?.[1]).toEqual([
{
node: 'Multi Input',
type: 'main',
index: 2,
},
]);
});
it('should handle validation errors for missing required fields', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('connect_nodes', 'test-call-5');
try {
await connectNodesTool.invoke(
{
sourceNodeId: 'node1',
// Missing targetNodeId
} as Parameters<typeof connectNodesTool.invoke>[0],
mockConfig,
);
expect(true).toBe(false); // force failure: invoke should have thrown a schema validation error
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle non-existent source node', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-6');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'nonexistent',
targetNodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node with ID "nonexistent" not found in workflow');
});
it('should handle non-existent target node', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-7');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'node1',
targetNodeId: 'nonexistent',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node with ID "nonexistent" not found in workflow');
});
it('should handle invalid connection between incompatible nodes', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'webhook1', name: 'Webhook 1', type: 'n8n-nodes-base.webhook' }),
createNode({ id: 'webhook2', name: 'Webhook 2', type: 'n8n-nodes-base.webhook' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-8');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'webhook1',
targetNodeId: 'webhook2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Both webhooks are triggers, so they can't be connected
expectToolError(content, /Error: No compatible connection types found between/);
});
it('should detect existing connection and handle gracefully', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
createNode({ id: 'node2', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest' }),
]);
// Add existing connection
existingWorkflow.connections = {
node1: {
main: [[{ node: 'node2', type: 'main', index: 0 }]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-9');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'node1',
targetNodeId: 'node2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Should still succeed even if connection already exists
expectToolSuccess(content, 'Connected: Code → HTTP Request (main)');
});
it('should handle multiple output types and pick the right one', async () => {
// Create a node type with multiple output types
const multiOutputNode = createNodeType({
displayName: 'Multi Output',
name: 'test.multiOutput',
outputs: ['main', 'ai_tool'],
});
nodeTypesList.push(multiOutputNode);
connectNodesTool = createConnectNodesTool(nodeTypesList);
const existingWorkflow = createWorkflow([
createNode({ id: 'multi1', name: 'Multi Output', type: 'test.multiOutput' }),
createNode({ id: 'code1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('connect_nodes', 'test-call-10');
const result = await connectNodesTool.invoke(
{
sourceNodeId: 'multi1',
targetNodeId: 'code1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Should pick 'main' connection type since Code node accepts 'main'
expectToolSuccess(content, 'Connected: Multi Output → Code (main)');
});
});
});

View File

@@ -0,0 +1,459 @@
import type { INodeTypeDescription } from 'n8n-workflow';
import {
nodeTypes,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
expectToolSuccess,
expectToolError,
buildNodeDetailsInput,
expectNodeDetails,
expectXMLTag,
type ParsedToolContent,
createNodeType,
} from '../../../test/test-utils';
import { createNodeDetailsTool } from '../node-details.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
describe('NodeDetailsTool', () => {
let nodeTypesList: INodeTypeDescription[];
let nodeDetailsTool: ReturnType<typeof createNodeDetailsTool>;
beforeEach(() => {
jest.clearAllMocks();
nodeTypesList = [
nodeTypes.code,
nodeTypes.httpRequest,
nodeTypes.webhook,
nodeTypes.agent,
nodeTypes.openAiModel,
nodeTypes.setNode,
nodeTypes.ifNode,
nodeTypes.mergeNode,
nodeTypes.vectorStoreNode,
];
nodeDetailsTool = createNodeDetailsTool(nodeTypesList);
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should retrieve basic node details with default options', async () => {
const mockConfig = createToolConfigWithWriter('get_node_details', 'test-call-1');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.code',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolSuccess(content, '<node_details>');
const message = content.update.messages[0]?.kwargs.content;
// Check basic details
expectNodeDetails(content, {
name: 'n8n-nodes-base.code',
displayName: 'Code',
description: 'Test node description',
});
// Check connections are included by default
expect(message).toContain('<connections>');
expect(message).toContain('<input>main</input>');
expect(message).toContain('<output>main</output>');
// Check properties are NOT included by default
expect(message).not.toContain('<properties>');
// Check progress messages
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
expect(startMessage?.updates[0]?.data).toMatchObject({
nodeName: 'n8n-nodes-base.code',
withParameters: false,
withConnections: true,
});
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
});
it('should include node parameters when requested', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-2');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.httpRequest',
withParameters: true,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check properties are included
expect(message).toContain('<properties>');
expect(message).toContain('"displayName": "URL"');
expect(message).toContain('"name": "url"');
expect(message).toContain('"displayName": "Method"');
expect(message).toContain('"name": "method"');
});
it('should exclude connections when requested', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-3');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.code',
withConnections: false,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check connections are NOT included
expect(message).not.toContain('<connections>');
expect(message).not.toContain('<input>');
expect(message).not.toContain('<output>');
});
it('should handle node with subtitle', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-4');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: '@n8n/n8n-nodes-langchain.vectorStore',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check subtitle is included
expectXMLTag(
message,
'subtitle',
'={{$parameter["mode"] === "retrieve" ? "Retrieve" : "Insert"}}',
);
});
it('should handle unknown node type', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-5');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.unknown',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node type "n8n-nodes-base.unknown" not found');
});
it('should handle validation errors for missing required fields', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-6');
try {
await nodeDetailsTool.invoke(
{
// Missing nodeName
withParameters: true,
} as Parameters<typeof nodeDetailsTool.invoke>[0],
mockConfig,
);
expect(true).toBe(false); // force failure: invoke should have thrown a schema validation error
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle nodes with no inputs (triggers)', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-7');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.webhook',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check for "none" inputs
expect(message).toContain('<inputs>none</inputs>');
expect(message).toContain('<output>main</output>');
});
it('should handle nodes with multiple inputs/outputs', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-8');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.merge',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check multiple inputs
expect(message).toContain('<connections>');
const inputMatches = message.match(/<input>main<\/input>/g);
expect(inputMatches?.length).toBe(2);
});
it('should handle nodes with array outputs (If node)', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-9');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: 'n8n-nodes-base.if',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check multiple outputs
const outputMatches = message.match(/<output>main<\/output>/g);
expect(outputMatches?.length).toBe(2);
});
it('should handle expression-based inputs/outputs', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-10');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: '@n8n/n8n-nodes-langchain.vectorStore',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check expression-based inputs/outputs are shown as expressions
expect(message).toContain('<input>={{ ((parameter)');
expect(message).toContain('<output>={{ ((parameter)');
});
it('should truncate very long properties', async () => {
// Create a node with many properties
const nodeWithManyProps = createNodeType({
displayName: 'Node With Many Properties',
name: 'test.manyProps',
properties: Array.from({ length: 50 }, (_, i) => ({
displayName: `Property ${i}`,
name: `prop${i}`,
type: 'string',
default: `Default value for property ${i}`,
description: `This is a very long description for property ${i} that should help make the properties section exceed 1000 characters when serialized to JSON`,
})),
});
const testNodeTypes = [...nodeTypesList, nodeWithManyProps];
const testTool = createNodeDetailsTool(testNodeTypes);
const mockConfig = createToolConfig('get_node_details', 'test-call-11');
const result = await testTool.invoke(
buildNodeDetailsInput({
nodeName: 'test.manyProps',
withParameters: true,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check that properties were truncated
expect(message).toContain('<properties>');
expect(message).toContain('... Rest of properties omitted');
});
it('should handle AI sub-nodes properly', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-12');
const result = await nodeDetailsTool.invoke(
buildNodeDetailsInput({
nodeName: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check AI node specifics
expectNodeDetails(content, {
name: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
displayName: 'OpenAI Chat Model',
});
// Check AI outputs
expect(message).toContain('<inputs>none</inputs>');
expect(message).toContain('<output>ai_languageModel</output>');
});
it('should handle complex node configurations', async () => {
const complexNode = createNodeType({
displayName: 'Complex Node',
name: 'test.complex',
subtitle: '={{ $parameter["mode"] || "default" }}',
inputs: [
{ displayName: 'Main Input', type: 'main' },
{ displayName: 'AI Input', type: 'ai_tool', required: false },
],
outputs: [
{ displayName: 'Success', type: 'main' },
{ displayName: 'Error', type: 'main' },
],
outputNames: ['success', 'error'],
properties: [
{
displayName: 'Mode',
name: 'mode',
type: 'options',
options: [
{ name: 'Default', value: 'default' },
{ name: 'Advanced', value: 'advanced' },
],
default: 'default',
},
],
});
const testNodeTypes = [...nodeTypesList, complexNode];
const testTool = createNodeDetailsTool(testNodeTypes);
const mockConfig = createToolConfig('get_node_details', 'test-call-13');
const result = await testTool.invoke(
buildNodeDetailsInput({
nodeName: 'test.complex',
withParameters: true,
withConnections: true,
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check complex inputs/outputs formatting
expect(message).toContain('<input>{"displayName":"Main Input","type":"main"}</input>');
expect(message).toContain(
'<input>{"displayName":"AI Input","type":"ai_tool","required":false}</input>',
);
expect(message).toContain('<output>{"displayName":"Success","type":"main"}</output>');
expect(message).toContain('<output>{"displayName":"Error","type":"main"}</output>');
});
it('should handle both parameters and connections together', async () => {
const mockConfig = createToolConfig('get_node_details', 'test-call-14');
const result = await nodeDetailsTool.invoke(
{
nodeName: 'n8n-nodes-base.set',
withParameters: true,
withConnections: true,
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check both sections are present
expect(message).toContain('<properties>');
expect(message).toContain('<connections>');
// Check ordering - properties should come before connections
const propsIndex = message.indexOf('<properties>');
const connectionsIndex = message.indexOf('<connections>');
expect(propsIndex).toBeLessThan(connectionsIndex);
});
it('should format empty outputs correctly', async () => {
const noOutputNode = createNodeType({
displayName: 'No Output Node',
name: 'test.noOutput',
inputs: ['main'],
outputs: [],
});
const testNodeTypes = [...nodeTypesList, noOutputNode];
const testTool = createNodeDetailsTool(testNodeTypes);
const mockConfig = createToolConfig('get_node_details', 'test-call-15');
const result = await testTool.invoke(
buildNodeDetailsInput({
nodeName: 'test.noOutput',
}),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, '<node_details>');
// Check empty outputs formatting
expect(message).toContain('<outputs>none</outputs>');
});
});
});

View File

@@ -0,0 +1,446 @@
import { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
import {
nodeTypes,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
expectToolSuccess,
buildNodeSearchQuery,
type ParsedToolContent,
createNodeType,
} from '../../../test/test-utils';
import { createNodeSearchTool } from '../node-search.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
describe('NodeSearchTool', () => {
let nodeTypesList: INodeTypeDescription[];
let nodeSearchTool: ReturnType<typeof createNodeSearchTool>;
beforeEach(() => {
jest.clearAllMocks();
// Create a comprehensive test node set
nodeTypesList = [
nodeTypes.code,
nodeTypes.httpRequest,
createNodeType({
...nodeTypes.webhook,
description: 'Starts workflow on webhook call',
}),
nodeTypes.setNode,
nodeTypes.ifNode,
nodeTypes.mergeNode,
// AI nodes
nodeTypes.openAiModel,
nodeTypes.agent,
createNodeType({
name: '@n8n/n8n-nodes-langchain.toolCalculator',
displayName: 'Calculator Tool',
description: 'Perform mathematical calculations',
inputs: [],
outputs: ['ai_tool'],
}),
createNodeType({
name: '@n8n/n8n-nodes-langchain.toolCode',
displayName: 'Code Tool',
description: 'Execute custom code as a tool',
inputs: [],
outputs: ['ai_tool'],
}),
createNodeType({
name: '@n8n/n8n-nodes-langchain.memoryBufferWindow',
displayName: 'Window Buffer Memory',
description: 'Stores conversation in a sliding window',
inputs: [],
outputs: ['ai_memory'],
}),
createNodeType({
name: 'n8n-nodes-base.httpBin',
displayName: 'HTTP Bin',
description: 'Test HTTP requests',
codex: {
alias: ['httpbinero', 'request test'],
},
}),
// Expression-based node
nodeTypes.vectorStoreNode,
];
nodeSearchTool = createNodeSearchTool(nodeTypesList);
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should search nodes by name', async () => {
const mockConfig = createToolConfigWithWriter('search_nodes', 'test-call-1');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'http')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
expect(message).toContain('nodes matching "http"');
expect(message).toContain('<node_name>n8n-nodes-base.httpRequest</node_name>');
expect(message).toContain('<node_name>n8n-nodes-base.httpBin</node_name>');
// Check progress messages
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
});
it('should search sub-nodes by connection type', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-2');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('subNodeSearch', undefined, NodeConnectionTypes.AiTool)],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
expect(message).toContain('nodes matching "sub-nodes with ai_tool output"');
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.toolCalculator</node_name>');
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.toolCode</node_name>');
expect(message).toContain('<node_outputs>["ai_tool"]</node_outputs>');
});
it('should search sub-nodes with name filter', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-3');
const result = await nodeSearchTool.invoke(
{
queries: [
buildNodeSearchQuery('subNodeSearch', 'calculator', NodeConnectionTypes.AiTool),
],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
expect(message).toContain(
'nodes matching "sub-nodes with ai_tool output matching "calculator""',
);
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.toolCalculator</node_name>');
expect(message).not.toContain('<node_name>@n8n/n8n-nodes-langchain.toolCode</node_name>');
});
it('should handle multiple queries in a single request', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-4');
const result = await nodeSearchTool.invoke(
{
queries: [
buildNodeSearchQuery('name', 'code'),
buildNodeSearchQuery('subNodeSearch', undefined, NodeConnectionTypes.AiLanguageModel),
],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
// First query results
expect(message).toContain('nodes matching "code"');
expect(message).toContain('Code');
// Second query results
expect(message).toContain('nodes matching "sub-nodes with ai_languageModel output"');
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.lmChatOpenAi</node_name>');
});
it('should return no results message for non-matching queries', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-5');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'nonexistent')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolSuccess(content, 'No nodes found matching "nonexistent"');
});
it('should handle validation errors for missing queries', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-6');
try {
await nodeSearchTool.invoke(
{
queries: [],
},
mockConfig,
);
expect(true).toBe(false);
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle validation error for invalid query type', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-7');
try {
await nodeSearchTool.invoke(
{
queries: [
{
// @ts-expect-error Testing invalid input
queryType: 'invalid',
query: 'test',
},
],
},
mockConfig,
);
expect(true).toBe(false);
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle subNodeSearch without connectionType', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-8');
const result = await nodeSearchTool.invoke(
{
queries: [
{
queryType: 'subNodeSearch',
// Missing connectionType
},
],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolSuccess(content, 'No nodes found matching ""');
});
it('should handle name search without query', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-9');
const result = await nodeSearchTool.invoke(
{
queries: [
{
queryType: 'name',
// Missing query
},
],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolSuccess(content, 'No nodes found matching ""');
});
it('should search nodes by alias', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-10');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'httpbinero')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
expect(message).toContain('<node_name>n8n-nodes-base.httpBin</node_name>');
});
it('should handle expression-based outputs in sub-node search', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-11');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('subNodeSearch', 'vector', NodeConnectionTypes.AiTool)],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
// Vector Store should appear because its expression contains 'ai_tool'
// and its name contains 'vector'
expectToolSuccess(content, 'Found');
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.vectorStore</node_name>');
});
it('should handle case-insensitive search', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-12');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'CODE')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Found');
expect(message).toContain('<node_name>n8n-nodes-base.code</node_name>');
expect(message).toContain('<node_name>@n8n/n8n-nodes-langchain.toolCode</node_name>');
});
it('should respect result limit', async () => {
// Add many nodes that would match
const manyHttpNodes = Array.from({ length: 20 }, (_, i) =>
createNodeType({
name: `test.http${i}`,
displayName: `HTTP Node ${i}`,
description: 'Another HTTP node',
}),
);
const testNodeTypes = [...nodeTypesList, ...manyHttpNodes];
const testTool = createNodeSearchTool(testNodeTypes);
const mockConfig = createToolConfig('search_nodes', 'test-call-13');
const result = await testTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'http')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
// Count the number of <node> tags
const nodeMatches = message.match(/<node>/g);
expect(nodeMatches?.length).toBeLessThanOrEqual(15); // Default limit is 15
});
it('should track batch progress for multiple queries', async () => {
const mockConfig = createToolConfigWithWriter('search_nodes', 'test-call-14');
await nodeSearchTool.invoke(
{
queries: [
buildNodeSearchQuery('name', 'http'),
buildNodeSearchQuery('name', 'code'),
buildNodeSearchQuery('subNodeSearch', undefined, NodeConnectionTypes.AiMemory),
],
},
mockConfig,
);
const progressCalls = extractProgressMessages(mockConfig.writer);
// Should have progress messages for batch processing
const progressMessages = progressCalls.filter(
(msg) => msg.status === 'running' && msg.updates.some((u) => u.type === 'progress'),
);
expect(progressMessages.length).toBeGreaterThan(0);
// Check for batch-related progress messages
const batchMessages = progressMessages.filter((msg) =>
msg.updates.some(
(u) => typeof u.data?.message === 'string' && u.data.message.includes('Searching nodes'),
),
);
expect(batchMessages.length).toBeGreaterThan(0);
});
it('should handle mixed query results', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-15');
const result = await nodeSearchTool.invoke(
{
queries: [
buildNodeSearchQuery('name', 'nonexistent'),
buildNodeSearchQuery('name', 'webhook'),
// Search for a valid but non-existent connection type
buildNodeSearchQuery(
'subNodeSearch',
'nonexistent',
NodeConnectionTypes.AiOutputParser,
),
],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'No nodes found matching "nonexistent"');
expect(message).toContain('Found 1 nodes matching "webhook"');
expect(message).toContain(
'No nodes found matching "sub-nodes with ai_outputParser output matching "nonexistent""',
);
});
it('should include all node details in results', async () => {
const mockConfig = createToolConfig('search_nodes', 'test-call-16');
const result = await nodeSearchTool.invoke(
{
queries: [buildNodeSearchQuery('name', 'webhook')],
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
// Check all required fields are present
expect(message).toContain('<node_name>n8n-nodes-base.webhook</node_name>');
expect(message).toContain(
'<node_description>Starts workflow on webhook call</node_description>',
);
expect(message).toContain('<node_inputs>');
expect(message).toContain('<node_outputs>');
});
});
});

View File

@@ -0,0 +1,469 @@
import { getCurrentTaskInput } from '@langchain/langgraph';
import {
createNode,
createWorkflow,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
setupWorkflowState,
expectToolSuccess,
expectToolError,
expectNodeRemoved,
createConnection,
type ParsedToolContent,
} from '../../../test/test-utils';
import { createRemoveNodeTool } from '../remove-node.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
describe('RemoveNodeTool', () => {
let removeNodeTool: ReturnType<typeof createRemoveNodeTool>;
const mockGetCurrentTaskInput = getCurrentTaskInput as jest.MockedFunction<
typeof getCurrentTaskInput
>;
beforeEach(() => {
jest.clearAllMocks();
removeNodeTool = createRemoveNodeTool();
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should remove a node without connections', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
createNode({ id: 'node2', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfigWithWriter('remove_node', 'test-call-1');
const result = await removeNodeTool.invoke(
{
nodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node1');
expectToolSuccess(content, 'Successfully removed node "Code" (n8n-nodes-base.code)');
// Check progress messages
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
expect(startMessage?.updates[0]?.data).toMatchObject({
nodeId: 'node1',
});
const progressMessage = findProgressMessage(progressCalls, 'running', 'progress');
expect(progressMessage).toBeDefined();
expect(progressMessage?.updates[0]?.data?.message).toContain('Removing node node1');
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
expect(completeMessage?.updates[0]?.data).toMatchObject({
removedNodeId: 'node1',
removedNodeName: 'Code',
removedNodeType: 'n8n-nodes-base.code',
connectionsRemoved: 0,
});
});
it('should remove a node with outgoing connections', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code' }),
createNode({ id: 'node2', name: 'HTTP Request' }),
]);
// Add connection from node1 to node2
existingWorkflow.connections = {
node1: {
main: [[createConnection('node1', 'node2')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-2');
const result = await removeNodeTool.invoke(
{
nodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node1');
expectToolSuccess(content, 'Successfully removed node "Code"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 1 connection');
});
it('should remove a node with incoming connections', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code' }),
createNode({ id: 'node2', name: 'HTTP Request' }),
createNode({ id: 'node3', name: 'Set' }),
]);
// Add connections: node1 -> node2, node2 -> node3
existingWorkflow.connections = {
node1: {
main: [[createConnection('node1', 'node2')]],
},
node2: {
main: [[createConnection('node2', 'node3')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-3');
const result = await removeNodeTool.invoke(
{
nodeId: 'node2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node2');
expectToolSuccess(content, 'Successfully removed node "HTTP Request"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 2 connections');
});
it('should remove a node with multiple connections', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Webhook' }),
createNode({ id: 'node2', name: 'If' }),
createNode({ id: 'node3', name: 'Code1' }),
createNode({ id: 'node4', name: 'Code2' }),
createNode({ id: 'node5', name: 'Set' }),
]);
// Complex connections: node1 -> node2, node2 has two outputs -> node3 and node4, both converge to node5
existingWorkflow.connections = {
node1: {
main: [[createConnection('node1', 'node2')]],
},
node2: {
main: [
[createConnection('node2', 'node3')], // true branch
[createConnection('node2', 'node4')], // false branch
],
},
node3: {
main: [[createConnection('node3', 'node5')]],
},
node4: {
main: [[createConnection('node4', 'node5')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-4');
const result = await removeNodeTool.invoke(
{
nodeId: 'node2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node2');
expectToolSuccess(content, 'Successfully removed node "If"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 3 connections');
});
it('should handle removing non-existent node', async () => {
const existingWorkflow = createWorkflow([createNode({ id: 'node1', name: 'Code' })]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-5');
const result = await removeNodeTool.invoke(
{
nodeId: 'non-existent',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node with ID "non-existent" not found in workflow');
});
it('should handle removing node with AI connections', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'OpenAI Chat Model',
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
}),
createNode({ id: 'node2', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent' }),
createNode({
id: 'node3',
name: 'Calculator Tool',
type: '@n8n/n8n-nodes-langchain.toolCalculator',
}),
]);
// AI connections: OpenAI -> Agent, Calculator -> Agent
existingWorkflow.connections = {
node1: {
// eslint-disable-next-line @typescript-eslint/naming-convention
ai_languageModel: [[createConnection('node1', 'node2', 'ai_languageModel')]],
},
node3: {
ai_tool: [[createConnection('node3', 'node2', 'ai_tool')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-6');
const result = await removeNodeTool.invoke(
{
nodeId: 'node2',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node2');
expectToolSuccess(content, 'Successfully removed node "AI Agent"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 2 connections');
});
it('should handle validation errors', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('remove_node', 'test-call-7');
try {
await removeNodeTool.invoke(
{
// Missing nodeId
} as Parameters<typeof removeNodeTool.invoke>[0],
mockConfig,
);
expect(true).toBe(false);
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle empty workflow', async () => {
setupWorkflowState(mockGetCurrentTaskInput, createWorkflow([]));
const mockConfig = createToolConfig('remove_node', 'test-call-8');
const result = await removeNodeTool.invoke(
{
nodeId: 'any-node',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node with ID "any-node" not found in workflow');
});
it('should count connections correctly for complex workflows', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Merge' }),
createNode({ id: 'node2', name: 'Code1' }),
createNode({ id: 'node3', name: 'Code2' }),
createNode({ id: 'node4', name: 'Set' }),
]);
// Multiple nodes connecting to merge node
existingWorkflow.connections = {
node2: {
main: [[createConnection('node2', 'node1', 'main', 0)]], // to input 0
},
node3: {
main: [[createConnection('node3', 'node1', 'main', 1)]], // to input 1
},
node1: {
main: [[createConnection('node1', 'node4')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-9');
const result = await removeNodeTool.invoke(
{
nodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node1');
expectToolSuccess(content, 'Successfully removed node "Merge"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 3 connections');
});
it('should handle node with self-connections', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Loop Node' }),
createNode({ id: 'node2', name: 'Other Node' }),
]);
// Node with self-connection (loop) and external connection
existingWorkflow.connections = {
node1: {
main: [
[
createConnection('node1', 'node1'), // self-connection
createConnection('node1', 'node2'), // external connection
],
],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-10');
const result = await removeNodeTool.invoke(
{
nodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node1');
expectToolSuccess(content, 'Successfully removed node "Loop Node"');
// The self-connection is counted on both its source and target side, plus the external connection
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 3 connections');
});
it('should handle removing node by exact ID match', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'test-uuid-123', name: 'My Node', type: 'n8n-nodes-base.set' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfigWithWriter('remove_node', 'test-call-11');
const result = await removeNodeTool.invoke(
{
nodeId: 'test-uuid-123',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'test-uuid-123');
expectToolSuccess(content, 'Successfully removed node "My Node" (n8n-nodes-base.set)');
// Verify progress messages contain the exact node ID
const progressMessage = findProgressMessage(
extractProgressMessages(mockConfig.writer),
'running',
'progress',
);
expect(progressMessage?.updates[0]?.data?.message).toBe('Removing node test-uuid-123');
});
it('should handle different connection types', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'Vector Store',
type: '@n8n/n8n-nodes-langchain.vectorStore',
}),
createNode({
id: 'node2',
name: 'Embeddings',
type: '@n8n/n8n-nodes-langchain.embeddingsOpenAi',
}),
createNode({
id: 'node3',
name: 'Document Loader',
type: '@n8n/n8n-nodes-langchain.documentLoader',
}),
]);
// Mixed connection types
existingWorkflow.connections = {
node3: {
ai_document: [[createConnection('node3', 'node1', 'ai_document')]],
},
node2: {
ai_embedding: [[createConnection('node2', 'node1', 'ai_embedding')]],
},
};
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('remove_node', 'test-call-12');
const result = await removeNodeTool.invoke(
{
nodeId: 'node1',
},
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeRemoved(content, 'node1');
expectToolSuccess(content, 'Successfully removed node "Vector Store"');
expect(content.update.messages[0]?.kwargs.content).toContain('Removed 2 connections');
});
it('should return correct output structure', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node-to-remove',
name: 'Test Node',
type: 'n8n-nodes-base.httpRequest',
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfigWithWriter('remove_node', 'test-call-13');
await removeNodeTool.invoke(
{
nodeId: 'node-to-remove',
},
mockConfig,
);
// Check the completed progress message has correct output structure
const completeMessage = findProgressMessage(
extractProgressMessages(mockConfig.writer),
'completed',
);
expect(completeMessage?.updates[0]?.data).toEqual({
removedNodeId: 'node-to-remove',
removedNodeName: 'Test Node',
removedNodeType: 'n8n-nodes-base.httpRequest',
connectionsRemoved: 0,
message: 'Successfully removed node "Test Node" (n8n-nodes-base.httpRequest)',
});
});
});
});

View File

@@ -0,0 +1,774 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getCurrentTaskInput } from '@langchain/langgraph';
import type { INode, INodeTypeDescription } from 'n8n-workflow';
import {
createNode,
createWorkflow,
nodeTypes,
parseToolResult,
extractProgressMessages,
findProgressMessage,
createToolConfigWithWriter,
createToolConfig,
setupWorkflowState,
expectToolSuccess,
expectToolError,
expectNodeUpdated,
buildUpdateNodeInput,
mockParameterUpdaterChain,
type ParsedToolContent,
} from '../../../test/test-utils';
import { createUpdateNodeParametersTool } from '../update-node-parameters.tool';
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
// Mock the parameter updater chain
jest.mock('../../../src/chains/parameter-updater', () => ({
createParameterUpdaterChain: jest.fn(),
}));
describe('UpdateNodeParametersTool', () => {
let nodeTypesList: INodeTypeDescription[];
let updateNodeParametersTool: ReturnType<typeof createUpdateNodeParametersTool>;
const mockGetCurrentTaskInput = getCurrentTaskInput as jest.MockedFunction<
typeof getCurrentTaskInput
>;
let mockLLM: jest.Mocked<BaseChatModel>;
let mockChain: ReturnType<typeof mockParameterUpdaterChain>;
beforeEach(() => {
jest.clearAllMocks();
// Setup mock LLM
mockLLM = {
invoke: jest.fn(),
} as unknown as jest.Mocked<BaseChatModel>;
// Setup mock parameter updater chain
mockChain = mockParameterUpdaterChain();
// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/no-unsafe-assignment
const parameterUpdaterModule = require('../../../src/chains/parameter-updater');
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call
parameterUpdaterModule.createParameterUpdaterChain.mockReturnValue(mockChain);
nodeTypesList = [nodeTypes.code, nodeTypes.httpRequest, nodeTypes.webhook, nodeTypes.setNode];
updateNodeParametersTool = createUpdateNodeParametersTool(nodeTypesList, mockLLM);
});
afterEach(() => {
jest.clearAllMocks();
});
describe('invoke', () => {
it('should update node parameters successfully', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: {
method: 'GET',
url: 'https://example.com',
},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain response
mockChain.invoke.mockResolvedValue({
parameters: {
method: 'POST',
url: 'https://api.example.com',
headers: {
pairs: [
{
name: 'Content-Type',
value: 'application/json',
},
],
},
},
});
const mockConfig = createToolConfigWithWriter('update_node_parameters', 'test-call-1');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Change method to POST', 'Add Content-Type header']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeUpdated(content, 'node1', {
parameters: expect.objectContaining({
method: 'POST',
url: 'https://api.example.com',
}),
});
expectToolSuccess(
content,
'Successfully updated parameters for node "HTTP Request" (n8n-nodes-base.httpRequest):',
);
expect(content.update.messages[0]?.kwargs.content).toContain('- Change method to POST');
expect(content.update.messages[0]?.kwargs.content).toContain('- Add Content-Type header');
// Check progress messages
const progressCalls = extractProgressMessages(mockConfig.writer);
expect(progressCalls.length).toBeGreaterThanOrEqual(3);
const startMessage = findProgressMessage(progressCalls, 'running', 'input');
expect(startMessage).toBeDefined();
expect(startMessage?.updates[0]?.data).toMatchObject({
nodeId: 'node1',
changes: ['Change method to POST', 'Add Content-Type header'],
});
const completeMessage = findProgressMessage(progressCalls, 'completed');
expect(completeMessage).toBeDefined();
expect(completeMessage?.updates[0]?.data).toMatchObject({
nodeId: 'node1',
nodeName: 'HTTP Request',
nodeType: 'n8n-nodes-base.httpRequest',
appliedChanges: ['Change method to POST', 'Add Content-Type header'],
});
});
it('should handle expression parameters correctly', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'Set',
type: 'n8n-nodes-base.set',
parameters: {},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain response with expression
mockChain.invoke.mockResolvedValue({
parameters: {
values: {
// eslint-disable-next-line id-denylist
string: [
{
name: 'status',
value: '={{ $json.response.status }}',
},
],
},
},
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-2');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Set status field from response']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeUpdated(content, 'node1', {
parameters: expect.objectContaining({
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
values: expect.objectContaining({
// eslint-disable-next-line id-denylist, @typescript-eslint/no-unsafe-assignment
string: expect.arrayContaining([
expect.objectContaining({
name: 'status',
value: '={{ $json.response.status }}',
}),
]),
}),
}),
});
});
it('should merge parameters instead of replacing them', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: {
method: 'GET',
url: 'https://example.com',
authentication: 'genericCredentialType',
genericAuthType: 'httpBasicAuth',
},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain response - only updating URL
mockChain.invoke.mockResolvedValue({
parameters: {
url: 'https://api.example.com/v2',
},
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-3');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Update URL to v2 endpoint']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Should keep existing parameters and only update URL
expectNodeUpdated(content, 'node1', {
parameters: expect.objectContaining({
method: 'GET', // preserved
url: 'https://api.example.com/v2', // updated
authentication: 'genericCredentialType', // preserved
genericAuthType: 'httpBasicAuth', // preserved
}),
});
});
it('should handle non-existent node', async () => {
const existingWorkflow = createWorkflow([createNode({ id: 'node1', name: 'Code' })]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('update_node_parameters', 'test-call-4');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('non-existent', ['Some change']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node with ID "non-existent" not found in workflow');
});
it('should handle unknown node type', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Unknown', type: 'unknown.type' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
const mockConfig = createToolConfig('update_node_parameters', 'test-call-5');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Some change']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Node type "unknown.type" not found');
});
it('should handle LLM returning invalid parameters', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain returning invalid response
mockChain.invoke.mockResolvedValue({ parameters: null } as unknown as {
parameters: Record<string, unknown>;
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-6');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Add code']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Invalid parameters structure returned from LLM');
});
it('should handle validation errors', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('update_node_parameters', 'test-call-7');
try {
await updateNodeParametersTool.invoke(
{
nodeId: 'node1',
// Missing changes array
} as Parameters<typeof updateNodeParametersTool.invoke>[0],
mockConfig,
);
expect(true).toBe(false);
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should handle empty changes array', async () => {
setupWorkflowState(mockGetCurrentTaskInput);
const mockConfig = createToolConfig('update_node_parameters', 'test-call-8');
try {
await updateNodeParametersTool.invoke(
{
nodeId: 'node1',
changes: [],
},
mockConfig,
);
expect(true).toBe(false);
} catch (error) {
expect(error).toBeDefined();
expect(String(error)).toContain('Received tool input did not match expected schema');
}
});
it('should fix expression prefixes', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'Set',
type: 'n8n-nodes-base.set',
parameters: {},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain response with missing = prefix
mockChain.invoke.mockResolvedValue({
parameters: {
values: {
// eslint-disable-next-line id-denylist
string: [
{
name: 'value',
value: '{{ $json.data }}', // Missing = prefix
},
],
},
},
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-9');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Add value from data']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Should fix the expression prefix
expectNodeUpdated(content, 'node1', {
parameters: expect.objectContaining({
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
values: expect.objectContaining({
// eslint-disable-next-line id-denylist, @typescript-eslint/no-unsafe-assignment
string: expect.arrayContaining([
expect.objectContaining({
name: 'value',
value: '={{ $json.data }}', // Fixed with = prefix
}),
]),
}),
}),
});
});
it('should handle complex nested parameters', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: {
method: 'POST',
url: 'https://api.example.com',
},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock complex nested response
mockChain.invoke.mockResolvedValue({
parameters: {
method: 'POST',
url: 'https://api.example.com',
headers: {
pairs: [
{
name: 'Authorization',
value: 'Bearer {{$credentials.apiKey}}',
},
{
name: 'X-Custom-Header',
value: '={{ $node["Webhook"].json.customValue }}',
},
],
},
body: {
contentType: 'json',
jsonBody: '={{ JSON.stringify({\n "user": $json.user,\n "action": "update"\n}) }}',
},
},
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-10');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', [
'Add authorization header with API key',
'Add custom header from webhook',
'Set JSON body with user data',
]),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolSuccess(content, 'Successfully updated parameters');
const updatedNode = content.update.workflowOperations?.[0]?.updates as Partial<INode>;
// Type-safe access to nested properties
const headers = updatedNode?.parameters?.headers as { pairs?: unknown[] } | undefined;
const body = updatedNode?.parameters?.body as { contentType?: string } | undefined;
expect(headers?.pairs).toHaveLength(2);
expect(body?.contentType).toBe('json');
});
it('should handle parameters validation gracefully', async () => {
// Create a custom node type with a required parameter
const customNodeType = {
...nodeTypes.httpRequest,
properties: [
{
displayName: 'URL',
name: 'url',
type: 'string' as const,
required: true,
default: '', // Empty default, so it's truly required
},
{
displayName: 'Method',
name: 'method',
type: 'options' as const,
options: [
{ name: 'GET', value: 'GET' },
{ name: 'POST', value: 'POST' },
],
default: 'GET',
},
],
};
// Create tool with custom node type
const customTool = createUpdateNodeParametersTool([customNodeType], mockLLM);
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: { url: 'https://example.com' }, // Has URL initially
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock response that blanks out the required URL parameter
mockChain.invoke.mockResolvedValue({
parameters: {
method: 'POST',
// The required URL becomes an empty string, so the merged parameters fail validation
url: '',
},
});
const mockConfig = createToolConfigWithWriter('update_node_parameters', 'test-call-11');
const result = await customTool.invoke(
buildUpdateNodeInput('node1', ['Change method to POST and clear URL']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Should still succeed even with validation issues
expectToolSuccess(content, 'Successfully updated parameters');
// The parameter update should still happen
expectNodeUpdated(content, 'node1', {
parameters: expect.objectContaining({
method: 'POST',
}),
});
});
it('should handle LLM chain errors gracefully', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain throwing error
mockChain.invoke.mockRejectedValue(new Error('LLM service unavailable'));
const mockConfig = createToolConfig('update_node_parameters', 'test-call-12');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Add JavaScript code']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectToolError(content, 'Error: Failed to update node parameters: LLM service unavailable');
});
it('should handle "Received tool input did not match expected schema" error from parametersChain', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'node1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: {
method: 'GET',
url: 'https://example.com',
},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain throwing schema validation error
const schemaError = new Error('Received tool input did not match expected schema');
mockChain.invoke.mockRejectedValue(schemaError);
const mockConfig = createToolConfigWithWriter(
'update_node_parameters',
'test-call-schema-error',
);
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', ['Change method to POST']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// The error should be wrapped as a ToolExecutionError
expectToolError(
content,
'Error: Failed to update node parameters: Received tool input did not match expected schema',
);
// Verify the error was passed through the reporter
const progressCalls = extractProgressMessages(mockConfig.writer);
const errorMessage = findProgressMessage(progressCalls, 'error');
expect(errorMessage).toBeDefined();
expect(errorMessage?.updates[0]?.type).toBe('error');
});
it('should pass correct context to parameter updater chain', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'test-node',
name: 'My HTTP Request',
type: 'n8n-nodes-base.httpRequest',
parameters: {
method: 'GET',
url: 'https://old.example.com',
},
}),
]);
const mockState = {
prompt: 'Test workflow prompt',
executionData: { test: 'data' },
};
mockGetCurrentTaskInput.mockReturnValue({
workflowJSON: existingWorkflow,
...mockState,
});
mockChain.invoke.mockResolvedValue({
parameters: { url: 'https://new.example.com' },
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-13');
await updateNodeParametersTool.invoke(
buildUpdateNodeInput('test-node', ['Update URL']),
mockConfig,
);
// Verify chain was called with correct context
expect(mockChain.invoke).toHaveBeenCalledWith(
expect.objectContaining({
execution_data: 'NO EXECUTION DATA YET',
execution_schema: 'NO SCHEMA',
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
workflow_json: expect.objectContaining({
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
nodes: expect.arrayContaining([
expect.objectContaining({
id: 'test-node',
}),
]),
}),
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
node_definition: expect.any(String),
node_id: 'test-node',
node_name: 'My HTTP Request',
node_type: 'n8n-nodes-base.httpRequest',
current_parameters: JSON.stringify(
{
method: 'GET',
url: 'https://old.example.com',
},
null,
2,
),
changes: '1. Update URL',
}),
);
// Verify createParameterUpdaterChain was called with correct config
// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/no-unsafe-assignment
const paramUpdaterModule = require('../../../src/chains/parameter-updater');
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
expect(paramUpdaterModule.createParameterUpdaterChain).toHaveBeenCalledWith(
mockLLM,
expect.objectContaining({
nodeType: 'n8n-nodes-base.httpRequest',
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
nodeDefinition: expect.any(Object),
requestedChanges: ['Update URL'],
}),
undefined, // Logger
);
});
it('should handle webhook node parameters', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'webhook1',
name: 'Webhook',
type: 'n8n-nodes-base.webhook',
parameters: {
path: 'webhook',
},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain response
mockChain.invoke.mockResolvedValue({
parameters: {
path: 'api/v2/webhook',
httpMethod: 'POST',
responseMode: 'onReceived',
},
});
const mockConfig = createToolConfig('update_node_parameters', 'test-call-14');
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('webhook1', ['Change path to api/v2/webhook', 'Accept POST requests']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
expectNodeUpdated(content, 'webhook1', {
parameters: expect.objectContaining({
path: 'api/v2/webhook',
httpMethod: 'POST',
responseMode: 'onReceived',
}),
});
});
it('should format multiple changes correctly', async () => {
const existingWorkflow = createWorkflow([
createNode({ id: 'node1', name: 'Code', type: 'n8n-nodes-base.code' }),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
mockChain.invoke.mockResolvedValue({ parameters: { jsCode: 'console.log("test");' } });
const mockConfig = createToolConfig('update_node_parameters', 'test-call-15');
const changes = ['Add console log statement', 'Log the word "test"', 'Use JavaScript syntax'];
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('node1', changes),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
const message = content.update.messages[0]?.kwargs.content;
expectToolSuccess(content, 'Successfully updated parameters');
// Check all changes are listed
expect(message).toContain('- Add console log statement');
expect(message).toContain('- Log the word "test"');
expect(message).toContain('- Use JavaScript syntax');
});
it('should properly wrap chain schema errors as ToolExecutionError', async () => {
const existingWorkflow = createWorkflow([
createNode({
id: 'test-node',
name: 'Set Node',
type: 'n8n-nodes-base.set',
parameters: {},
}),
]);
setupWorkflowState(mockGetCurrentTaskInput, existingWorkflow);
// Mock chain throwing a detailed schema validation error
const schemaError = new Error(
'Received tool input did not match expected schema: Invalid parameters structure',
);
mockChain.invoke.mockRejectedValue(schemaError);
const mockConfig = createToolConfigWithWriter(
'update_node_parameters',
'test-schema-validation',
);
const result = await updateNodeParametersTool.invoke(
buildUpdateNodeInput('test-node', ['Add field mapping']),
mockConfig,
);
const content = parseToolResult<ParsedToolContent>(result);
// Check that the error is properly formatted
expectToolError(
content,
/Failed to update node parameters.*Received tool input did not match expected schema/,
);
// Verify the error reporter received a ToolExecutionError
const progressCalls = extractProgressMessages(mockConfig.writer);
const errorCall = progressCalls.find((call) => call.status === 'error');
expect(errorCall).toBeDefined();
// The error should be a ToolExecutionError with proper metadata
const errorData = errorCall?.updates[0]?.data;
expect(errorData).toBeDefined();
// Since the error is converted to a plain object for reporting, check the message
expect(JSON.stringify(errorData)).toContain('Failed to update node parameters');
expect(JSON.stringify(errorData)).toContain(
'Received tool input did not match expected schema',
);
});
});
});

View File

@@ -0,0 +1,214 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { tool } from '@langchain/core/tools';
import type { INode, INodeTypeDescription, INodeParameters, Logger } from 'n8n-workflow';
import { z } from 'zod';
import { createParameterUpdaterChain } from '../chains/parameter-updater';
import { ValidationError, ParameterUpdateError, ToolExecutionError } from '../errors';
import { createProgressReporter, reportProgress } from './helpers/progress';
import { createSuccessResponse, createErrorResponse } from './helpers/response';
import { getCurrentWorkflow, getWorkflowState, updateNodeInWorkflow } from './helpers/state';
import {
validateNodeExists,
findNodeType,
createNodeNotFoundError,
createNodeTypeNotFoundError,
} from './helpers/validation';
import {
extractNodeParameters,
formatChangesForPrompt,
updateNodeWithParameters,
mergeParameters,
fixExpressionPrefixes,
} from './utils/parameter-update.utils';
import type { UpdateNodeParametersOutput } from '../types/tools';
/**
* Schema for update node parameters input
*/
const updateNodeParametersSchema = z.object({
nodeId: z.string().describe('The ID of the node to update'),
changes: z
.array(z.string())
.min(1)
.describe(
'Array of natural language changes to apply to the node parameters (e.g., "Set the URL to call the weather API", "Add an API key header")',
),
});
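// Illustrative input accepted by this schema (values mirror the unit tests above):
// {
//   nodeId: 'node1',
//   changes: ['Change method to POST', 'Add Content-Type header'],
// }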
/**
* Build a success message for the parameter update
*/
function buildSuccessMessage(node: INode, changes: string[]): string {
const changesList = changes.map((c) => `- ${c}`).join('\n');
return `Successfully updated parameters for node "${node.name}" (${node.type}):\n${changesList}`;
}
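// For example (values mirroring the unit tests), calling buildSuccessMessage with a node named
// "HTTP Request" of type "n8n-nodes-base.httpRequest" and the changes
// ['Change method to POST', 'Add Content-Type header'] yields:
//   Successfully updated parameters for node "HTTP Request" (n8n-nodes-base.httpRequest):
//   - Change method to POST
//   - Add Content-Type header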
/**
* Factory function to create the update node parameters tool
*/
export function createUpdateNodeParametersTool(
nodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
logger?: Logger,
) {
return tool(
async (input, config) => {
const reporter = createProgressReporter(config, 'update_node_parameters');
try {
// Validate input using Zod schema
const validatedInput = updateNodeParametersSchema.parse(input);
const { nodeId, changes } = validatedInput;
// Report tool start
reporter.start(validatedInput);
// Get current state
const state = getWorkflowState();
const workflow = getCurrentWorkflow(state);
// Find the node
const node = validateNodeExists(nodeId, workflow.nodes);
if (!node) {
const error = createNodeNotFoundError(nodeId);
reporter.error(error);
return createErrorResponse(config, error);
}
// Find the node type
const nodeType = findNodeType(node.type, nodeTypes);
if (!nodeType) {
const error = createNodeTypeNotFoundError(node.type);
reporter.error(error);
return createErrorResponse(config, error);
}
// Report progress
reportProgress(reporter, `Updating parameters for node "${node.name}"`, {
nodeId,
changes,
});
try {
// Get current parameters
const currentParameters = extractNodeParameters(node);
// Format inputs for the chain
const formattedChanges = formatChangesForPrompt(changes);
// Get the node's properties definition as JSON
const nodePropertiesJson = JSON.stringify(nodeType.properties || [], null, 2);
// Call the parameter updater chain with dynamic prompt building
const parametersChain = createParameterUpdaterChain(
llm,
{
nodeType: node.type,
nodeDefinition: nodeType,
requestedChanges: changes,
},
logger,
);
const newParameters = (await parametersChain.invoke({
workflow_json: workflow,
execution_schema: state.workflowContext?.executionSchema ?? 'NO SCHEMA',
execution_data: state.workflowContext?.executionData ?? 'NO EXECUTION DATA YET',
node_id: nodeId,
node_name: node.name,
node_type: node.type,
current_parameters: JSON.stringify(currentParameters, null, 2),
node_definition: nodePropertiesJson,
changes: formattedChanges,
})) as INodeParameters;
// Ensure newParameters is a valid object
if (!newParameters || typeof newParameters !== 'object') {
throw new ParameterUpdateError('Invalid parameters returned from LLM', {
nodeId,
nodeType: node.type,
});
}
// Ensure parameters property exists and is valid
if (!newParameters.parameters || typeof newParameters.parameters !== 'object') {
throw new ParameterUpdateError('Invalid parameters structure returned from LLM', {
nodeId,
nodeType: node.type,
});
}
// Fix expression prefixes in the new parameters
const fixedParameters = fixExpressionPrefixes(newParameters.parameters);
// Merge the new parameters with existing ones
const updatedParameters = mergeParameters(
currentParameters,
fixedParameters as INodeParameters,
);
// Create updated node
const updatedNode = updateNodeWithParameters(node, updatedParameters);
// Build success message
const message = buildSuccessMessage(node, changes);
// Report completion
const output: UpdateNodeParametersOutput = {
nodeId,
nodeName: node.name,
nodeType: node.type,
updatedParameters,
appliedChanges: changes,
message,
};
reporter.complete(output);
// Return success with state updates
const stateUpdates = updateNodeInWorkflow(state, nodeId, updatedNode);
return createSuccessResponse(config, message, stateUpdates);
} catch (error) {
if (error instanceof ParameterUpdateError) {
reporter.error(error);
return createErrorResponse(config, error);
}
const toolError = new ToolExecutionError(
`Failed to update node parameters: ${error instanceof Error ? error.message : 'Unknown error'}`,
{
toolName: 'update_node_parameters',
cause: error instanceof Error ? error : undefined,
},
);
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
} catch (error) {
// Handle validation or unexpected errors
if (error instanceof z.ZodError) {
const validationError = new ValidationError('Invalid input parameters', {
extra: { errors: error.errors },
});
reporter.error(validationError);
return createErrorResponse(config, validationError);
}
const toolError = new ToolExecutionError(
error instanceof Error ? error.message : 'Unknown error occurred',
{
toolName: 'update_node_parameters',
cause: error instanceof Error ? error : undefined,
},
);
reporter.error(toolError);
return createErrorResponse(config, toolError);
}
},
{
name: 'update_node_parameters',
description:
'Update the parameters of an existing node in the workflow based on natural language changes. This tool intelligently modifies only the specified parameters while preserving others. Examples: "Set the URL to https://api.example.com", "Add authentication header", "Change method to POST", "Set the condition to check if status equals success".',
schema: updateNodeParametersSchema,
},
);
}
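// Minimal usage sketch (mirrors the unit tests; nodeTypesList, llm, and config are assumed to be
// supplied by the caller, e.g. the workflow builder agent service):
//
//   const updateNodeParameters = createUpdateNodeParametersTool(nodeTypesList, llm);
//   const result = await updateNodeParameters.invoke(
//     { nodeId: 'node1', changes: ['Change method to POST'] },
//     config,
//   );
//
// The returned command contains the assistant message plus the workflow operations that apply
// the merged parameters to the node (see the workflowOperations assertions in the tests above).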

View File

@@ -0,0 +1,68 @@
import type { INodeParameters } from 'n8n-workflow';
/**
* Whitelist of parameter names that commonly affect node connections
* These parameters often control which inputs/outputs are available
*/
export const CONNECTION_AFFECTING_PARAMETERS = new Set([
'mode',
'operation',
'resource',
'action',
'method',
'textSplittingMode',
'useReranker',
'outputFormat',
'inputType',
'outputType',
'connectionMode',
'dataType',
'triggerMode',
]);
/**
* Validate that the provided parameters only contain connection-affecting parameters
* @param parameters - The parameters to validate
* @returns Object with validation result and filtered parameters
*/
export function validateConnectionParameters(parameters: INodeParameters): {
valid: boolean;
filtered: INodeParameters;
warnings: string[];
} {
const filtered: INodeParameters = {};
const warnings: string[] = [];
for (const [key, value] of Object.entries(parameters)) {
if (CONNECTION_AFFECTING_PARAMETERS.has(key)) {
filtered[key] = value;
} else {
warnings.push(
`Parameter "${key}" is not a connection-affecting parameter and will be ignored`,
);
}
}
return {
valid: Object.keys(filtered).length > 0,
filtered,
warnings,
};
}
/**
* Extract only connection-affecting parameters from a node's current parameters
* @param parameters - The node's full parameters
* @returns Only the connection-affecting parameters
*/
export function extractConnectionParameters(parameters: INodeParameters): INodeParameters {
const connectionParams: INodeParameters = {};
for (const [key, value] of Object.entries(parameters)) {
if (CONNECTION_AFFECTING_PARAMETERS.has(key)) {
connectionParams[key] = value;
}
}
return connectionParams;
}
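
A minimal usage sketch for the two helpers above (not part of the diff; the import path and parameter values are hypothetical):

import type { INodeParameters } from 'n8n-workflow';
import { validateConnectionParameters, extractConnectionParameters } from './connection-parameters'; // hypothetical path

const requested: INodeParameters = { mode: 'retrieve-as-tool', url: 'https://example.com' };

// Only whitelisted keys survive; `url` is dropped with a warning.
const { valid, filtered, warnings } = validateConnectionParameters(requested);
// valid === true, filtered === { mode: 'retrieve-as-tool' }, warnings.length === 1

// Pull the connection-affecting subset out of a node's existing parameters.
const current = extractConnectionParameters({ mode: 'insert', operation: 'upsert', topK: 4 });
// current === { mode: 'insert', operation: 'upsert' }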

View File

@@ -0,0 +1,562 @@
import type {
INode,
INodeTypeDescription,
IConnections,
IConnection,
NodeConnectionType,
} from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';
import type {
ConnectionValidationResult,
InferConnectionTypeResult,
} from '../../types/connections';
import { isSubNode } from '../../utils/node-helpers';
/**
* Extract connection types from an expression string
* Looks for patterns like type: "ai_embedding", type: 'main', etc.
* Also detects array patterns like ["main", ...] or ['main', ...]
* @param expression - The expression string to parse
* @returns Array of unique connection types found
*/
function extractConnectionTypesFromExpression(expression: string): NodeConnectionType[] {
const types = new Set<string>();
// Pattern to match type: "value" or type: 'value' or type: NodeConnectionTypes.Value
const patterns = [/type\s*:\s*["']([^"']+)["']/g, /type\s*:\s*NodeConnectionTypes\.(\w+)/g];
// Additional patterns to detect "main" in arrays
const arrayMainPatterns = [
/\[\s*["']main["']/i, // ["main" or ['main'
/\[\s*NodeConnectionTypes\.Main/i, // [NodeConnectionTypes.Main
/return\s+\[\s*["']main["']/i, // return ["main" or return ['main'
/return\s+\[\s*NodeConnectionTypes\.Main/i, // return [NodeConnectionTypes.Main
];
// Check for array patterns containing "main"
for (const pattern of arrayMainPatterns) {
if (pattern.test(expression)) {
types.add(NodeConnectionTypes.Main);
break;
}
}
for (const pattern of patterns) {
let match;
pattern.lastIndex = 0; // Reset regex state
while ((match = pattern.exec(expression)) !== null) {
const type = match[1];
if (type) {
// Convert lowercase 'main' to proper case if needed
const normalizedType = type.toLowerCase() === 'main' ? NodeConnectionTypes.Main : type;
types.add(normalizedType);
}
}
}
return Array.from(types) as NodeConnectionType[];
}
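
For illustration only (not part of the diff): on a mode-dependent expression the parser above reports the union of both branches, e.g.

// extractConnectionTypesFromExpression(
//   "={{ $parameter.mode === 'retrieve-as-tool' ? [{ type: 'ai_tool' }] : [{ type: 'main' }, { type: 'ai_embedding' }] }}"
// ) → ['ai_tool', 'main', 'ai_embedding']
// Every `type:` literal in the string is collected, so callers see everything the node can expose across modes.
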
/**
* Validate if a connection between two nodes is valid
* @param sourceNode - The source node
* @param targetNode - The target node
* @param connectionType - The type of connection
* @param nodeTypes - Array of all node type descriptions
* @returns Validation result with potential swap recommendation
*/
export function validateConnection(
sourceNode: INode,
targetNode: INode,
connectionType: string,
nodeTypes: INodeTypeDescription[],
): ConnectionValidationResult {
const sourceNodeType = nodeTypes.find((nt) => nt.name === sourceNode.type);
const targetNodeType = nodeTypes.find((nt) => nt.name === targetNode.type);
if (!sourceNodeType || !targetNodeType) {
return {
valid: false,
error: 'One or both node types not found',
};
}
const sourceIsSubNode = isSubNode(sourceNodeType, sourceNode);
const targetIsSubNode = isSubNode(targetNodeType, targetNode);
// For AI connections, validate and potentially suggest swapping
if (connectionType.startsWith('ai_')) {
// AI connections require a sub-node
if (!sourceIsSubNode && !targetIsSubNode) {
return {
valid: false,
error: `Connection type "${connectionType}" requires a sub-node, but both nodes are main nodes`,
};
}
// If target is sub-node but source is not, suggest swapping
if (targetIsSubNode && !sourceIsSubNode) {
return {
valid: true,
shouldSwap: true,
swappedSource: targetNode,
swappedTarget: sourceNode,
};
}
// Validate that the sub-node supports the connection type
if (sourceIsSubNode) {
const supportsConnectionType = nodeHasOutputType(sourceNodeType, connectionType);
if (!supportsConnectionType) {
return {
valid: false,
error: `Sub-node "${sourceNode.name}" does not support output type "${connectionType}"`,
};
}
}
}
return { valid: true };
}
/**
* Check if a node has a specific output type
* @param nodeType - The node type description
* @param connectionType - The connection type to check
* @returns True if the node supports the output type
*/
export function nodeHasOutputType(nodeType: INodeTypeDescription, connectionType: string): boolean {
if (typeof nodeType.outputs === 'string') {
return nodeType.outputs === connectionType || nodeType.outputs.includes(connectionType);
}
if (!nodeType.outputs || !Array.isArray(nodeType.outputs)) {
return false;
}
return nodeType.outputs.some((output) => {
if (typeof output === 'string') {
return output === connectionType || output.includes(connectionType);
}
return output.type === connectionType;
});
}
/**
* Check if a node accepts a specific input type
* @param nodeType - The node type description
* @param connectionType - The connection type to check
* @returns True if the node accepts the input type
*/
export function nodeAcceptsInputType(
nodeType: INodeTypeDescription,
connectionType: string,
): boolean {
if (typeof nodeType.inputs === 'string') {
return nodeType.inputs === connectionType || nodeType.inputs.includes(connectionType);
}
if (!nodeType.inputs || !Array.isArray(nodeType.inputs)) {
return false;
}
return nodeType.inputs.some((input) => {
if (typeof input === 'string') {
return input === connectionType || input.includes(connectionType);
}
return input.type === connectionType;
});
}
/**
* Create or update a connection in the workflow
* @param connections - Current connections object
* @param sourceNodeName - Name of the source node
* @param targetNodeName - Name of the target node
* @param connectionType - Type of connection
* @param sourceOutputIndex - Output index on source node (default: 0)
* @param targetInputIndex - Input index on target node (default: 0)
* @returns Updated connections object
*/
export function createConnection(
connections: IConnections,
sourceNodeName: string,
targetNodeName: string,
connectionType: NodeConnectionType,
sourceOutputIndex: number = 0,
targetInputIndex: number = 0,
): IConnections {
// Ensure source node exists in connections
if (!connections[sourceNodeName]) {
connections[sourceNodeName] = {};
}
// Ensure connection type exists
if (!connections[sourceNodeName][connectionType]) {
connections[sourceNodeName][connectionType] = [];
}
const connectionArray = connections[sourceNodeName][connectionType];
// Ensure the array has enough elements for the source output index
while (connectionArray.length <= sourceOutputIndex) {
connectionArray.push([]);
}
// Add the connection
const newConnection: IConnection = {
node: targetNodeName,
type: connectionType,
index: targetInputIndex,
};
// Ensure the array at sourceOutputIndex exists
connectionArray[sourceOutputIndex] ??= [];
// Check if connection already exists
const existingConnection = connectionArray[sourceOutputIndex].find(
(conn) => conn.node === targetNodeName && conn.index === targetInputIndex,
);
if (!existingConnection) {
connectionArray[sourceOutputIndex].push(newConnection);
}
return connections;
}
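
A minimal sketch of the structure createConnection builds (not part of the diff; node names are hypothetical):

import type { IConnections } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';

const connections: IConnections = createConnection({}, 'Webhook', 'Edit Fields', NodeConnectionTypes.Main);
// connections === { Webhook: { main: [[{ node: 'Edit Fields', type: 'main', index: 0 }]] } }
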
/**
* Remove a connection from the workflow
* @param connections - Current connections object
* @param sourceNodeName - Name of the source node
* @param targetNodeName - Name of the target node
* @param connectionType - Type of connection
* @param sourceOutputIndex - Output index on source node
* @param targetInputIndex - Input index on target node
* @returns Updated connections object
*/
export function removeConnection(
connections: IConnections,
sourceNodeName: string,
targetNodeName: string,
connectionType: string,
sourceOutputIndex?: number,
targetInputIndex?: number,
): IConnections {
if (!connections[sourceNodeName]?.[connectionType]) {
return connections;
}
const connectionArray = connections[sourceNodeName][connectionType];
// If indices are specified, remove specific connection
if (sourceOutputIndex !== undefined) {
if (connectionArray[sourceOutputIndex]) {
connectionArray[sourceOutputIndex] = connectionArray[sourceOutputIndex].filter(
(conn) =>
conn.node !== targetNodeName ||
(targetInputIndex !== undefined && conn.index !== targetInputIndex),
);
}
} else {
// Remove all connections to target node
for (let i = 0; i < connectionArray.length; i++) {
if (connectionArray[i]) {
connectionArray[i] = connectionArray[i]!.filter((conn) => conn.node !== targetNodeName);
}
}
}
// Clean up empty arrays
connections[sourceNodeName][connectionType] = connectionArray.filter(
(arr) => arr && arr.length > 0,
);
// Clean up empty connection types
if (connections[sourceNodeName][connectionType].length === 0) {
delete connections[sourceNodeName][connectionType];
}
// Clean up empty source nodes
if (Object.keys(connections[sourceNodeName]).length === 0) {
delete connections[sourceNodeName];
}
return connections;
}
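
For illustration (not part of the diff): removing the only connection prunes the structure completely, which is the behaviour the tests further down rely on.

// removeConnection(
//   { Webhook: { main: [[{ node: 'Edit Fields', type: 'main', index: 0 }]] } },
//   'Webhook', 'Edit Fields', 'main',
// ) → {}  (the empty output slot, the empty 'main' type, and the empty 'Webhook' entry are all deleted)
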
/**
* Get all connections for a specific node
* @param connections - Current connections object
* @param nodeName - Name of the node
* @param direction - 'source' for outgoing, 'target' for incoming
* @returns Array of connections
*/
export function getNodeConnections(
connections: IConnections,
nodeName: string,
direction: 'source' | 'target',
): Array<{ node: string; type: string; sourceIndex?: number; targetIndex?: number }> {
const result: Array<{ node: string; type: string; sourceIndex?: number; targetIndex?: number }> =
[];
if (direction === 'source') {
// Get outgoing connections
const nodeConnections = connections[nodeName];
if (nodeConnections) {
for (const [connectionType, connectionArray] of Object.entries(nodeConnections)) {
connectionArray.forEach((outputConnections, sourceIndex) => {
if (outputConnections) {
outputConnections.forEach((conn) => {
result.push({
node: conn.node,
type: connectionType,
sourceIndex,
targetIndex: conn.index,
});
});
}
});
}
}
} else {
// Get incoming connections
for (const [sourceNode, nodeConnections] of Object.entries(connections)) {
for (const [connectionType, connectionArray] of Object.entries(nodeConnections)) {
connectionArray.forEach((outputConnections, sourceIndex) => {
if (outputConnections) {
outputConnections.forEach((conn) => {
if (conn.node === nodeName) {
result.push({
node: sourceNode,
type: connectionType,
sourceIndex,
targetIndex: conn.index,
});
}
});
}
});
}
}
}
return result;
}
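
A quick sketch (not part of the diff) of reading the graph back out, reusing the hypothetical connections object from the createConnection example above:

// getNodeConnections(connections, 'Webhook', 'source')
//   → [{ node: 'Edit Fields', type: 'main', sourceIndex: 0, targetIndex: 0 }]
// getNodeConnections(connections, 'Edit Fields', 'target')
//   → [{ node: 'Webhook', type: 'main', sourceIndex: 0, targetIndex: 0 }]
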
/**
* Format a connection for display
* @param sourceNode - Source node name
* @param targetNode - Target node name
* @param connectionType - Connection type
* @param swapped - Whether nodes were swapped
* @returns Formatted connection message
*/
export function formatConnectionMessage(
sourceNode: string,
targetNode: string,
connectionType: string,
swapped: boolean = false,
): string {
if (swapped) {
return `Auto-corrected connection: ${sourceNode} (${connectionType}) → ${targetNode}. (Note: Swapped nodes to ensure sub-node is the source)`;
}
return `Connected: ${sourceNode} → ${targetNode} (${connectionType})`;
}
/**
* Get all output types from a node
* @param nodeType - The node type description
* @returns Array of output types the node supports
*/
function getNodeOutputTypes(nodeType: INodeTypeDescription): NodeConnectionType[] {
// Handle expression-based outputs
if (typeof nodeType.outputs === 'string') {
// console.log(`[getNodeOutputTypes] Expression-based outputs for ${nodeType.name}`);
const extracted = extractConnectionTypesFromExpression(nodeType.outputs);
if (extracted.length > 0) {
return extracted;
}
// If no types found in expression, return empty array
// console.log('[getNodeOutputTypes] No types found in expression');
return [];
}
if (!nodeType.outputs || !Array.isArray(nodeType.outputs)) {
return [];
}
return nodeType.outputs.map((output) => {
if (typeof output === 'string') {
return output;
}
return output.type;
});
}
/**
* Get all input types from a node
* @param nodeType - The node type description
* @returns Array of input types the node accepts
*/
function getNodeInputTypes(nodeType: INodeTypeDescription, node?: INode): NodeConnectionType[] {
// Handle expression-based inputs
if (typeof nodeType.inputs === 'string') {
// console.log(`[getNodeInputTypes] Expression-based inputs for ${nodeType.name}`);
// Special handling for Vector Store in retrieve-as-tool mode
// When in this mode, it only accepts AI inputs (no main input)
if (
node &&
nodeType.name.includes('vectorStore') &&
node.parameters?.mode === 'retrieve-as-tool'
) {
// console.log('[getNodeInputTypes] Vector Store in retrieve-as-tool mode - only AI inputs');
// Extract only AI connection types from the expression
const extracted = extractConnectionTypesFromExpression(nodeType.inputs);
return extracted.filter((type) => type.startsWith('ai_'));
}
const extracted = extractConnectionTypesFromExpression(nodeType.inputs);
if (extracted.length > 0) {
return extracted;
}
// If no types found in expression, return empty array
// console.log('[getNodeInputTypes] No types found in expression');
return [];
}
if (!nodeType.inputs || !Array.isArray(nodeType.inputs)) {
return [];
}
return nodeType.inputs.map((input) => {
if (typeof input === 'string') {
return input;
}
return input.type;
});
}
/**
* Infer the connection type between two nodes based on their inputs and outputs
* @param sourceNode - The source node
* @param targetNode - The target node
* @param sourceNodeType - The source node type description
* @param targetNodeType - The target node type description
* @returns The inferred connection type or possible types
*/
// eslint-disable-next-line complexity
export function inferConnectionType(
sourceNode: INode,
targetNode: INode,
sourceNodeType: INodeTypeDescription,
targetNodeType: INodeTypeDescription,
): InferConnectionTypeResult {
// Get available output and input types
const sourceOutputTypes = getNodeOutputTypes(sourceNodeType);
const targetInputTypes = getNodeInputTypes(targetNodeType, targetNode);
const sourceInputTypes = getNodeInputTypes(sourceNodeType, sourceNode);
// For nodes with dynamic inputs/outputs, check if they're currently acting as sub-nodes
// A node acts as a sub-node if it currently has no main inputs based on its parameters
const sourceHasMainInput = sourceInputTypes.includes(NodeConnectionTypes.Main);
const targetHasMainInput = targetInputTypes.includes(NodeConnectionTypes.Main);
// Use the dynamic check for nodes with expression-based inputs
const sourceIsSubNode =
isSubNode(sourceNodeType, sourceNode) ||
(typeof sourceNodeType.inputs === 'string' && !sourceHasMainInput);
const targetIsSubNode =
isSubNode(targetNodeType, targetNode) ||
(typeof targetNodeType.inputs === 'string' && !targetHasMainInput);
// Find matching connection types
const matchingTypes = sourceOutputTypes.filter((outputType) =>
targetInputTypes.includes(outputType),
);
// console.log(`Matching types: [${matchingTypes.join(', ')}]`);
// Handle AI connections (sub-node to main node)
if (sourceIsSubNode && !targetIsSubNode) {
// console.log('Scenario: Sub-node to main node (AI connection)');
// Find AI connection types in the matches
const aiConnectionTypes = matchingTypes.filter((type) => type.startsWith('ai_'));
if (aiConnectionTypes.length === 1) {
return { connectionType: aiConnectionTypes[0] };
} else if (aiConnectionTypes.length > 1) {
// Multiple AI connection types possible
return {
possibleTypes: aiConnectionTypes,
error: `Multiple AI connection types possible: ${aiConnectionTypes.join(', ')}. Please specify which one to use.`,
};
}
}
// Handle reversed AI connections (main node to sub-node - needs swap)
if (!sourceIsSubNode && targetIsSubNode) {
// console.log('Scenario: Main node to sub-node (needs swap)');
// Check if target has any AI outputs that source accepts as inputs
const targetOutputTypes = getNodeOutputTypes(targetNodeType);
const sourceInputTypes = getNodeInputTypes(sourceNodeType, sourceNode);
const reverseAiMatches = targetOutputTypes
.filter((type) => type.startsWith('ai_'))
.filter((type) => sourceInputTypes.includes(type));
if (reverseAiMatches.length === 1) {
return {
connectionType: reverseAiMatches[0],
requiresSwap: true,
};
} else if (reverseAiMatches.length > 1) {
return {
possibleTypes: reverseAiMatches,
requiresSwap: true,
error: `Multiple AI connection types possible (requires swap): ${reverseAiMatches.join(', ')}. Please specify which one to use.`,
};
}
}
// Handle main connections
if (!sourceIsSubNode && !targetIsSubNode) {
if (matchingTypes.includes(NodeConnectionTypes.Main)) {
return { connectionType: NodeConnectionTypes.Main };
}
}
// Handle sub-node to sub-node connections
if (sourceIsSubNode && targetIsSubNode) {
// Check for AI document connections or other specific sub-node to sub-node connections
const subNodeConnections = matchingTypes.filter((type) => type.startsWith('ai_'));
if (subNodeConnections.length === 1) {
return { connectionType: subNodeConnections[0] };
} else if (subNodeConnections.length > 1) {
return {
possibleTypes: subNodeConnections,
error: `Multiple connection types possible between sub-nodes: ${subNodeConnections.join(', ')}. Please specify which one to use.`,
};
}
}
// No valid connection found
if (matchingTypes.length === 0) {
return {
error: `No compatible connection types found between "${sourceNode.name}" (outputs: ${sourceOutputTypes.join(', ') || 'none'}) and "${targetNode.name}" (inputs: ${targetInputTypes.join(', ') || 'none'})`,
};
}
// If we have other matching types but couldn't determine the best one
if (matchingTypes.length === 1) {
return { connectionType: matchingTypes[0] };
}
return {
possibleTypes: matchingTypes,
error: `Multiple connection types possible: ${matchingTypes.join(', ')}. Please specify which one to use.`,
};
}
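
A hedged end-to-end sketch of how these helpers compose (not part of the diff; the node and node-type literals are hypothetical stand-ins for what the agent loads at runtime):

import type { IConnections, INode, INodeTypeDescription } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';

const modelType: INodeTypeDescription = {
  displayName: 'Chat Model',
  name: 'example.chatModel',
  group: ['output'],
  version: 1,
  inputs: [],
  outputs: ['ai_languageModel'],
  properties: [],
  defaults: { name: 'Chat Model' },
  description: '',
};
const agentType: INodeTypeDescription = {
  displayName: 'Agent',
  name: 'example.agent',
  group: ['transform'],
  version: 1,
  inputs: [NodeConnectionTypes.Main, 'ai_languageModel'],
  outputs: [NodeConnectionTypes.Main],
  properties: [],
  defaults: { name: 'Agent' },
  description: '',
};
const modelNode: INode = { id: '1', name: 'Chat Model', type: modelType.name, typeVersion: 1, position: [250, 450], parameters: {} };
const agentNode: INode = { id: '2', name: 'Agent', type: agentType.name, typeVersion: 1, position: [250, 300], parameters: {} };

const inferred = inferConnectionType(modelNode, agentNode, modelType, agentType);
// inferred.connectionType === 'ai_languageModel' — the single type both sides share
if (inferred.connectionType) {
  const connections: IConnections = createConnection({}, modelNode.name, agentNode.name, inferred.connectionType);
  // formatConnectionMessage('Chat Model', 'Agent', inferred.connectionType)
  //   → 'Connected: Chat Model → Agent (ai_languageModel)'
}
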

View File

@@ -0,0 +1,103 @@
import type { INode, INodeTypeDescription, NodeParameterValueType } from 'n8n-workflow';
/**
* Generate a unique node name by appending numbers if necessary
* @param baseName - The base name to start with
* @param existingNodes - Array of existing nodes to check against
* @returns A unique node name
*/
export function generateUniqueName(baseName: string, existingNodes: INode[]): string {
let uniqueName = baseName;
let counter = 1;
while (existingNodes.some((n) => n.name === uniqueName)) {
uniqueName = `${baseName}${counter}`;
counter++;
}
return uniqueName;
}
/**
* Get the latest version number for a node type
* @param nodeType - The node type description
* @returns The latest version number
*/
export function getLatestVersion(nodeType: INodeTypeDescription): number {
return (
nodeType.defaultVersion ??
(typeof nodeType.version === 'number'
? nodeType.version
: nodeType.version[nodeType.version.length - 1])
);
}
/**
* Generate a unique node ID
* @returns A unique node identifier
*/
export function generateNodeId(): string {
return crypto.randomUUID();
}
/**
* Generate a webhook ID for nodes that require it
* @returns A unique webhook identifier
*/
export function generateWebhookId(): string {
return crypto.randomUUID();
}
/**
* Check if a node type requires a webhook
* @param nodeType - The node type description
* @returns True if the node requires a webhook
*/
export function requiresWebhook(nodeType: INodeTypeDescription): boolean {
return !!(nodeType.webhooks && nodeType.webhooks.length > 0);
}
/**
* Create a new node instance with all required properties
* @param nodeType - The node type description
* @param name - The name for the node
* @param position - The position of the node
* @param parameters - Optional parameters for the node
* @returns A complete node instance
*/
export function createNodeInstance(
nodeType: INodeTypeDescription,
name: string,
position: [number, number],
parameters: Record<string, NodeParameterValueType> = {},
): INode {
const node: INode = {
id: generateNodeId(),
name,
type: nodeType.name,
typeVersion: getLatestVersion(nodeType),
position,
parameters,
};
// Add webhook ID if required
if (requiresWebhook(nodeType)) {
node.webhookId = generateWebhookId();
}
return node;
}
/**
* Merge provided parameters with node defaults
* @param parameters - User-provided parameters
* @param nodeType - The node type description
* @returns Merged parameters
*/
export function mergeWithDefaults(
parameters: Record<string, NodeParameterValueType>,
nodeType: INodeTypeDescription,
): Record<string, NodeParameterValueType> {
const defaults = nodeType.defaults || {};
return { ...defaults, ...parameters };
}
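
A minimal sketch of creating a node with these helpers (not part of the diff; the node type literal is a hypothetical stand-in for a loaded node type description):

import type { INode, INodeTypeDescription } from 'n8n-workflow';

const httpRequestType: INodeTypeDescription = {
  displayName: 'HTTP Request',
  name: 'n8n-nodes-base.httpRequest',
  group: ['transform'],
  version: [1, 2, 3],
  defaultVersion: 3,
  inputs: ['main'],
  outputs: ['main'],
  properties: [],
  defaults: { name: 'HTTP Request' },
  description: '',
};
const existingNodes: INode[] = [];

const name = generateUniqueName('HTTP Request', existingNodes); // 'HTTP Request' — no clash yet
const node = createNodeInstance(
  httpRequestType,
  name,
  [250, 300],
  mergeWithDefaults({ url: 'https://example.com' }, httpRequestType),
);
// node.typeVersion === 3 (defaultVersion wins over the version array);
// no webhookId is generated because this type declares no webhooks.
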

View File

@@ -0,0 +1,191 @@
import type { INode, INodeTypeDescription } from 'n8n-workflow';
import { isSubNode } from '../../utils/node-helpers';
/**
* Constants for node positioning
*/
export const POSITIONING_CONFIG = {
HORIZONTAL_GAP: 280, // Gap between columns of nodes
MAIN_NODE_Y: 300, // Y position for main nodes
SUB_NODE_Y: 450, // Y position for sub-nodes (below main nodes but not too far)
VERTICAL_SPACING: 120, // Spacing between nodes in the same column
INITIAL_X: 250, // Starting X position
X_PROXIMITY_THRESHOLD: 50, // Threshold for considering nodes at the same X position
SUB_NODE_HORIZONTAL_OFFSET: 0.8, // Multiplier for sub-node horizontal spacing
} as const;
/**
* Calculate position for a new node
* @param existingNodes - Array of existing nodes in the workflow
* @param isSubNodeType - Whether the new node is a sub-node
* @param nodeTypes - Array of all node type descriptions
* @returns The calculated position [x, y]
*/
export function calculateNodePosition(
existingNodes: INode[],
isSubNodeType: boolean,
nodeTypes: INodeTypeDescription[],
): [number, number] {
const { INITIAL_X, MAIN_NODE_Y, SUB_NODE_Y } = POSITIONING_CONFIG;
if (existingNodes.length === 0) {
// First node - position based on whether it's a sub-node or main node
return [INITIAL_X, isSubNodeType ? SUB_NODE_Y : MAIN_NODE_Y];
}
// Separate existing nodes into main and sub-nodes
const { mainNodes, subNodes } = categorizeNodes(existingNodes, nodeTypes);
// Calculate X position
const targetX = calculateXPosition(isSubNodeType, mainNodes, subNodes);
// Calculate Y position
const targetY = calculateYPosition(targetX, existingNodes, isSubNodeType);
return [targetX, targetY];
}
/**
* Categorize nodes into main and sub-nodes
* @param nodes - Array of nodes to categorize
* @param nodeTypes - Array of all node type descriptions
* @returns Object with mainNodes and subNodes arrays
*/
export function categorizeNodes(
nodes: INode[],
nodeTypes: INodeTypeDescription[],
): { mainNodes: INode[]; subNodes: INode[] } {
const mainNodes: INode[] = [];
const subNodes: INode[] = [];
for (const node of nodes) {
const nodeType = nodeTypes.find((nt) => nt.name === node.type);
if (nodeType && isSubNode(nodeType, node)) {
subNodes.push(node);
} else {
mainNodes.push(node);
}
}
return { mainNodes, subNodes };
}
/**
* Calculate the X position for a new node
* @param isSubNodeType - Whether the new node is a sub-node
* @param mainNodes - Array of existing main nodes
* @param subNodes - Array of existing sub-nodes
* @returns The calculated X position
*/
function calculateXPosition(isSubNodeType: boolean, mainNodes: INode[], subNodes: INode[]): number {
const { HORIZONTAL_GAP, INITIAL_X, SUB_NODE_HORIZONTAL_OFFSET } = POSITIONING_CONFIG;
if (isSubNodeType) {
// For sub-nodes, position them under their related main nodes
if (mainNodes.length > 0) {
const minMainX = Math.min(...mainNodes.map((n) => n.position[0]));
// Position sub-nodes horizontally spread out under main nodes
return minMainX + subNodes.length * (HORIZONTAL_GAP * SUB_NODE_HORIZONTAL_OFFSET);
}
// No main nodes yet, use default positioning
return INITIAL_X;
} else {
// For main nodes, position to the right of all existing main nodes
if (mainNodes.length > 0) {
const maxMainX = Math.max(...mainNodes.map((n) => n.position[0]));
return maxMainX + HORIZONTAL_GAP;
}
// First main node
return INITIAL_X;
}
}
/**
* Calculate the Y position for a new node
* @param targetX - The calculated X position
* @param existingNodes - Array of existing nodes
* @param isSubNodeType - Whether the new node is a sub-node
* @returns The calculated Y position
*/
function calculateYPosition(
targetX: number,
existingNodes: INode[],
isSubNodeType: boolean,
): number {
const { MAIN_NODE_Y, SUB_NODE_Y, VERTICAL_SPACING, X_PROXIMITY_THRESHOLD } = POSITIONING_CONFIG;
// Determine base Y position
const baseY = isSubNodeType ? SUB_NODE_Y : MAIN_NODE_Y;
// Check how many nodes are already at the target X position
const nodesAtTargetX = existingNodes.filter(
(n) => Math.abs(n.position[0] - targetX) < X_PROXIMITY_THRESHOLD,
);
// Add vertical offset if there are already nodes at this X position
const verticalOffset = nodesAtTargetX.length * VERTICAL_SPACING;
return baseY + verticalOffset;
}
/**
* Get nodes at a specific position (with tolerance)
* @param nodes - Array of nodes to check
* @param position - The position to check [x, y]
* @param tolerance - Position tolerance (default: 50)
* @returns Array of nodes at the position
*/
export function getNodesAtPosition(
nodes: INode[],
position: [number, number],
tolerance: number = 50,
): INode[] {
return nodes.filter(
(node) =>
Math.abs(node.position[0] - position[0]) < tolerance &&
Math.abs(node.position[1] - position[1]) < tolerance,
);
}
/**
* Find the best position for a node connected to another node
* @param sourceNode - The source node
* @param isTargetSubNode - Whether the target node is a sub-node
* @param existingNodes - Array of existing nodes
* @param nodeTypes - Array of all node type descriptions
* @returns The calculated position [x, y]
*/
export function calculateConnectedNodePosition(
sourceNode: INode,
isTargetSubNode: boolean,
existingNodes: INode[],
_nodeTypes: INodeTypeDescription[],
): [number, number] {
const { HORIZONTAL_GAP, SUB_NODE_Y, VERTICAL_SPACING } = POSITIONING_CONFIG;
if (isTargetSubNode) {
// Position sub-nodes below the source node
const targetX = sourceNode.position[0];
const targetY = SUB_NODE_Y;
// Check for existing sub-nodes at this position
const existingSubNodes = existingNodes.filter(
(node) =>
Math.abs(node.position[0] - targetX) < 50 &&
node.position[1] >= SUB_NODE_Y &&
node.position[1] < SUB_NODE_Y + VERTICAL_SPACING * 5,
);
return [targetX, targetY + existingSubNodes.length * VERTICAL_SPACING];
} else {
// Position main nodes to the right
const targetX = sourceNode.position[0] + HORIZONTAL_GAP;
const targetY = sourceNode.position[1];
// Check for existing nodes at this position
const nodesAtPosition = getNodesAtPosition(existingNodes, [targetX, targetY]);
return [targetX, targetY + nodesAtPosition.length * VERTICAL_SPACING];
}
}
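
For orientation (not part of the diff; `nodeTypes` and `mainNode` are hypothetical), the constants above give roughly these placements:

// calculateNodePosition([], false, nodeTypes)         → [250, 300]  (first main node: INITIAL_X, MAIN_NODE_Y)
// calculateNodePosition([], true, nodeTypes)          → [250, 450]  (first sub-node: INITIAL_X, SUB_NODE_Y)
// With one main node already at [250, 300]:
// calculateNodePosition([mainNode], false, nodeTypes) → [530, 300]  (next column: 250 + HORIZONTAL_GAP)
// calculateNodePosition([mainNode], true, nodeTypes)  → [250, 570]  (shares the column, so SUB_NODE_Y + one VERTICAL_SPACING)
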

View File

@@ -0,0 +1,112 @@
import type { INode, INodeParameters } from 'n8n-workflow';
/**
* Extract current parameters from a node
*/
export function extractNodeParameters(node: INode): INodeParameters {
return node.parameters || {};
}
/**
* Merge new parameters with existing ones
* New parameters take precedence over existing ones
*/
export function mergeParameters(
existingParams: INodeParameters,
newParams: INodeParameters,
): INodeParameters {
// Deep merge to handle nested structures
return deepMerge(existingParams, newParams);
}
/**
* Deep merge two objects
*/
function deepMerge(target: INodeParameters, source: INodeParameters): INodeParameters {
// Handle null/undefined cases
if (!target) {
return source || {};
}
if (!source) {
return target;
}
const output = { ...target };
if (isObject(target) && isObject(source)) {
Object.keys(source).forEach((key) => {
if (isObject(source[key] as INodeParameters)) {
if (!target || !(key in target) || !target[key]) {
Object.assign(output, { [key]: source[key] });
} else {
output[key] = deepMerge(target[key] as INodeParameters, source[key] as INodeParameters);
}
} else {
Object.assign(output, { [key]: source[key] });
}
});
}
return output;
}
/**
* Check if value is an object (not array or null)
*/
function isObject(item: unknown): item is Record<string, unknown> {
return item !== null && typeof item === 'object' && !Array.isArray(item);
}
/**
* Create a copy of node with updated parameters
*/
export function updateNodeWithParameters(node: INode, newParameters: INodeParameters): INode {
return {
...node,
parameters: newParameters,
};
}
/**
* Format changes array into a readable string for LLM
*/
export function formatChangesForPrompt(changes: string[]): string {
return changes.map((change, index) => `${index + 1}. ${change}`).join('\n');
}
/**
* Fix expression prefixes in parameters
* Ensures expressions containing {{ are properly prefixed with =
*/
export function fixExpressionPrefixes<T>(value: T): T {
// Handle string values
if (typeof value === 'string') {
let updatedValue = value as string;
// Replace {{ $json }} with {{ $json.toJsonString() }}
if (value.includes('{{ $json }}')) {
updatedValue = value.replace('{{ $json }}', '{{ $json.toJsonString() }}');
}
if (updatedValue.includes('{{') && !updatedValue.startsWith('=')) {
return ('=' + updatedValue) as T;
}
}
// Handle array values
if (Array.isArray(value)) {
return value.map((item: unknown) => fixExpressionPrefixes(item)) as T;
}
// Handle object values (but not null)
if (value !== null && typeof value === 'object') {
const fixed: Record<string, unknown> = {};
for (const key in value) {
if (Object.prototype.hasOwnProperty.call(value, key)) {
fixed[key] = fixExpressionPrefixes((value as Record<string, unknown>)[key]);
}
}
return fixed as T;
}
// Return other types unchanged (number, boolean, null, undefined)
return value;
}
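
A minimal sketch of the update path these helpers support (not part of the diff; the node literal and new values are hypothetical):

import type { INode, INodeParameters } from 'n8n-workflow';

const httpNode: INode = {
  id: '1',
  name: 'HTTP Request',
  type: 'n8n-nodes-base.httpRequest',
  typeVersion: 4,
  position: [250, 300],
  parameters: { url: 'https://old.example.com', options: { timeout: 5000 } },
};

const requested: INodeParameters = { url: '{{ $json.endpoint }}', options: { timeout: 10000 } };
const fixed = fixExpressionPrefixes(requested);
// fixed.url === '={{ $json.endpoint }}' — the missing "=" prefix is added
const merged = mergeParameters(extractNodeParameters(httpNode), fixed);
// merged === { url: '={{ $json.endpoint }}', options: { timeout: 10000 } }
const updated = updateNodeWithParameters(httpNode, merged);
// `updated` is a new node object; httpNode itself is left untouched.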

View File

@@ -0,0 +1,990 @@
import type { INode, INodeTypeDescription, IConnections, NodeConnectionType } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';
import {
validateConnection,
nodeHasOutputType,
nodeAcceptsInputType,
createConnection,
removeConnection,
getNodeConnections,
formatConnectionMessage,
inferConnectionType,
} from '../connection.utils';
describe('connection.utils', () => {
// Mock node types
const mockMainNodeType: INodeTypeDescription = {
displayName: 'Main Node',
name: 'n8n-nodes-base.mainNode',
group: ['transform'],
version: 1,
inputs: [NodeConnectionTypes.Main],
outputs: [NodeConnectionTypes.Main],
properties: [],
defaults: { name: 'Main Node' },
description: '',
};
const mockSubNodeType: INodeTypeDescription = {
displayName: 'AI Sub Node',
name: 'n8n-nodes-base.aiSubNode',
group: ['output'],
version: 1,
inputs: [],
outputs: ['ai_embedding', 'ai_tool'],
properties: [],
defaults: { name: 'AI Sub Node' },
description: '',
};
const mockDualNodeType: INodeTypeDescription = {
displayName: 'Dual Node',
name: 'n8n-nodes-base.dualNode',
group: ['transform'],
version: 1,
inputs: [NodeConnectionTypes.Main, 'ai_embedding'],
outputs: [NodeConnectionTypes.Main, 'ai_document'],
properties: [],
defaults: { name: 'Dual Node' },
description: '',
};
const mockExpressionNodeType: INodeTypeDescription = {
displayName: 'Expression Node',
name: 'n8n-nodes-base.expressionNode',
group: ['transform'],
version: 1,
inputs:
"={{ $parameter.mode === 'retrieve-as-tool' ? ['ai_tool'] : ['main', 'ai_embedding'] }}",
outputs: "={{ $parameter.outputType === 'array' ? ['main', 'main'] : ['main'] }}",
properties: [],
defaults: { name: 'Expression Node' },
description: '',
};
const mockComplexExpressionNodeType: INodeTypeDescription = {
displayName: 'Complex Expression Node',
name: 'n8n-nodes-base.complexExpressionNode',
group: ['transform'],
version: 1,
inputs: `={{
(() => {
const types = [];
if ($parameter.acceptMain) {
types.push({ type: NodeConnectionTypes.Main });
}
if ($parameter.acceptAI) {
types.push({ type: 'ai_embedding' });
}
return types;
})()
}}`,
outputs: "={{ [{ type: NodeConnectionTypes.Main }, { type: 'ai_tool' }] }}",
properties: [],
defaults: { name: 'Complex Expression Node' },
description: '',
};
const mockTriggerNodeType: INodeTypeDescription = {
displayName: 'Trigger Node',
name: 'n8n-nodes-base.trigger',
group: ['trigger'],
version: 1,
inputs: [],
outputs: [NodeConnectionTypes.Main],
properties: [],
defaults: { name: 'Trigger' },
description: '',
};
const mockIfNodeType: INodeTypeDescription = {
displayName: 'If Node',
name: 'n8n-nodes-base.if',
group: ['transform'],
version: 1,
inputs: [NodeConnectionTypes.Main],
outputs: ['main', 'main'],
outputNames: ['true', 'false'],
properties: [],
defaults: { name: 'If' },
description: '',
};
const mockVectorStoreNodeType: INodeTypeDescription = {
displayName: 'Vector Store',
name: 'n8n-nodes-base.vectorStore',
group: ['input'],
version: 1,
inputs: `={{
(() => {
if ($parameter.mode === 'retrieve-as-tool') {
return [{ type: 'ai_tool' }];
}
return [{ type: 'main' }, { type: 'ai_document' }, { type: 'ai_embedding' }];
})()
}}`,
outputs: [NodeConnectionTypes.Main],
properties: [],
defaults: { name: 'Vector Store' },
description: '',
};
// Mock nodes
const mockMainNode: INode = {
id: 'node1',
name: 'Main Node',
type: 'n8n-nodes-base.mainNode',
typeVersion: 1,
position: [0, 0],
parameters: {},
};
const mockSubNode: INode = {
id: 'node2',
name: 'AI Sub Node',
type: 'n8n-nodes-base.aiSubNode',
typeVersion: 1,
position: [100, 0],
parameters: {},
};
const mockDualNode: INode = {
id: 'node3',
name: 'Dual Node',
type: 'n8n-nodes-base.dualNode',
typeVersion: 1,
position: [200, 0],
parameters: {},
};
const mockNodeTypes = [
mockMainNodeType,
mockSubNodeType,
mockDualNodeType,
mockExpressionNodeType,
mockComplexExpressionNodeType,
mockTriggerNodeType,
mockIfNodeType,
mockVectorStoreNodeType,
];
describe('validateConnection', () => {
it('should validate main connection between main nodes', () => {
const result = validateConnection(
mockMainNode,
mockDualNode,
NodeConnectionTypes.Main,
mockNodeTypes,
);
expect(result).toEqual({ valid: true });
});
it('should validate AI connection from sub-node to main node', () => {
const result = validateConnection(mockSubNode, mockDualNode, 'ai_embedding', mockNodeTypes);
expect(result).toEqual({ valid: true });
});
it('should suggest swapping when AI connection goes from main to sub-node', () => {
const result = validateConnection(mockDualNode, mockSubNode, 'ai_embedding', mockNodeTypes);
expect(result).toEqual({
valid: true,
shouldSwap: true,
swappedSource: mockSubNode,
swappedTarget: mockDualNode,
});
});
it('should reject AI connection between two main nodes', () => {
const result = validateConnection(mockMainNode, mockDualNode, 'ai_embedding', mockNodeTypes);
expect(result.valid).toBe(false);
expect(result.error).toContain('requires a sub-node');
});
it('should reject connection when sub-node does not support output type', () => {
const result = validateConnection(
mockSubNode,
mockDualNode,
'ai_document', // Sub-node doesn't output this
mockNodeTypes,
);
expect(result.valid).toBe(false);
expect(result.error).toContain('does not support output type');
});
it('should handle unknown node types', () => {
const unknownNode: INode = {
...mockMainNode,
type: 'n8n-nodes-base.unknown',
};
const result = validateConnection(
unknownNode,
mockDualNode,
NodeConnectionTypes.Main,
mockNodeTypes,
);
expect(result.valid).toBe(false);
expect(result.error).toContain('node types not found');
});
it('should handle expression-based nodes with sub-node detection', () => {
const expressionNode: INode = {
id: 'expr1',
name: 'Expression Node',
type: 'n8n-nodes-base.expressionNode',
typeVersion: 1,
position: [0, 0],
parameters: { mode: 'retrieve-as-tool' },
};
// Expression node in tool mode is detected as sub-node (by isSubNode)
// But it doesn't support the output type 'ai_tool'
const result = validateConnection(expressionNode, mockMainNode, 'ai_tool', mockNodeTypes);
expect(result.valid).toBe(false);
expect(result.error).toContain('does not support output type');
});
});
describe('nodeHasOutputType', () => {
it('should find output type in array', () => {
expect(nodeHasOutputType(mockSubNodeType, 'ai_embedding')).toBe(true);
expect(nodeHasOutputType(mockSubNodeType, 'ai_tool')).toBe(true);
expect(nodeHasOutputType(mockSubNodeType, 'ai_document')).toBe(false);
});
it('should handle string outputs', () => {
const stringOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: ['main'],
};
expect(nodeHasOutputType(stringOutputNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeHasOutputType(stringOutputNode, 'ai_embedding')).toBe(false);
});
it('should handle expression outputs containing type', () => {
const expressionOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: "={{ $parameter.mode === 'tool' ? ['ai_tool'] : ['main'] }}",
};
expect(nodeHasOutputType(expressionOutputNode, 'ai_tool')).toBe(true);
expect(nodeHasOutputType(expressionOutputNode, NodeConnectionTypes.Main)).toBe(true);
});
it('should handle object outputs', () => {
const objectOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: [{ type: NodeConnectionTypes.Main }, { type: 'ai_embedding' }],
};
expect(nodeHasOutputType(objectOutputNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeHasOutputType(objectOutputNode, 'ai_embedding')).toBe(true);
});
it('should handle nodes without outputs', () => {
const noOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: [],
};
expect(nodeHasOutputType(noOutputNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle empty array outputs', () => {
const emptyOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: [],
};
expect(nodeHasOutputType(emptyOutputNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle mixed array outputs (strings and objects)', () => {
const mixedOutputNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: [NodeConnectionTypes.Main, { type: 'ai_embedding' }, 'ai_tool'],
};
expect(nodeHasOutputType(mixedOutputNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeHasOutputType(mixedOutputNode, 'ai_embedding')).toBe(true);
expect(nodeHasOutputType(mixedOutputNode, 'ai_tool')).toBe(true);
});
});
describe('nodeAcceptsInputType', () => {
it('should find input type in array', () => {
expect(nodeAcceptsInputType(mockDualNodeType, NodeConnectionTypes.Main)).toBe(true);
expect(nodeAcceptsInputType(mockDualNodeType, 'ai_embedding')).toBe(true);
expect(nodeAcceptsInputType(mockDualNodeType, 'ai_tool')).toBe(false);
});
it('should handle string inputs', () => {
const stringInputNode: INodeTypeDescription = {
...mockMainNodeType,
inputs: ['main'],
};
expect(nodeAcceptsInputType(stringInputNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeAcceptsInputType(stringInputNode, 'ai_embedding')).toBe(false);
});
it('should handle expression inputs containing type', () => {
expect(nodeAcceptsInputType(mockExpressionNodeType, 'ai_tool')).toBe(true);
expect(nodeAcceptsInputType(mockExpressionNodeType, NodeConnectionTypes.Main)).toBe(true);
expect(nodeAcceptsInputType(mockExpressionNodeType, 'ai_embedding')).toBe(true);
});
it('should handle object inputs', () => {
const objectInputNode: INodeTypeDescription = {
...mockMainNodeType,
inputs: [{ type: NodeConnectionTypes.Main }, { type: 'ai_embedding' }],
};
expect(nodeAcceptsInputType(objectInputNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeAcceptsInputType(objectInputNode, 'ai_embedding')).toBe(true);
});
it('should handle nodes without inputs', () => {
const noInputNode: INodeTypeDescription = {
...mockMainNodeType,
inputs: [],
};
expect(nodeAcceptsInputType(noInputNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle empty array inputs', () => {
const emptyInputNode: INodeTypeDescription = {
...mockMainNodeType,
inputs: [],
};
expect(nodeAcceptsInputType(emptyInputNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle complex expressions with NodeConnectionTypes', () => {
// The complex expression has type: 'ai_embedding' in the string
// so it should find that type
expect(nodeAcceptsInputType(mockComplexExpressionNodeType, NodeConnectionTypes.Main)).toBe(
false,
);
expect(nodeAcceptsInputType(mockComplexExpressionNodeType, 'ai_embedding')).toBe(true);
});
});
describe('createConnection', () => {
it('should create a new connection', () => {
const connections: IConnections = {};
const result = createConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result).toEqual({
node1: {
main: [[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
});
});
it('should add to existing connections', () => {
const connections: IConnections = {
node1: {
main: [[{ node: 'node3', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = createConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result.node1.main[0]).toHaveLength(2);
expect(result.node1.main[0]).toContainEqual({
node: 'node2',
type: NodeConnectionTypes.Main,
index: 0,
});
});
it('should handle custom indices', () => {
const connections: IConnections = {};
const result = createConnection(
connections,
'node1',
'node2',
NodeConnectionTypes.Main,
1, // sourceOutputIndex
2, // targetInputIndex
);
expect(result.node1.main).toHaveLength(2);
expect(result.node1.main[0]).toEqual([]);
expect(result.node1.main[1]).toEqual([
{
node: 'node2',
type: NodeConnectionTypes.Main,
index: 2,
},
]);
});
it('should not duplicate existing connections', () => {
const connections: IConnections = {
node1: {
main: [[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = createConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result.node1.main[0]).toHaveLength(1);
});
it('should handle different connection types', () => {
const connections: IConnections = {};
const result1 = createConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
const result2 = createConnection(
result1,
'node1',
'node3',
'ai_embedding' as NodeConnectionType,
);
expect(result2.node1).toHaveProperty('main');
expect(result2.node1).toHaveProperty('ai_embedding');
expect(result2.node1.main[0]).toHaveLength(1);
expect(result2.node1.ai_embedding[0]).toHaveLength(1);
});
it('should handle null/undefined in connection arrays', () => {
const connections: IConnections = {
node1: {
main: [null, []],
},
};
const result = createConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main, 0);
expect(result.node1.main[0]).toEqual([
{
node: 'node2',
type: NodeConnectionTypes.Main,
index: 0,
},
]);
});
});
describe('removeConnection', () => {
it('should remove a specific connection', () => {
const connections: IConnections = {
node1: {
main: [
[
{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 },
{ node: 'node3', type: NodeConnectionTypes.Main, index: 0 },
],
],
},
};
const result = removeConnection(
connections,
'node1',
'node2',
NodeConnectionTypes.Main,
0,
0,
);
expect(result.node1.main[0]).toHaveLength(1);
expect(result.node1.main[0]?.[0].node).toBe('node3');
});
it('should remove all connections to a target node', () => {
const connections: IConnections = {
node1: {
main: [
[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }],
[{ node: 'node2', type: NodeConnectionTypes.Main, index: 1 }],
],
},
};
const result = removeConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result.node1).toBeUndefined();
});
it('should clean up empty connection types', () => {
const connections: IConnections = {
node1: {
main: [[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = removeConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result.node1).toBeUndefined();
});
it('should handle non-existent connections gracefully', () => {
const connections: IConnections = {
node1: {
main: [[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = removeConnection(
connections,
'node1',
'node3', // non-existent
NodeConnectionTypes.Main,
);
expect(result).toEqual(connections);
});
it('should handle non-existent source node', () => {
const connections: IConnections = {};
const result = removeConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result).toEqual({});
});
it('should handle different connection types', () => {
const connections: IConnections = {
node1: {
main: [[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
ai_embedding: [[{ node: 'node3', type: 'ai_embedding' as NodeConnectionType, index: 0 }]],
},
};
const result = removeConnection(connections, 'node1', 'node3', 'ai_embedding');
expect(result.node1).toHaveProperty('main');
expect(result.node1).not.toHaveProperty('ai_embedding');
});
it('should handle null/undefined in connection arrays', () => {
const connections: IConnections = {
node1: {
main: [null, [{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = removeConnection(connections, 'node1', 'node2', NodeConnectionTypes.Main);
expect(result.node1).toBeUndefined();
});
});
describe('getNodeConnections', () => {
const complexConnections: IConnections = {
node1: {
main: [
[{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }],
[{ node: 'node3', type: NodeConnectionTypes.Main, index: 0 }],
],
ai_embedding: [[{ node: 'node4', type: 'ai_embedding' as NodeConnectionType, index: 1 }]],
},
node2: {
main: [[{ node: 'node3', type: NodeConnectionTypes.Main, index: 1 }]],
},
};
it('should get outgoing connections', () => {
const result = getNodeConnections(complexConnections, 'node1', 'source');
expect(result).toHaveLength(3);
expect(result).toContainEqual({
node: 'node2',
type: NodeConnectionTypes.Main,
sourceIndex: 0,
targetIndex: 0,
});
expect(result).toContainEqual({
node: 'node3',
type: NodeConnectionTypes.Main,
sourceIndex: 1,
targetIndex: 0,
});
expect(result).toContainEqual({
node: 'node4',
type: 'ai_embedding',
sourceIndex: 0,
targetIndex: 1,
});
});
it('should get incoming connections', () => {
const result = getNodeConnections(complexConnections, 'node3', 'target');
expect(result).toHaveLength(2);
expect(result).toContainEqual({
node: 'node1',
type: NodeConnectionTypes.Main,
sourceIndex: 1,
targetIndex: 0,
});
expect(result).toContainEqual({
node: 'node2',
type: NodeConnectionTypes.Main,
sourceIndex: 0,
targetIndex: 1,
});
});
it('should return empty array for nodes without connections', () => {
const result = getNodeConnections(complexConnections, 'node5', 'source');
expect(result).toEqual([]);
});
it('should handle null/undefined in connection arrays', () => {
const connections: IConnections = {
node1: {
main: [null, [{ node: 'node2', type: NodeConnectionTypes.Main, index: 0 }]],
},
};
const result = getNodeConnections(connections, 'node1', 'source');
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
node: 'node2',
type: NodeConnectionTypes.Main,
sourceIndex: 1,
targetIndex: 0,
});
});
});
describe('formatConnectionMessage', () => {
it('should format normal connection', () => {
const result = formatConnectionMessage('Node A', 'Node B', NodeConnectionTypes.Main);
expect(result).toBe('Connected: Node A → Node B (main)');
});
it('should format swapped connection', () => {
const result = formatConnectionMessage('Node A', 'Node B', 'ai_embedding', true);
expect(result).toBe(
'Auto-corrected connection: Node A (ai_embedding) → Node B. (Note: Swapped nodes to ensure sub-node is the source)',
);
});
it('should handle different connection types', () => {
const result = formatConnectionMessage('AI Node', 'Main Node', 'ai_tool');
expect(result).toBe('Connected: AI Node → Main Node (ai_tool)');
});
});
describe('inferConnectionType', () => {
it('should infer main connection between main nodes', () => {
const result = inferConnectionType(
mockMainNode,
mockDualNode,
mockMainNodeType,
mockDualNodeType,
);
expect(result).toEqual({ connectionType: NodeConnectionTypes.Main });
});
it('should infer AI connection from sub-node to main node', () => {
const result = inferConnectionType(
mockSubNode,
mockDualNode,
mockSubNodeType,
mockDualNodeType,
);
expect(result).toEqual({ connectionType: 'ai_embedding' });
});
it('should suggest swap for main node to sub-node', () => {
// For swap to work, the target (sub-node) must output something the source accepts
const aiOutputSubNode: INodeTypeDescription = {
...mockSubNodeType,
name: 'n8n-nodes-base.aiOutputSub',
inputs: [],
outputs: ['ai_embedding'],
};
const targetNode: INode = {
id: 'sub1',
name: 'AI Output Sub',
type: aiOutputSubNode.name,
typeVersion: 1,
position: [0, 0],
parameters: {},
};
// Use dualNode as source since it accepts ai_embedding
const result = inferConnectionType(
mockDualNode,
targetNode,
mockDualNodeType,
aiOutputSubNode,
);
expect(result.requiresSwap).toBe(true);
expect(result.connectionType).toBe('ai_embedding');
});
it('should handle multiple possible AI connections', () => {
const multiOutputSubNode: INodeTypeDescription = {
...mockSubNodeType,
outputs: ['ai_embedding', 'ai_tool'],
};
const multiInputMainNode: INodeTypeDescription = {
...mockDualNodeType,
inputs: [NodeConnectionTypes.Main, 'ai_embedding', 'ai_tool'],
};
const result = inferConnectionType(
mockSubNode,
mockDualNode,
multiOutputSubNode,
multiInputMainNode,
);
expect(result.possibleTypes).toEqual(['ai_embedding', 'ai_tool']);
expect(result.error).toContain('Multiple AI connection types possible');
});
it('should handle no compatible connections', () => {
const incompatibleNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: ['ai_document'],
inputs: ['ai_tool'],
};
const result = inferConnectionType(
mockMainNode,
mockDualNode,
mockMainNodeType,
incompatibleNode,
);
expect(result.error).toContain('No compatible connection types found');
});
it('should handle expression-based nodes', () => {
const exprNode: INode = {
...mockMainNode,
type: 'n8n-nodes-base.expressionNode',
parameters: { mode: 'retrieve-as-tool' },
};
const result = inferConnectionType(
exprNode,
mockDualNode,
mockExpressionNodeType,
mockDualNodeType,
);
// Expression node in tool mode acts as sub-node
expect(result.connectionType).toBeDefined();
});
it('should handle If node with multiple outputs', () => {
const ifNode: INode = {
id: 'if1',
name: 'If',
type: 'n8n-nodes-base.if',
typeVersion: 1,
position: [0, 0],
parameters: {},
};
const result = inferConnectionType(ifNode, mockMainNode, mockIfNodeType, mockMainNodeType);
expect(result).toEqual({ connectionType: NodeConnectionTypes.Main });
});
it('should handle trigger nodes with no inputs', () => {
const triggerNode: INode = {
id: 'trigger1',
name: 'Trigger',
type: 'n8n-nodes-base.trigger',
typeVersion: 1,
position: [0, 0],
parameters: {},
};
const result = inferConnectionType(
triggerNode,
mockMainNode,
mockTriggerNodeType,
mockMainNodeType,
);
expect(result).toEqual({ connectionType: NodeConnectionTypes.Main });
});
it('should handle sub-node to sub-node connections', () => {
const aiDocSubNode: INodeTypeDescription = {
...mockSubNodeType,
outputs: ['ai_document'],
};
const aiDocAcceptorSubNode: INodeTypeDescription = {
...mockSubNodeType,
name: 'n8n-nodes-base.aiDocAcceptor',
inputs: ['ai_document'],
outputs: ['ai_tool'],
};
const sourceNode: INode = {
...mockSubNode,
type: aiDocSubNode.name,
};
const targetNode: INode = {
...mockSubNode,
id: 'node3',
name: 'AI Doc Acceptor',
type: aiDocAcceptorSubNode.name,
};
const result = inferConnectionType(
sourceNode,
targetNode,
aiDocSubNode,
aiDocAcceptorSubNode,
);
expect(result).toEqual({ connectionType: 'ai_document' });
});
it('should handle vector store with mode-based inputs', () => {
const vectorStoreNode: INode = {
id: 'vs1',
name: 'Vector Store',
type: 'n8n-nodes-base.vectorStore',
typeVersion: 1,
position: [0, 0],
parameters: { mode: 'retrieve-as-tool' },
};
const aiToolOutputNode: INodeTypeDescription = {
...mockSubNodeType,
outputs: ['ai_tool'],
};
const aiToolNode: INode = {
...mockSubNode,
type: aiToolOutputNode.name,
};
const result = inferConnectionType(
aiToolNode,
vectorStoreNode,
aiToolOutputNode,
mockVectorStoreNodeType,
);
// Vector Store in tool mode only accepts ai_tool, and source outputs it
// This should work as an AI connection
expect(result.connectionType).toBe('ai_tool');
});
it('should handle complex expression with array return', () => {
const complexNode: INode = {
id: 'complex1',
name: 'Complex',
type: 'n8n-nodes-base.complexExpressionNode',
typeVersion: 1,
position: [0, 0],
parameters: { acceptMain: true, acceptAI: true },
};
const result = inferConnectionType(
complexNode,
mockMainNode,
mockComplexExpressionNodeType,
mockMainNodeType,
);
expect(result).toEqual({ connectionType: NodeConnectionTypes.Main });
});
it('should handle expression returning array with main', () => {
const arrayExprNode: INodeTypeDescription = {
...mockMainNodeType,
name: 'n8n-nodes-base.arrayExpr',
outputs: '={{ ["main", "main"] }}',
};
const arrayNode: INode = {
id: 'array1',
name: 'Array',
type: arrayExprNode.name,
typeVersion: 1,
position: [0, 0],
parameters: {},
};
const result = inferConnectionType(arrayNode, mockMainNode, arrayExprNode, mockMainNodeType);
expect(result).toEqual({ connectionType: NodeConnectionTypes.Main });
});
it('should handle single matching type fallback', () => {
const customNode: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing custom types
outputs: ['custom_type'],
};
const customAcceptorNode: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing custom types
inputs: ['custom_type'],
};
const sourceNode: INode = {
...mockMainNode,
type: customNode.name,
};
const targetNode: INode = {
...mockMainNode,
id: 'custom2',
type: customAcceptorNode.name,
};
const result = inferConnectionType(sourceNode, targetNode, customNode, customAcceptorNode);
expect(result).toEqual({ connectionType: 'custom_type' });
});
it('should handle multiple non-AI matching types', () => {
const multiNode1: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing custom types
outputs: ['type1', 'type2'],
};
const multiNode2: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing custom types
inputs: ['type1', 'type2'],
};
const sourceNode: INode = {
...mockMainNode,
type: multiNode1.name,
};
const targetNode: INode = {
...mockMainNode,
id: 'multi2',
type: multiNode2.name,
};
const result = inferConnectionType(sourceNode, targetNode, multiNode1, multiNode2);
expect(result.possibleTypes).toEqual(['type1', 'type2']);
expect(result.error).toContain('Multiple connection types possible');
});
});
describe('expression parsing edge cases', () => {
it('should handle malformed expressions gracefully', () => {
const malformedNode: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing invalid type
inputs: '={{ this is not valid javascript',
};
expect(nodeAcceptsInputType(malformedNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle empty expressions', () => {
const emptyExprNode: INodeTypeDescription = {
...mockMainNodeType,
// @ts-expect-error Testing invalid type
inputs: '',
};
expect(nodeAcceptsInputType(emptyExprNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle expressions with no types', () => {
const noTypeExprNode: INodeTypeDescription = {
...mockMainNodeType,
inputs: '={{ $parameter.something }}',
};
expect(nodeAcceptsInputType(noTypeExprNode, NodeConnectionTypes.Main)).toBe(false);
});
it('should handle expressions with return statements', () => {
const returnExprNode: INodeTypeDescription = {
...mockMainNodeType,
outputs: "={{ return ['main', 'ai_tool'] }}",
};
expect(nodeHasOutputType(returnExprNode, NodeConnectionTypes.Main)).toBe(true);
expect(nodeHasOutputType(returnExprNode, 'ai_tool')).toBe(true);
});
});
});

View File

@@ -0,0 +1,353 @@
/* eslint-disable id-denylist */
import type { INode } from 'n8n-workflow';
import {
extractNodeParameters,
mergeParameters,
updateNodeWithParameters,
formatChangesForPrompt,
fixExpressionPrefixes,
} from '../parameter-update.utils';
describe('parameter-update.utils', () => {
// Mock node for testing
const mockNode: INode = {
id: 'node1',
name: 'Test Node',
type: 'n8n-nodes-base.httpRequest',
typeVersion: 1,
position: [0, 0],
parameters: {
url: 'https://example.com',
method: 'GET',
authentication: 'none',
},
};
describe('extractNodeParameters', () => {
it('should extract parameters from node', () => {
const params = extractNodeParameters(mockNode);
expect(params).toEqual({
url: 'https://example.com',
method: 'GET',
authentication: 'none',
});
});
it('should return empty object for node without parameters', () => {
const nodeWithoutParams: INode = {
...mockNode,
// @ts-expect-error Testing invalid parameters
parameters: undefined,
};
const params = extractNodeParameters(nodeWithoutParams);
expect(params).toEqual({});
});
});
describe('mergeParameters', () => {
it('should merge parameters with new values taking precedence', () => {
const existing = { url: 'https://old.com', method: 'GET' };
const newParams = { url: 'https://new.com', headers: { 'X-Test': 'value' } };
const result = mergeParameters(existing, newParams);
expect(result).toEqual({
url: 'https://new.com',
method: 'GET',
headers: { 'X-Test': 'value' },
});
});
it('should handle null/undefined parameters', () => {
// @ts-expect-error Testing invalid parameters
expect(mergeParameters(null, { url: 'test' })).toEqual({ url: 'test' });
// @ts-expect-error Testing ivalid parameters
expect(mergeParameters({ url: 'test' }, null)).toEqual({ url: 'test' });
// @ts-expect-error Testing ivalid parameters
expect(mergeParameters(null, null)).toEqual({});
// @ts-expect-error Testing ivalid parameters
expect(mergeParameters(undefined, { url: 'test' })).toEqual({ url: 'test' });
});
it('should deep merge nested objects', () => {
const existing = {
headers: {
'Content-Type': 'application/json',
'X-Old': 'old-value',
},
options: {
timeout: 5000,
retry: 3,
},
};
const newParams = {
headers: {
'Content-Type': 'application/xml',
'X-New': 'new-value',
},
options: {
timeout: 10000,
},
};
const result = mergeParameters(existing, newParams);
expect(result).toEqual({
headers: {
'Content-Type': 'application/xml',
'X-Old': 'old-value',
'X-New': 'new-value',
},
options: {
timeout: 10000,
retry: 3,
},
});
});
it('should handle arrays without deep merging', () => {
const existing = { items: ['a', 'b'] };
const newParams = { items: ['c', 'd'] };
const result = mergeParameters(existing, newParams);
expect(result).toEqual({ items: ['c', 'd'] });
});
it('should handle mixed types correctly', () => {
const existing = {
string: 'old',
number: 1,
boolean: true,
object: { key: 'old' },
array: [1, 2],
null: null,
};
const newParams = {
string: 'new',
number: 2,
boolean: false,
object: { key: 'new', extra: 'value' },
array: [3, 4, 5],
null: 'not-null',
};
const result = mergeParameters(existing, newParams);
expect(result).toEqual({
string: 'new',
number: 2,
boolean: false,
object: { key: 'new', extra: 'value' },
array: [3, 4, 5],
null: 'not-null',
});
});
it('should preserve existing keys not in new parameters', () => {
const existing = { a: 1, b: 2, c: 3 };
const newParams = { b: 20, d: 4 };
const result = mergeParameters(existing, newParams);
expect(result).toEqual({ a: 1, b: 20, c: 3, d: 4 });
});
it('should handle empty objects', () => {
expect(mergeParameters({}, {})).toEqual({});
expect(mergeParameters({ a: 1 }, {})).toEqual({ a: 1 });
expect(mergeParameters({}, { a: 1 })).toEqual({ a: 1 });
});
it('should handle deeply nested structures', () => {
const existing = {
level1: {
level2: {
level3: {
value: 'old',
keep: 'this',
},
},
},
};
const newParams = {
level1: {
level2: {
level3: {
value: 'new',
},
newLevel: 'added',
},
},
};
const result = mergeParameters(existing, newParams);
expect(result).toEqual({
level1: {
level2: {
level3: {
value: 'new',
keep: 'this',
},
newLevel: 'added',
},
},
});
});
});
describe('updateNodeWithParameters', () => {
it('should create new node with updated parameters', () => {
const newParams = { url: 'https://new.com', method: 'POST' };
const updated = updateNodeWithParameters(mockNode, newParams);
expect(updated).not.toBe(mockNode); // New object
expect(updated.parameters).toEqual(newParams);
expect(updated.id).toBe(mockNode.id);
expect(updated.name).toBe(mockNode.name);
expect(updated.type).toBe(mockNode.type);
});
it('should preserve all other node properties', () => {
const nodeWithExtra = {
...mockNode,
credentials: { httpAuth: { id: '123', name: 'Auth' } },
disabled: true,
};
const updated = updateNodeWithParameters(nodeWithExtra, { new: 'params' });
expect(updated.credentials).toEqual(nodeWithExtra.credentials);
expect(updated.disabled).toBe(true);
});
});
describe('formatChangesForPrompt', () => {
it('should format changes array into numbered list', () => {
const changes = ['Update URL', 'Change method to POST', 'Add authentication'];
const formatted = formatChangesForPrompt(changes);
expect(formatted).toBe('1. Update URL\n2. Change method to POST\n3. Add authentication');
});
it('should handle empty array', () => {
const formatted = formatChangesForPrompt([]);
expect(formatted).toBe('');
});
it('should handle single change', () => {
const formatted = formatChangesForPrompt(['Only change']);
expect(formatted).toBe('1. Only change');
});
});
describe('fixExpressionPrefixes', () => {
it('should add = prefix to strings containing {{', () => {
expect(fixExpressionPrefixes('{{ $json.data }}')).toBe('={{ $json.data }}');
expect(fixExpressionPrefixes('Some text {{ $json.field }}')).toBe(
'=Some text {{ $json.field }}',
);
});
it('should not add prefix to strings already starting with =', () => {
expect(fixExpressionPrefixes('={{ $json.data }}')).toBe('={{ $json.data }}');
});
it('should not modify strings without {{', () => {
expect(fixExpressionPrefixes('normal string')).toBe('normal string');
expect(fixExpressionPrefixes('{ json: true }')).toBe('{ json: true }');
});
it('should replace {{ $json }} with {{ $json.toJsonString() }}', () => {
expect(fixExpressionPrefixes('{{ $json }}')).toBe('={{ $json.toJsonString() }}');
expect(fixExpressionPrefixes('Value: {{ $json }}')).toBe(
'=Value: {{ $json.toJsonString() }}',
);
});
it('should handle arrays recursively', () => {
const input = ['normal', '{{ $json.field }}', '={{ existing }}', ['nested {{ $json }}']];
const expected = [
'normal',
'={{ $json.field }}',
'={{ existing }}',
['=nested {{ $json.toJsonString() }}'],
];
expect(fixExpressionPrefixes(input)).toEqual(expected);
});
it('should handle objects recursively', () => {
const input = {
normal: 'value',
expression: '{{ $json.data }}',
nested: {
field: '{{ $json.nested }}',
array: ['{{ $json.item }}'],
},
existing: '={{ $json.existing }}',
};
const expected = {
normal: 'value',
expression: '={{ $json.data }}',
nested: {
field: '={{ $json.nested }}',
array: ['={{ $json.item }}'],
},
existing: '={{ $json.existing }}',
};
expect(fixExpressionPrefixes(input)).toEqual(expected);
});
it('should handle primitive types without modification', () => {
expect(fixExpressionPrefixes(123)).toBe(123);
expect(fixExpressionPrefixes(true)).toBe(true);
expect(fixExpressionPrefixes(false)).toBe(false);
expect(fixExpressionPrefixes(null)).toBe(null);
expect(fixExpressionPrefixes(undefined)).toBe(undefined);
});
it('should handle complex nested structures', () => {
const input = {
headers: {
Authorization: 'Bearer {{$credentials.apiKey}}',
'Content-Type': 'application/json',
},
body: {
data: '{{ $json.payload }}',
nested: {
value: '{{ $json }}',
array: [{ expr: '{{ $json.item }}' }, 'normal'],
},
},
};
const expected = {
headers: {
Authorization: '=Bearer {{$credentials.apiKey}}',
'Content-Type': 'application/json',
},
body: {
data: '={{ $json.payload }}',
nested: {
value: '={{ $json.toJsonString() }}',
array: [{ expr: '={{ $json.item }}' }, 'normal'],
},
},
};
expect(fixExpressionPrefixes(input)).toEqual(expected);
});
it('should handle edge cases', () => {
// Empty string
expect(fixExpressionPrefixes('')).toBe('');
// String with multiple {{
expect(fixExpressionPrefixes('{{ $json.a }} and {{ $json.b }}')).toBe(
'={{ $json.a }} and {{ $json.b }}',
);
// Malformed expressions
expect(fixExpressionPrefixes('{{ incomplete')).toBe('={{ incomplete');
expect(fixExpressionPrefixes('}} reversed {{')).toBe('=}} reversed {{');
});
});
});

View File

@@ -0,0 +1,94 @@
import type { DynamicStructuredTool } from '@langchain/core/tools';
import type { INodeTypeDescription } from 'n8n-workflow';
import type { WorkflowState } from '@/workflow-state';
/**
* LLM configuration for the workflow builder
*/
export interface LLMConfig {
openAIApiKey?: string;
model: string;
temperature?: number;
}
/**
* Options for parameter updater chain
*/
export interface ParameterUpdaterOptions {
nodeType: string;
nodeDefinition: INodeTypeDescription;
requestedChanges: string[];
}
/**
* Configuration for mapping node types to required prompt sections
*/
export interface NodePromptConfig {
/** Node type patterns that require specific guides */
nodeTypePatterns: {
set: string[];
if: string[];
httpRequest: string[];
tool: string[];
};
/** Keywords that trigger inclusion of specific guides */
parameterKeywords: {
resourceLocator: string[];
textExpressions: string[];
};
/** Maximum number of examples to include */
maxExamples: number;
/** Token budget for dynamic sections */
targetTokenBudget: number;
}
/**
* Advanced configuration for fine-tuning prompt generation
*/
export interface PromptGenerationOptions {
/** Include examples in the prompt */
includeExamples?: boolean;
/** Override the maximum number of examples */
maxExamples?: number;
/** Force inclusion of specific guides */
forceInclude?: {
setNode?: boolean;
ifNode?: boolean;
httpRequest?: boolean;
toolNodes?: boolean;
resourceLocator?: boolean;
textFields?: boolean;
};
/** Custom token budget */
tokenBudget?: number;
/** Enable verbose logging */
verbose?: boolean;
}
/**
* Context for building prompts
*/
export interface PromptBuilderContext {
nodeType: string;
nodeDefinition: INodeTypeDescription;
requestedChanges: string[];
hasResourceLocatorParams?: boolean;
options?: PromptGenerationOptions;
config?: NodePromptConfig;
}
/**
* Options for tool executor
*/
export interface ToolExecutorOptions {
state: typeof WorkflowState.State;
toolMap: Map<string, DynamicStructuredTool>;
}
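A minimal sketch of how these options might be filled in for a Set-node update; every value below is hypothetical and only illustrates the shape of PromptGenerationOptions.
// Hypothetical values, illustrating the PromptGenerationOptions shape only
const exampleOptions: PromptGenerationOptions = {
	includeExamples: true,
	maxExamples: 2,
	forceInclude: { setNode: true, textFields: true },
	tokenBudget: 4000,
	verbose: false,
};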

View File

@@ -0,0 +1,46 @@
import type { INode, NodeConnectionType } from 'n8n-workflow';
/**
* Result of creating a connection between nodes
*/
export interface ConnectionResult {
sourceNode: string;
targetNode: string;
connectionType: string;
swapped: boolean;
message: string;
}
/**
* Result of connection validation
*/
export interface ConnectionValidationResult {
valid: boolean;
error?: string;
shouldSwap?: boolean;
swappedSource?: INode;
swappedTarget?: INode;
}
/**
* Connection operation result
*/
export interface ConnectionOperationResult {
success: boolean;
sourceNode: string;
targetNode: string;
connectionType: string;
swapped: boolean;
message: string;
error?: string;
}
/**
* Result of inferring connection type
*/
export interface InferConnectionTypeResult {
connectionType?: NodeConnectionType;
possibleTypes?: NodeConnectionType[];
requiresSwap?: boolean;
error?: string;
}

View File

@@ -0,0 +1,10 @@
// Re-export all types from their respective modules
export * from './workflow';
export * from './messages';
export * from './tools';
export * from './connections';
export * from './streaming';
export * from './nodes';
export * from './config';
export * from './utils';

View File

@@ -0,0 +1,72 @@
/**
* Quick reply option for chat messages
*/
export interface QuickReplyOption {
text: string;
type: string;
isFeedback?: boolean;
}
/**
* Assistant chat message
*/
export interface AssistantChatMessage {
role: 'assistant';
type: 'message';
text: string;
step?: string;
codeSnippet?: string;
}
/**
* Assistant summary message
*/
export interface AssistantSummaryMessage {
role: 'assistant';
type: 'summary';
title: string;
content: string;
}
/**
* End session event message
*/
export interface EndSessionMessage {
role: 'assistant';
type: 'event';
eventName: 'end-session';
}
/**
* Agent suggestion message
*/
export interface AgentChatMessage {
role: 'assistant';
type: 'agent-suggestion';
title: string;
text: string;
}
/**
* Prompt validation message
*/
export interface PromptValidationMessage {
role: 'assistant';
type: 'prompt-validation';
isWorkflowPrompt: boolean;
id: string;
}
/**
* Union type for all possible message responses
*/
export type MessageResponse =
| ((
| AssistantChatMessage
| AssistantSummaryMessage
| AgentChatMessage
| PromptValidationMessage
) & {
quickReplies?: QuickReplyOption[];
})
| EndSessionMessage;
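A short illustration of how the union composes; both values below are hypothetical and only show the shape of MessageResponse.
// Hypothetical example values, for illustration only
const suggestion: MessageResponse = {
	role: 'assistant',
	type: 'agent-suggestion',
	title: 'Add a trigger',
	text: 'Workflows usually start with a trigger node.',
	quickReplies: [{ text: 'Add it', type: 'accept' }],
};
const endSession: MessageResponse = {
	role: 'assistant',
	type: 'event',
	eventName: 'end-session',
};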

View File

@@ -0,0 +1,38 @@
import type { INodeParameters, INodeProperties, INodeTypeDescription } from 'n8n-workflow';
/**
* Detailed information about a node type
*/
export interface NodeDetails {
name: string;
displayName: string;
description: string;
properties: INodeProperties[];
subtitle?: string;
inputs: INodeTypeDescription['inputs'];
outputs: INodeTypeDescription['outputs'];
}
/**
* Node search result with scoring
*/
export interface NodeSearchResult {
name: string;
displayName: string;
description: string;
score: number;
inputs: INodeTypeDescription['inputs'];
outputs: INodeTypeDescription['outputs'];
}
/**
* Information about a node that was added to the workflow
*/
export interface AddedNode {
id: string;
name: string;
type: string;
displayName?: string;
parameters?: INodeParameters;
position: [number, number];
}

View File

@@ -0,0 +1,62 @@
/**
* Agent message chunk for streaming
*/
export interface AgentMessageChunk {
role: 'assistant';
type: 'message';
text: string;
}
/**
* Tool progress chunk for streaming
*/
export interface ToolProgressChunk {
type: 'tool';
toolName: string;
status: string;
[key: string]: unknown;
}
/**
* Workflow update chunk for streaming
*/
export interface WorkflowUpdateChunk {
role: 'assistant';
type: 'workflow-updated';
codeSnippet: string;
}
/**
* Execution request chunk for streaming
*/
export interface ExecutionRequestChunk {
role: 'assistant';
type: 'execution-requested';
reason: string;
}
/**
* Union type for all stream chunks
*/
export type StreamChunk =
| AgentMessageChunk
| ToolProgressChunk
| WorkflowUpdateChunk
| ExecutionRequestChunk;
/**
* Stream output containing messages
*/
export interface StreamOutput {
messages: StreamChunk[];
}
/**
* Configuration for stream processing
*/
export interface StreamProcessorConfig {
/** Thread configuration for retrieving state */
threadConfig: { configurable: { thread_id: string } };
/** List of tool names that trigger workflow updates */
workflowUpdateTools?: string[];
}

View File

@@ -0,0 +1,125 @@
import type { INodeParameters } from 'n8n-workflow';
import type { ZodIssue } from 'zod';
import type { AddedNode, NodeDetails, NodeSearchResult } from './nodes';
/**
* Types of progress updates
*/
export type ProgressUpdateType = 'input' | 'output' | 'progress' | 'error';
/**
* Progress update during tool execution
*/
export interface ProgressUpdate<T = Record<string, unknown>> {
type: ProgressUpdateType;
data: T;
timestamp?: string;
}
/**
* Tool progress message for streaming updates
*/
export interface ToolProgressMessage<TToolName extends string = string> {
type: 'tool';
toolName: TToolName;
toolCallId?: string;
status: 'running' | 'completed' | 'error';
updates: ProgressUpdate[];
}
/**
* Tool execution error
*/
export interface ToolError {
message: string;
code?: string;
details?: ZodIssue[] | Record<string, unknown>;
}
/**
* Progress reporter interface for tools
*/
export interface ProgressReporter {
start: <T>(input: T) => void;
progress: (message: string, data?: Record<string, unknown>) => void;
complete: <T>(output: T) => void;
error: (error: ToolError) => void;
createBatchReporter: (scope: string) => BatchReporter;
}
/**
* Batch progress reporter for multi-item operations
*/
export interface BatchReporter {
init: (total: number) => void;
next: (itemDescription: string) => void;
complete: () => void;
}
/**
* Output type for update node parameters tool
*/
export interface UpdateNodeParametersOutput {
nodeId: string;
nodeName: string;
nodeType: string;
updatedParameters: INodeParameters;
appliedChanges: string[];
message: string;
}
/**
* Output type for add node tool
*/
export interface AddNodeOutput {
addedNode: AddedNode;
message: string;
}
/**
* Output type for connect nodes tool
*/
export interface ConnectNodesOutput {
sourceNode: string;
targetNode: string;
connectionType: string;
swapped: boolean;
message: string;
found: {
sourceNode: boolean;
targetNode: boolean;
};
}
/**
* Output type for remove node tool
*/
export interface RemoveNodeOutput {
removedNodeId: string;
removedNodeName: string;
removedNodeType: string;
connectionsRemoved: number;
message: string;
}
/**
* Output type for node details tool
*/
export interface NodeDetailsOutput {
details: NodeDetails;
found: boolean;
message: string;
}
/**
* Output type for node search tool
*/
export interface NodeSearchOutput {
results: Array<{
query: string;
results: NodeSearchResult[];
}>;
totalResults: number;
message: string;
}
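A rough sketch of the ProgressReporter lifecycle as a tool might use it; the tool body and the reporter instance are assumptions, not part of this file.
// Hypothetical tool body; `reporter` is assumed to implement ProgressReporter.
function runNodeSearch(reporter: ProgressReporter, queries: string[]): void {
	reporter.start({ queries });
	const batch = reporter.createBatchReporter('node search');
	batch.init(queries.length);
	for (const query of queries) {
		batch.next(`Searching for "${query}"`);
		reporter.progress('Querying node catalogue', { query });
	}
	batch.complete();
	reporter.complete<NodeSearchOutput>({ results: [], totalResults: 0, message: 'Done' });
}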

View File

@@ -0,0 +1,8 @@
import type { WorkflowState } from '../workflow-state';
/**
* Type for state updater functions
*/
export type StateUpdater<TState = typeof WorkflowState.State> =
| Partial<TState>
| ((state: TState) => Partial<TState>);

View File

@@ -0,0 +1,17 @@
import type { IWorkflowBase, INode, IConnections } from 'n8n-workflow';
/**
* Simplified workflow representation containing only nodes and connections
*/
export type SimpleWorkflow = Pick<IWorkflowBase, 'nodes' | 'connections'>;
/**
* Workflow operation types that can be applied to the workflow state
*/
export type WorkflowOperation =
| { type: 'clear' }
| { type: 'removeNode'; nodeIds: string[] }
| { type: 'addNodes'; nodes: INode[] }
| { type: 'updateNode'; nodeId: string; updates: Partial<INode> }
| { type: 'setConnections'; connections: IConnections }
| { type: 'mergeConnections'; connections: IConnections };
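For illustration, a small batch of operations that adds a node and wires it to an existing trigger; all ids, names, and positions below are hypothetical.
const addAndConnect: WorkflowOperation[] = [
	{
		type: 'addNodes',
		nodes: [
			{
				id: 'http1',
				name: 'HTTP Request',
				type: 'n8n-nodes-base.httpRequest',
				typeVersion: 1,
				position: [300, 100],
				parameters: {},
			},
		],
	},
	{
		type: 'mergeConnections',
		connections: { trigger1: { main: [[{ node: 'http1', type: 'main', index: 0 }]] } },
	},
];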

View File

@@ -0,0 +1,76 @@
import { type INode, NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
/**
* Determines if a node is a sub-node (has no main input connections)
* Sub-nodes are nodes that only have AI inputs or no inputs at all
* @param nodeType - The node type description to check
* @param node - Optional node instance, used to detect tool-mode settings such as 'retrieve-as-tool'
* @returns true if the node is a sub-node, false otherwise
*/
export function isSubNode(nodeType: INodeTypeDescription, node?: INode): boolean {
if (node?.parameters?.mode === 'retrieve-as-tool') {
return true;
}
// Always treat the agent node as a main node
if (nodeType.name === '@n8n/n8n-nodes-langchain.agent') {
return false;
}
// If no inputs at all, it's definitely a sub-node
if (!nodeType.inputs || (Array.isArray(nodeType.inputs) && nodeType.inputs.length === 0)) {
return true;
}
// Handle array of inputs
if (Array.isArray(nodeType.inputs)) {
// Check if ALL inputs are AI connections (no main inputs)
const hasMainInput = nodeType.inputs.some((input) => {
if (typeof input === 'string') {
return input === NodeConnectionTypes.Main || input.toLowerCase() === 'main';
}
// It's an INodeInputConfiguration object
return input.type === NodeConnectionTypes.Main || input.type.toLowerCase() === 'main';
});
return !hasMainInput;
}
// Handle expression-based inputs (dynamic)
if (typeof nodeType.inputs === 'string') {
// Check if the expression contains any indication of main input
// We need to check for any pattern that would result in a main connection
const mainInputPatterns = [
'NodeConnectionTypes.Main',
'type: "main"',
"type: 'main'",
'type:"main"',
"type:'main'",
'type: `main`',
'type: NodeConnectionTypes.Main',
'type:NodeConnectionTypes.Main',
'{ displayName: "", type: "main"',
"{ displayName: '', type: 'main'",
'{ displayName: "", type: NodeConnectionTypes.Main',
"{ displayName: '', type: NodeConnectionTypes.Main",
// Patterns for arrays that include "main" as first element
'return ["main"',
"return ['main'",
'return [`main`',
'return[["main"',
"return[['main'",
'return [[`main`',
// Pattern for spread operations that include main
'["main", ...',
"['main', ...",
'[`main`, ...',
];
// If any main input pattern is found, it's NOT a sub-node
const hasMainInput = mainInputPatterns.some(
(pattern) =>
typeof nodeType.inputs === 'string' &&
nodeType.inputs.toLowerCase().includes(pattern.toLowerCase()),
);
return !hasMainInput;
}
// If we can't determine, assume it's not a sub-node (safer default)
return false;
}
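A quick illustration of how the check behaves on two trimmed-down node type descriptions (hypothetical, reduced to the fields isSubNode inspects):
// Hypothetical, minimal descriptions; real INodeTypeDescription objects carry many more fields.
const aiMemoryType = { name: 'exampleMemory', inputs: ['ai_languageModel'] } as unknown as INodeTypeDescription;
const httpRequestType = { name: 'n8n-nodes-base.httpRequest', inputs: ['main'] } as unknown as INodeTypeDescription;

isSubNode(aiMemoryType); // true: no main input, so it can only attach as an AI sub-node
isSubNode(httpRequestType); // false: it accepts a main input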

View File

@@ -0,0 +1,162 @@
import type { INode, IConnections } from 'n8n-workflow';
import type { SimpleWorkflow, WorkflowOperation } from '../types/workflow';
import type { WorkflowState } from '../workflow-state';
/**
* Apply a list of operations to a workflow
*/
// eslint-disable-next-line complexity
export function applyOperations(
workflow: SimpleWorkflow,
operations: WorkflowOperation[],
): SimpleWorkflow {
// Start with a copy of the current workflow
let result: SimpleWorkflow = {
nodes: [...workflow.nodes],
connections: { ...workflow.connections },
};
// Apply each operation in sequence
for (const operation of operations) {
switch (operation.type) {
case 'clear':
result = { nodes: [], connections: {} };
break;
case 'removeNode': {
const nodesToRemove = new Set(operation.nodeIds);
// Filter out removed nodes
result.nodes = result.nodes.filter((node) => !nodesToRemove.has(node.id));
// Clean up connections
const cleanedConnections: IConnections = {};
// Copy connections, excluding those from/to removed nodes
for (const [sourceId, nodeConnections] of Object.entries(result.connections)) {
if (!nodesToRemove.has(sourceId)) {
cleanedConnections[sourceId] = {};
for (const [connectionType, outputs] of Object.entries(nodeConnections)) {
if (Array.isArray(outputs)) {
cleanedConnections[sourceId][connectionType] = outputs.map((outputConnections) => {
if (Array.isArray(outputConnections)) {
return outputConnections.filter((conn) => !nodesToRemove.has(conn.node));
}
return outputConnections;
});
}
}
}
}
result.connections = cleanedConnections;
break;
}
case 'addNodes': {
// Create a map for quick lookup
const nodeMap = new Map<string, INode>();
result.nodes.forEach((node) => nodeMap.set(node.id, node));
// Add or update nodes
operation.nodes.forEach((node) => {
nodeMap.set(node.id, node);
});
result.nodes = Array.from(nodeMap.values());
break;
}
case 'updateNode': {
result.nodes = result.nodes.map((node) => {
if (node.id === operation.nodeId) {
return { ...node, ...operation.updates };
}
return node;
});
break;
}
case 'setConnections': {
// Replace connections entirely
result.connections = operation.connections;
break;
}
case 'mergeConnections': {
// Merge connections additively
for (const [sourceId, nodeConnections] of Object.entries(operation.connections)) {
if (!result.connections[sourceId]) {
result.connections[sourceId] = nodeConnections;
} else {
// Merge connections for this source node
for (const [connectionType, newOutputs] of Object.entries(nodeConnections)) {
if (!result.connections[sourceId][connectionType]) {
result.connections[sourceId][connectionType] = newOutputs;
} else {
// Merge arrays of connections
const existingOutputs = result.connections[sourceId][connectionType];
if (Array.isArray(newOutputs) && Array.isArray(existingOutputs)) {
// Merge each output index
for (let i = 0; i < Math.max(newOutputs.length, existingOutputs.length); i++) {
if (!newOutputs[i]) continue;
if (!existingOutputs[i]) {
existingOutputs[i] = newOutputs[i];
} else if (Array.isArray(newOutputs[i]) && Array.isArray(existingOutputs[i])) {
// Merge connections at this output index, avoiding duplicates
const existingSet = new Set(
existingOutputs[i]!.map((conn) =>
JSON.stringify({ node: conn.node, type: conn.type, index: conn.index }),
),
);
newOutputs[i]!.forEach((conn) => {
const connStr = JSON.stringify({
node: conn.node,
type: conn.type,
index: conn.index,
});
if (!existingSet.has(connStr)) {
existingOutputs[i]!.push(conn);
}
});
}
}
}
}
}
}
}
break;
}
}
}
return result;
}
/**
* Process operations node for the LangGraph workflow
* This node applies accumulated operations to the workflow state
*/
export function processOperations(state: typeof WorkflowState.State) {
const { workflowJSON, workflowOperations } = state;
// If no operations to process, return unchanged
if (!workflowOperations || workflowOperations.length === 0) {
return {};
}
// Apply all operations to get the new workflow
const newWorkflow = applyOperations(workflowJSON, workflowOperations);
// Return updated state with cleared operations
return {
workflowJSON: newWorkflow,
workflowOperations: null, // Clear processed operations
};
}

View File

@@ -0,0 +1,208 @@
import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
import type {
AgentMessageChunk,
ToolProgressChunk,
WorkflowUpdateChunk,
StreamOutput,
} from '../types/streaming';
/**
* Tools which should trigger canvas updates
*/
export const DEFAULT_WORKFLOW_UPDATE_TOOLS = [
'add_nodes',
'connect_nodes',
'update_node_parameters',
'remove_node',
];
/**
* Process a single chunk from the LangGraph stream
*/
// eslint-disable-next-line complexity
export function processStreamChunk(streamMode: string, chunk: unknown): StreamOutput | null {
if (streamMode === 'updates') {
// Handle agent message updates
const agentChunk = chunk as {
agent?: { messages?: Array<{ content: string | Array<{ type: string; text: string }> }> };
compact_messages?: {
messages?: Array<{ content: string | Array<{ type: string; text: string }> }>;
};
delete_messages?: {
messages?: Array<{ content: string | Array<{ type: string; text: string }> }>;
};
process_operations?: {
workflowJSON?: unknown;
workflowOperations?: unknown;
};
};
if ((agentChunk?.delete_messages?.messages ?? []).length > 0) {
const messageChunk: AgentMessageChunk = {
role: 'assistant',
type: 'message',
text: 'Deleted, refresh?',
};
return { messages: [messageChunk] };
}
if ((agentChunk?.compact_messages?.messages ?? []).length > 0) {
const lastMessage =
agentChunk.compact_messages!.messages![agentChunk.compact_messages!.messages!.length - 1];
const messageChunk: AgentMessageChunk = {
role: 'assistant',
type: 'message',
text: lastMessage.content as string,
};
return { messages: [messageChunk] };
}
if ((agentChunk?.agent?.messages ?? []).length > 0) {
const lastMessage = agentChunk.agent!.messages![agentChunk.agent!.messages!.length - 1];
if (lastMessage.content) {
let content: string;
// Handle array content (multi-part messages)
if (Array.isArray(lastMessage.content)) {
content = lastMessage.content
.filter((c) => c.type === 'text')
.map((b) => b.text)
.join('\n');
} else {
content = lastMessage.content;
}
const messageChunk: AgentMessageChunk = {
role: 'assistant',
type: 'message',
text: content,
};
return { messages: [messageChunk] };
}
}
// Handle process_operations updates - emit workflow update after operations are processed
if (agentChunk?.process_operations) {
// Check if operations were processed (indicated by cleared operations array)
const update = agentChunk.process_operations;
if (update.workflowJSON && update.workflowOperations !== undefined) {
// Create workflow update chunk
const workflowUpdateChunk: WorkflowUpdateChunk = {
role: 'assistant',
type: 'workflow-updated',
codeSnippet: JSON.stringify(update.workflowJSON, null, 2),
};
return { messages: [workflowUpdateChunk] };
}
}
} else if (streamMode === 'custom') {
// Handle custom tool updates
const toolChunk = chunk as ToolProgressChunk;
if (toolChunk?.type === 'tool') {
const output: StreamOutput = { messages: [toolChunk] };
// Don't emit workflow updates here - they'll be emitted after process_operations
return output;
}
}
return null;
}
/**
* Create a stream processor that yields formatted chunks
*/
export async function* createStreamProcessor(
stream: AsyncGenerator<[string, unknown], void, unknown>,
): AsyncGenerator<StreamOutput> {
for await (const [streamMode, chunk] of stream) {
const output = processStreamChunk(streamMode, chunk);
if (output) {
yield output;
}
}
}
export function formatMessages(
messages: Array<AIMessage | HumanMessage | ToolMessage>,
): Array<Record<string, unknown>> {
const formattedMessages: Array<Record<string, unknown>> = [];
for (const msg of messages) {
if (msg instanceof HumanMessage) {
formattedMessages.push({
role: 'user',
type: 'message',
text: msg.content,
});
} else if (msg instanceof AIMessage) {
// Add the AI message content if it exists
if (msg.content) {
if (Array.isArray(msg.content)) {
// Handle array content (multi-part messages)
const textMessages = msg.content.filter((c) => c.type === 'text');
textMessages.forEach((textMessage) => {
if (textMessage.type !== 'text') {
return;
}
formattedMessages.push({
role: 'assistant',
type: 'message',
text: textMessage.text,
});
});
} else {
formattedMessages.push({
role: 'assistant',
type: 'message',
text: msg.content,
});
}
}
// Handle tool calls in AI messages
if (msg.tool_calls && msg.tool_calls.length > 0) {
// Add tool messages for each tool call
for (const toolCall of msg.tool_calls) {
formattedMessages.push({
id: toolCall.id,
toolCallId: toolCall.id,
role: 'assistant',
type: 'tool',
toolName: toolCall.name,
status: 'completed',
updates: [
{
type: 'input',
data: toolCall.args || {},
},
],
});
}
}
} else if (msg instanceof ToolMessage) {
// Find the tool message by ID and add the output
const toolCallId = msg.tool_call_id;
for (let i = formattedMessages.length - 1; i >= 0; i--) {
const m = formattedMessages[i];
if (m.type === 'tool' && m.id === toolCallId) {
// Add output to updates array
m.updates ??= [];
(m.updates as Array<Record<string, unknown>>).push({
type: 'output',
data: typeof msg.content === 'string' ? { result: msg.content } : msg.content,
});
break;
}
}
}
}
return formattedMessages;
}

View File

@@ -0,0 +1,447 @@
import type { INode, IConnections } from 'n8n-workflow';
import { createNode, createWorkflow } from '../../../test/test-utils';
import type { SimpleWorkflow, WorkflowOperation } from '../../types/workflow';
import type { WorkflowState } from '../../workflow-state';
import { applyOperations, processOperations } from '../operations-processor';
describe('operations-processor', () => {
describe('applyOperations', () => {
let baseWorkflow: SimpleWorkflow;
let node1: INode;
let node2: INode;
let node3: INode;
beforeEach(() => {
node1 = createNode({ id: 'node1', name: 'Node 1', position: [100, 100] });
node2 = createNode({ id: 'node2', name: 'Node 2', position: [300, 100] });
node3 = createNode({ id: 'node3', name: 'Node 3', position: [500, 100] });
baseWorkflow = {
nodes: [node1, node2, node3],
connections: {
node1: {
main: [[{ node: 'node2', type: 'main', index: 0 }]],
},
node2: {
main: [[{ node: 'node3', type: 'main', index: 0 }]],
},
},
};
});
describe('clear operation', () => {
it('should reset workflow to empty state', () => {
const operations: WorkflowOperation[] = [{ type: 'clear' }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toEqual([]);
expect(result.connections).toEqual({});
});
it('should clear even complex workflows', () => {
// Add more connections
baseWorkflow.connections.node1.ai_tool = [[{ node: 'node3', type: 'ai_tool', index: 0 }]];
const operations: WorkflowOperation[] = [{ type: 'clear' }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toEqual([]);
expect(result.connections).toEqual({});
});
});
describe('removeNode operation', () => {
it('should remove single node and its connections', () => {
const operations: WorkflowOperation[] = [{ type: 'removeNode', nodeIds: ['node2'] }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(2);
expect(result.nodes.find((n) => n.id === 'node2')).toBeUndefined();
// node1's connection to node2 is filtered out, leaving empty array
expect(result.connections.node1).toEqual({ main: [[]] });
// node2 is removed entirely as source
expect(result.connections.node2).toBeUndefined();
});
it('should remove multiple nodes', () => {
const operations: WorkflowOperation[] = [
{ type: 'removeNode', nodeIds: ['node1', 'node3'] },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(1);
expect(result.nodes[0].id).toBe('node2');
// node2's connection to node3 is filtered out, leaving empty array
expect(result.connections).toEqual({
node2: { main: [[]] },
});
});
it('should handle non-existent node IDs gracefully', () => {
const operations: WorkflowOperation[] = [{ type: 'removeNode', nodeIds: ['non-existent'] }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(3);
expect(result.connections).toEqual(baseWorkflow.connections);
});
it('should clean up connections to removed nodes', () => {
// Add more complex connections
baseWorkflow.connections.node3 = {
main: [[{ node: 'node2', type: 'main', index: 1 }]],
};
const operations: WorkflowOperation[] = [{ type: 'removeNode', nodeIds: ['node2'] }];
const result = applyOperations(baseWorkflow, operations);
// node1's connection to node2 is filtered out, leaving empty array
expect(result.connections.node1).toEqual({ main: [[]] });
expect(result.connections.node3).toBeDefined();
expect(result.connections.node3.main[0]).toEqual([]);
});
});
describe('addNodes operation', () => {
it('should add new nodes', () => {
const newNode = createNode({ id: 'node4', name: 'Node 4', position: [700, 100] });
const operations: WorkflowOperation[] = [{ type: 'addNodes', nodes: [newNode] }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(4);
expect(result.nodes.find((n) => n.id === 'node4')).toEqual(newNode);
});
it('should update existing nodes by ID', () => {
const updatedNode = createNode({
id: 'node2',
name: 'Updated Node 2',
position: [400, 200],
});
const operations: WorkflowOperation[] = [{ type: 'addNodes', nodes: [updatedNode] }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(3);
const node = result.nodes.find((n) => n.id === 'node2');
expect(node?.name).toBe('Updated Node 2');
expect(node?.position).toEqual([400, 200]);
});
it('should handle multiple nodes with same ID (keep latest)', () => {
const node2a = createNode({ id: 'node2', name: 'Node 2A' });
const node2b = createNode({ id: 'node2', name: 'Node 2B' });
const operations: WorkflowOperation[] = [{ type: 'addNodes', nodes: [node2a, node2b] }];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(3);
const node = result.nodes.find((n) => n.id === 'node2');
expect(node?.name).toBe('Node 2B');
});
});
describe('updateNode operation', () => {
it('should update existing node properties', () => {
const operations: WorkflowOperation[] = [
{
type: 'updateNode',
nodeId: 'node2',
updates: { name: 'Updated Name', position: [350, 150] },
},
];
const result = applyOperations(baseWorkflow, operations);
const node = result.nodes.find((n) => n.id === 'node2');
expect(node?.name).toBe('Updated Name');
expect(node?.position).toEqual([350, 150]);
expect(node?.type).toBe('n8n-nodes-base.code'); // preserved
});
it('should handle non-existent node gracefully', () => {
const operations: WorkflowOperation[] = [
{
type: 'updateNode',
nodeId: 'non-existent',
updates: { name: 'Should not apply' },
},
];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toEqual(baseWorkflow.nodes);
});
it('should preserve other properties during partial update', () => {
const operations: WorkflowOperation[] = [
{
type: 'updateNode',
nodeId: 'node1',
updates: { disabled: true },
},
];
const result = applyOperations(baseWorkflow, operations);
const node = result.nodes.find((n) => n.id === 'node1');
expect(node?.disabled).toBe(true);
expect(node?.name).toBe('Node 1');
expect(node?.position).toEqual([100, 100]);
});
});
describe('setConnections operation', () => {
it('should replace all connections', () => {
const newConnections: IConnections = {
node3: {
main: [[{ node: 'node1', type: 'main', index: 0 }]],
},
};
const operations: WorkflowOperation[] = [
{ type: 'setConnections', connections: newConnections },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.connections).toEqual(newConnections);
});
it('should handle empty connections object', () => {
const operations: WorkflowOperation[] = [{ type: 'setConnections', connections: {} }];
const result = applyOperations(baseWorkflow, operations);
expect(result.connections).toEqual({});
});
});
describe('mergeConnections operation', () => {
it('should add new connections to existing ones', () => {
const newConnections: IConnections = {
node3: {
main: [[{ node: 'node1', type: 'main', index: 0 }]],
},
};
const operations: WorkflowOperation[] = [
{ type: 'mergeConnections', connections: newConnections },
];
const result = applyOperations(baseWorkflow, operations);
// Original connections preserved
expect(result.connections.node1).toEqual(baseWorkflow.connections.node1);
expect(result.connections.node2).toEqual(baseWorkflow.connections.node2);
// New connection added
expect(result.connections.node3).toEqual(newConnections.node3);
});
it('should avoid duplicate connections', () => {
const duplicateConnections: IConnections = {
node1: {
main: [[{ node: 'node2', type: 'main', index: 0 }]], // Already exists
},
};
const operations: WorkflowOperation[] = [
{ type: 'mergeConnections', connections: duplicateConnections },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.connections.node1.main[0]).toHaveLength(1);
});
it('should merge complex multi-output connections', () => {
// Setup base with multi-output
baseWorkflow.connections.node1.main = [
[{ node: 'node2', type: 'main', index: 0 }],
[{ node: 'node3', type: 'main', index: 0 }],
];
const newConnections: IConnections = {
node1: {
main: [
[{ node: 'node3', type: 'main', index: 1 }], // New connection at output 0
[], // Nothing at output 1
[{ node: 'node2', type: 'main', index: 1 }], // New output 2
],
},
};
const operations: WorkflowOperation[] = [
{ type: 'mergeConnections', connections: newConnections },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.connections.node1.main[0]).toHaveLength(2); // Original + new
expect(result.connections.node1.main[1]).toHaveLength(1); // Original only
expect(result.connections.node1.main[2]).toHaveLength(1); // New only
});
it('should handle arrays with null/undefined gracefully', () => {
baseWorkflow.connections.node1.main = [null, [{ node: 'node2', type: 'main', index: 0 }]];
const newConnections: IConnections = {
node1: {
main: [[{ node: 'node3', type: 'main', index: 0 }]],
},
};
const operations: WorkflowOperation[] = [
{ type: 'mergeConnections', connections: newConnections },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.connections.node1.main[0]).toEqual([
{ node: 'node3', type: 'main', index: 0 },
]);
expect(result.connections.node1.main[1]).toEqual([
{ node: 'node2', type: 'main', index: 0 },
]);
});
});
describe('multiple operations', () => {
it('should apply operations in sequence', () => {
const newNode = createNode({ id: 'node4', name: 'Node 4' });
const operations: WorkflowOperation[] = [
{ type: 'addNodes', nodes: [newNode] },
{ type: 'removeNode', nodeIds: ['node1'] },
{
type: 'updateNode',
nodeId: 'node2',
updates: { name: 'Updated Node 2' },
},
{
type: 'mergeConnections',
connections: {
node4: {
main: [[{ node: 'node2', type: 'main', index: 0 }]],
},
},
},
];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(3);
expect(result.nodes.find((n) => n.id === 'node1')).toBeUndefined();
expect(result.nodes.find((n) => n.id === 'node4')).toBeDefined();
expect(result.nodes.find((n) => n.id === 'node2')?.name).toBe('Updated Node 2');
expect(result.connections.node4).toBeDefined();
});
it('should handle clear followed by other operations', () => {
const newNode = createNode({ id: 'newNode', name: 'New Node' });
const operations: WorkflowOperation[] = [
{ type: 'clear' },
{ type: 'addNodes', nodes: [newNode] },
];
const result = applyOperations(baseWorkflow, operations);
expect(result.nodes).toHaveLength(1);
expect(result.nodes[0].id).toBe('newNode');
expect(result.connections).toEqual({});
});
});
describe('edge cases', () => {
it('should handle empty operations array', () => {
const result = applyOperations(baseWorkflow, []);
expect(result.nodes).toEqual(baseWorkflow.nodes);
expect(result.connections).toEqual(baseWorkflow.connections);
});
it('should handle operations on empty workflow', () => {
const emptyWorkflow = createWorkflow([]);
const operations: WorkflowOperation[] = [
{ type: 'removeNode', nodeIds: ['any'] },
{ type: 'updateNode', nodeId: 'any', updates: { name: 'test' } },
];
const result = applyOperations(emptyWorkflow, operations);
expect(result.nodes).toEqual([]);
expect(result.connections).toEqual({});
});
});
});
describe('processOperations', () => {
const createState = (
workflowJSON: SimpleWorkflow,
workflowOperations: WorkflowOperation[] | null = null,
): typeof WorkflowState.State => ({
workflowJSON,
workflowOperations,
messages: [],
workflowContext: {},
});
it('should process operations and clear them', () => {
const workflow = createWorkflow([createNode()]);
const operations: WorkflowOperation[] = [
{ type: 'addNodes', nodes: [createNode({ id: 'node2' })] },
];
const state = createState(workflow, operations);
const result = processOperations(state);
expect(result.workflowJSON).toBeDefined();
expect(result.workflowJSON?.nodes).toHaveLength(2);
expect(result.workflowOperations).toBeNull();
});
it('should handle null operations array', () => {
const workflow = createWorkflow([createNode()]);
const state = createState(workflow, null);
const result = processOperations(state);
expect(result).toEqual({});
});
it('should handle empty operations array', () => {
const workflow = createWorkflow([createNode()]);
const state = createState(workflow, []);
const result = processOperations(state);
expect(result).toEqual({});
});
it('should apply multiple operations correctly', () => {
const node1 = createNode({ id: 'node1' });
const node2 = createNode({ id: 'node2' });
const workflow = createWorkflow([node1]);
const operations: WorkflowOperation[] = [
{ type: 'addNodes', nodes: [node2] },
{
type: 'setConnections',
connections: {
node1: {
main: [[{ node: 'node2', type: 'main', index: 0 }]],
},
},
},
];
const state = createState(workflow, operations);
const result = processOperations(state);
expect(result.workflowJSON?.nodes).toHaveLength(2);
expect(result.workflowJSON?.connections.node1).toBeDefined();
expect(result.workflowOperations).toBeNull();
});
});
});

View File

@@ -0,0 +1,475 @@
import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
import type {
AgentMessageChunk,
ToolProgressChunk,
WorkflowUpdateChunk,
StreamOutput,
} from '../../types/streaming';
import { processStreamChunk, createStreamProcessor, formatMessages } from '../stream-processor';
describe('stream-processor', () => {
describe('processStreamChunk', () => {
describe('updates mode', () => {
it('should process agent messages with text content', () => {
const chunk = {
agent: {
messages: [{ content: 'Hello, this is a test message' }],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
const message = result?.messages[0] as AgentMessageChunk;
expect(message.role).toBe('assistant');
expect(message.type).toBe('message');
expect(message.text).toBe('Hello, this is a test message');
});
it('should process agent messages with array content (multi-part)', () => {
const chunk = {
agent: {
messages: [
{
content: [
{ type: 'text', text: 'Part 1' },
{ type: 'text', text: 'Part 2' },
{ type: 'image', url: 'http://example.com/image.png' },
],
},
],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
const message = result?.messages[0] as AgentMessageChunk;
expect(message.text).toBe('Part 1\nPart 2');
});
it('should handle delete_messages with refresh message', () => {
const chunk = {
delete_messages: {
messages: [{ content: 'Some deleted message' }],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
const message = result?.messages[0] as AgentMessageChunk;
expect(message.text).toBe('Deleted, refresh?');
});
it('should handle compact_messages returning last message', () => {
const chunk = {
compact_messages: {
messages: [
{ content: 'First message' },
{ content: 'Second message' },
{ content: 'Last message to display' },
],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
const message = result?.messages[0] as AgentMessageChunk;
expect(message.text).toBe('Last message to display');
});
it('should handle process_operations with workflow update', () => {
const workflowData = {
nodes: [{ id: 'node1', name: 'Test Node' }],
connections: {},
};
const chunk = {
process_operations: {
workflowJSON: workflowData,
workflowOperations: null, // Cleared after processing
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
const message = result?.messages[0] as WorkflowUpdateChunk;
expect(message.role).toBe('assistant');
expect(message.type).toBe('workflow-updated');
expect(message.codeSnippet).toBe(JSON.stringify(workflowData, null, 2));
});
it('should ignore chunks without relevant content', () => {
const chunk = {
agent: {
messages: [{ content: '' }], // Empty content
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeNull();
});
it('should ignore process_operations without workflowJSON', () => {
const chunk = {
process_operations: {
workflowOperations: [],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeNull();
});
it('should handle empty messages arrays', () => {
const chunk = {
agent: {
messages: [],
},
};
const result = processStreamChunk('updates', chunk);
expect(result).toBeNull();
});
});
describe('custom mode', () => {
it('should process tool progress chunks', () => {
const toolChunk: ToolProgressChunk = {
id: 'tool-1',
toolCallId: 'call-1',
type: 'tool',
role: 'assistant',
toolName: 'add_nodes',
status: 'running',
updates: [
{
type: 'input',
data: { nodeType: 'n8n-nodes-base.code' },
},
],
};
const result = processStreamChunk('custom', toolChunk);
expect(result).toBeDefined();
expect(result?.messages).toHaveLength(1);
expect(result?.messages[0]).toEqual(toolChunk);
});
it('should ignore non-tool chunks in custom mode', () => {
const chunk = {
type: 'something-else',
data: 'test',
};
const result = processStreamChunk('custom', chunk);
expect(result).toBeNull();
});
});
describe('unknown modes', () => {
it('should return null for unknown stream modes', () => {
const chunk = { data: 'test' };
const result = processStreamChunk('unknown-mode', chunk);
expect(result).toBeNull();
});
});
});
describe('createStreamProcessor', () => {
it('should yield only non-null outputs', async () => {
async function* mockStream(): AsyncGenerator<[string, unknown], void, unknown> {
yield ['updates', { agent: { messages: [{ content: 'Test' }] } }];
yield ['updates', { agent: { messages: [{ content: '' }] } }]; // Will produce null
yield ['updates', { agent: { messages: [{ content: 'Test 2' }] } }];
}
const processor = createStreamProcessor(mockStream());
const results: StreamOutput[] = [];
for await (const output of processor) {
results.push(output);
}
expect(results).toHaveLength(2);
expect((results[0].messages[0] as AgentMessageChunk).text).toBe('Test');
expect((results[1].messages[0] as AgentMessageChunk).text).toBe('Test 2');
});
it('should process multiple chunks in sequence', async () => {
async function* mockStream(): AsyncGenerator<[string, unknown], void, unknown> {
yield ['updates', { agent: { messages: [{ content: 'Message 1' }] } }];
yield ['custom', { type: 'tool', toolName: 'test_tool' } as ToolProgressChunk];
yield ['updates', { delete_messages: { messages: [{ content: 'deleted' }] } }];
}
const processor = createStreamProcessor(mockStream());
const results: StreamOutput[] = [];
for await (const output of processor) {
results.push(output);
}
expect(results).toHaveLength(3);
expect((results[0].messages[0] as AgentMessageChunk).text).toBe('Message 1');
expect((results[1].messages[0] as ToolProgressChunk).toolName).toBe('test_tool');
expect((results[2].messages[0] as AgentMessageChunk).text).toBe('Deleted, refresh?');
});
it('should handle empty stream', async () => {
async function* mockStream(): AsyncGenerator<[string, unknown], void, unknown> {
// Empty generator
}
const processor = createStreamProcessor(mockStream());
const results: StreamOutput[] = [];
for await (const output of processor) {
results.push(output);
}
expect(results).toHaveLength(0);
});
});
describe('formatMessages', () => {
it('should format HumanMessage correctly', () => {
const messages = [new HumanMessage('Hello from user')];
const result = formatMessages(messages);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
role: 'user',
type: 'message',
text: 'Hello from user',
});
});
it('should format AIMessage with text content', () => {
const messages = [new AIMessage('Response from AI')];
const result = formatMessages(messages);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
role: 'assistant',
type: 'message',
text: 'Response from AI',
});
});
it('should format AIMessage with tool_calls', () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'add_nodes',
args: { nodeType: 'n8n-nodes-base.code' },
type: 'tool_call',
},
{
id: 'call-2',
name: 'connect_nodes',
args: { sourceNodeId: 'node1', targetNodeId: 'node2' },
type: 'tool_call',
},
];
const messages = [aiMessage];
const result = formatMessages(messages);
expect(result).toHaveLength(2); // Two tool messages
expect(result[0]).toEqual({
id: 'call-1',
toolCallId: 'call-1',
role: 'assistant',
type: 'tool',
toolName: 'add_nodes',
status: 'completed',
updates: [
{
type: 'input',
data: { nodeType: 'n8n-nodes-base.code' },
},
],
});
expect(result[1]).toEqual({
id: 'call-2',
toolCallId: 'call-2',
role: 'assistant',
type: 'tool',
toolName: 'connect_nodes',
status: 'completed',
updates: [
{
type: 'input',
data: { sourceNodeId: 'node1', targetNodeId: 'node2' },
},
],
});
});
it('should format ToolMessage and match with tool calls', () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'add_nodes',
args: { nodeType: 'n8n-nodes-base.code' },
type: 'tool_call',
},
];
const toolMessage = new ToolMessage({
content: 'Successfully added node',
tool_call_id: 'call-1',
});
const messages = [aiMessage, toolMessage];
const result = formatMessages(messages);
expect(result).toHaveLength(1);
expect(result[0]).toEqual({
id: 'call-1',
toolCallId: 'call-1',
role: 'assistant',
type: 'tool',
toolName: 'add_nodes',
status: 'completed',
updates: [
{
type: 'input',
data: { nodeType: 'n8n-nodes-base.code' },
},
{
type: 'output',
data: { result: 'Successfully added node' },
},
],
});
});
it('should handle ToolMessage with object content', () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'get_node_details',
args: { nodeName: 'Code' },
type: 'tool_call',
},
];
const toolMessage = new ToolMessage({
// @ts-expect-error LangChain types are not propagated
content: { nodeId: 'node1', nodeType: 'n8n-nodes-base.code' },
tool_call_id: 'call-1',
});
const messages = [aiMessage, toolMessage];
const result = formatMessages(messages);
expect(result[0].updates).toHaveLength(2);
// @ts-expect-error LangChain types are not propagated
expect(result[0].updates?.[1]).toEqual({
type: 'output',
data: { nodeId: 'node1', nodeType: 'n8n-nodes-base.code' },
});
});
it('should handle mixed message types in sequence', () => {
const aiMessage1 = new AIMessage('I will help you');
const humanMessage = new HumanMessage('Please add a node');
const aiMessage2 = new AIMessage('');
aiMessage2.tool_calls = [
{
id: 'call-1',
name: 'add_nodes',
args: { nodeType: 'n8n-nodes-base.code' },
type: 'tool_call',
},
];
const toolMessage = new ToolMessage({
content: 'Node added',
tool_call_id: 'call-1',
});
const messages = [aiMessage1, humanMessage, aiMessage2, toolMessage];
const result = formatMessages(messages);
expect(result).toHaveLength(3);
expect(result[0].type).toBe('message');
expect(result[0].role).toBe('assistant');
expect(result[1].type).toBe('message');
expect(result[1].role).toBe('user');
expect(result[2].type).toBe('tool');
expect(result[2].updates).toHaveLength(2); // input and output
});
it('should handle AIMessage with both content and tool_calls', () => {
const aiMessage = new AIMessage('I will add a node for you');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'add_nodes',
args: { nodeType: 'n8n-nodes-base.code' },
type: 'tool_call',
},
];
const messages = [aiMessage];
const result = formatMessages(messages);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
role: 'assistant',
type: 'message',
text: 'I will add a node for you',
});
expect(result[1].type).toBe('tool');
});
it('should handle tool calls without args', () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'clear_workflow',
args: {},
type: 'tool_call',
},
];
const messages = [aiMessage];
const result = formatMessages(messages);
// @ts-expect-error LangChain types are not propagated
expect(result[0].updates?.[0]).toEqual({
type: 'input',
data: {},
});
});
});
});

View File

@@ -0,0 +1,707 @@
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
import type { DynamicStructuredTool } from '@langchain/core/tools';
import { ToolInputParsingException } from '@langchain/core/tools';
import type { Command as CommandType } from '@langchain/langgraph';
import { createWorkflow, createNode } from '../../../test/test-utils';
import type { ToolExecutorOptions } from '../../types/config';
import type { WorkflowOperation } from '../../types/workflow';
import type { WorkflowState } from '../../workflow-state';
import { executeToolsInParallel } from '../tool-executor';
// Type for our mocked Command
type MockedCommand = CommandType & { _isCommand: boolean };
// Mock LangGraph dependencies
jest.mock('@langchain/langgraph', () => {
// Mock Command class
class MockCommand {
_isCommand = true;
update: unknown;
constructor(params: { update: unknown }) {
this.update = params.update;
}
}
return {
isCommand: jest.fn((obj: unknown) => {
return (
obj instanceof MockCommand || (obj && (obj as { _isCommand?: boolean })._isCommand === true)
);
}),
Command: MockCommand,
};
});
// Get properly typed Command from mock
const MockCommand = jest.requireMock<{
Command: new (params: { update: unknown }) => MockedCommand;
}>('@langchain/langgraph').Command;
describe('tool-executor', () => {
describe('executeToolsInParallel', () => {
// Helper to create mock state
const createState = (messages: BaseMessage[]): typeof WorkflowState.State => ({
workflowJSON: createWorkflow([]),
workflowOperations: null,
messages,
workflowContext: {},
});
// Helper to create mock tool
const createMockTool = (result: unknown) =>
({
invoke: jest.fn().mockResolvedValue(result),
name: 'mock-tool',
description: 'Mock tool',
schema: {},
func: jest.fn(),
}) as unknown as DynamicStructuredTool;
beforeEach(() => {
jest.clearAllMocks();
});
it('should execute single tool successfully', async () => {
const toolMessage = new ToolMessage({
content: 'Tool executed successfully',
tool_call_id: 'call-1',
});
const mockTool = createMockTool(toolMessage);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: { param: 'value' },
type: 'tool_call',
},
];
const state = createState([new HumanMessage('Test'), aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(mockTool.invoke).toHaveBeenCalledWith(
{ param: 'value' },
{
toolCall: {
id: 'call-1',
name: 'test_tool',
args: { param: 'value' },
},
},
);
expect(result.messages).toHaveLength(1);
expect(result.messages?.[0]).toBe(toolMessage);
expect(result.workflowOperations).toBeUndefined();
});
it('should execute multiple tools in parallel', async () => {
const toolMessage1 = new ToolMessage({
content: 'Tool 1 result',
tool_call_id: 'call-1',
});
const toolMessage2 = new ToolMessage({
content: 'Tool 2 result',
tool_call_id: 'call-2',
});
const mockTool1 = createMockTool(toolMessage1);
const mockTool2 = createMockTool(toolMessage2);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'tool1',
args: { param: 'value1' },
type: 'tool_call',
},
{
id: 'call-2',
name: 'tool2',
args: { param: 'value2' },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([
['tool1', mockTool1],
['tool2', mockTool2],
]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(mockTool1.invoke).toHaveBeenCalled();
expect(mockTool2.invoke).toHaveBeenCalled();
expect(result.messages).toHaveLength(2);
expect(result.messages).toContain(toolMessage1);
expect(result.messages).toContain(toolMessage2);
});
it('should handle tool returning Command with state updates', async () => {
const operations: WorkflowOperation[] = [
{ type: 'addNodes', nodes: [createNode({ id: 'node1' })] },
];
const stateUpdate: Partial<typeof WorkflowState.State> = {
workflowOperations: operations,
messages: [new ToolMessage({ content: 'Added node', tool_call_id: 'call-1' })],
};
const command = new MockCommand({ update: stateUpdate });
const mockTool = createMockTool(command);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'add_nodes',
args: { nodeType: 'n8n-nodes-base.code' },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['add_nodes', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
expect(result.workflowOperations).toEqual(operations);
});
it('should handle tool returning regular messages', async () => {
const toolMessage = new ToolMessage({
content: 'Regular message',
tool_call_id: 'call-1',
});
const mockTool = createMockTool(toolMessage);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toEqual([toolMessage]);
expect(result.workflowOperations).toBeUndefined();
});
it('should collect all workflow operations from multiple tools', async () => {
const operations1: WorkflowOperation[] = [
{ type: 'addNodes', nodes: [createNode({ id: 'node1' })] },
];
const operations2: WorkflowOperation[] = [{ type: 'setConnections', connections: {} }];
const command1 = new MockCommand({
update: {
workflowOperations: operations1,
messages: [],
},
});
const command2 = new MockCommand({
update: {
workflowOperations: operations2,
messages: [],
},
});
const mockTool1 = createMockTool(command1);
const mockTool2 = createMockTool(command2);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'tool1',
args: {},
type: 'tool_call',
},
{
id: 'call-2',
name: 'tool2',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([
['tool1', mockTool1],
['tool2', mockTool2],
]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.workflowOperations).toHaveLength(2);
expect(result.workflowOperations).toEqual([...operations1, ...operations2]);
});
it('should merge messages from both direct returns and state updates', async () => {
const directMessage = new ToolMessage({
content: 'Direct message',
tool_call_id: 'call-1',
});
const stateMessage = new ToolMessage({
content: 'State message',
tool_call_id: 'call-2',
});
const command = new MockCommand({
update: {
messages: [stateMessage],
workflowOperations: [],
},
});
const mockTool1 = createMockTool(directMessage);
const mockTool2 = createMockTool(command);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'tool1',
args: {},
type: 'tool_call',
},
{
id: 'call-2',
name: 'tool2',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([
['tool1', mockTool1],
['tool2', mockTool2],
]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(2);
expect(result.messages).toContain(directMessage);
expect(result.messages).toContain(stateMessage);
});
describe('error handling', () => {
it('should throw when last message is not AIMessage', async () => {
const state = createState([new HumanMessage('Test')]);
const toolMap = new Map<string, DynamicStructuredTool>();
const options: ToolExecutorOptions = { state, toolMap };
await expect(executeToolsInParallel(options)).rejects.toThrow(
'Most recent message must be an AIMessage with tool calls',
);
});
it('should throw when AIMessage has no tool_calls', async () => {
const aiMessage = new AIMessage('No tool calls');
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>();
const options: ToolExecutorOptions = { state, toolMap };
await expect(executeToolsInParallel(options)).rejects.toThrow(
'AIMessage must have tool calls',
);
});
it('should return error message when tool is not found in toolMap', async () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'non_existent_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>(); // Empty map
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
const message = result.messages![0] as ToolMessage;
expect(message).toBeInstanceOf(ToolMessage);
expect(message.content).toBe(
'Tool non_existent_tool failed: Tool non_existent_tool not found',
);
expect(message.tool_call_id).toBe('call-1');
expect(message.additional_kwargs.error).toBe(true);
});
it('should wrap schema validation errors as ValidationError', async () => {
const mockTool = createMockTool(null);
// Mock tool throwing a ToolInputParsingException
mockTool.invoke = jest
.fn()
.mockRejectedValue(
new ToolInputParsingException('Received tool input did not match expected schema'),
);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: { invalidParam: 'value' },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
const message = result.messages![0] as ToolMessage;
expect(message).toBeInstanceOf(ToolMessage);
expect(message.content).toBe(
'Invalid input for tool test_tool: Received tool input did not match expected schema',
);
expect(message.tool_call_id).toBe('call-1');
expect(message.additional_kwargs.error).toBe(true);
});
it('should wrap schema validation errors with "expected schema" message as ValidationError', async () => {
const mockTool = createMockTool(null);
// Mock tool throwing a regular Error with schema validation message
mockTool.invoke = jest
.fn()
.mockRejectedValue(new Error('Tool input validation failed: expected schema'));
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'update_params',
args: { wrongField: 123 },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['update_params', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
const message = result.messages![0] as ToolMessage;
expect(message).toBeInstanceOf(ToolMessage);
expect(message.content).toBe(
'Invalid input for tool update_params: Tool input validation failed: expected schema',
);
expect(message.tool_call_id).toBe('call-1');
expect(message.additional_kwargs.error).toBe(true);
});
it('should wrap other tool errors as ToolExecutionError', async () => {
const mockTool = createMockTool(null);
// Mock tool throwing a generic error
mockTool.invoke = jest.fn().mockRejectedValue(new Error('Connection timeout'));
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'http_request',
args: { url: 'https://example.com' },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['http_request', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
const message = result.messages![0] as ToolMessage;
expect(message).toBeInstanceOf(ToolMessage);
expect(message.content).toBe('Tool http_request failed: Connection timeout');
expect(message.tool_call_id).toBe('call-1');
expect(message.additional_kwargs.error).toBe(true);
});
it('should handle non-Error objects thrown by tools', async () => {
const mockTool = createMockTool(null);
// Mock tool throwing a non-Error object
mockTool.invoke = jest.fn().mockRejectedValue('String error');
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(1);
const message = result.messages![0] as ToolMessage;
expect(message).toBeInstanceOf(ToolMessage);
expect(message.content).toBe('Tool test_tool failed: Unknown error occurred');
expect(message.tool_call_id).toBe('call-1');
expect(message.additional_kwargs.error).toBe(true);
});
it('should handle multiple tools with mixed success and failure', async () => {
const successMessage = new ToolMessage({
content: 'Success',
tool_call_id: 'call-1',
});
const mockSuccessTool = createMockTool(successMessage);
const mockFailureTool = createMockTool(null);
mockFailureTool.invoke = jest
.fn()
.mockRejectedValue(
new ToolInputParsingException('Received tool input did not match expected schema'),
);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'success_tool',
args: { valid: true },
type: 'tool_call',
},
{
id: 'call-2',
name: 'failure_tool',
args: { invalid: true },
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([
['success_tool', mockSuccessTool],
['failure_tool', mockFailureTool],
]);
const options: ToolExecutorOptions = { state, toolMap };
// Both tools run in parallel; one succeeds and one returns an error message
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(2);
// First message should be the success
const successMsg = result.messages![0] as ToolMessage;
expect(successMsg).toBeInstanceOf(ToolMessage);
expect(successMsg.content).toBe('Success');
expect(successMsg.tool_call_id).toBe('call-1');
// Second message should be the error
const errorMsg = result.messages![1] as ToolMessage;
expect(errorMsg).toBeInstanceOf(ToolMessage);
expect(errorMsg.content).toBe(
'Invalid input for tool failure_tool: Received tool input did not match expected schema',
);
expect(errorMsg.tool_call_id).toBe('call-2');
expect(errorMsg.additional_kwargs.error).toBe(true);
});
});
it('should handle tools with no operations (only messages)', async () => {
const message = new ToolMessage({
content: 'Message only',
tool_call_id: 'call-1',
});
const command = new MockCommand({
update: {
messages: [message],
// No workflowOperations
},
});
const mockTool = createMockTool(command);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toEqual([message]);
expect(result.workflowOperations).toBeUndefined();
});
it('should handle tools with no messages (only operations)', async () => {
const operations: WorkflowOperation[] = [{ type: 'clear' }];
const command = new MockCommand({
update: {
workflowOperations: operations,
// No messages
},
});
const mockTool = createMockTool(command);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'clear_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['clear_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toEqual([]);
expect(result.workflowOperations).toEqual(operations);
});
it('should handle empty tool_calls array', async () => {
const aiMessage = new AIMessage('');
aiMessage.tool_calls = []; // Empty array
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>();
const options: ToolExecutorOptions = { state, toolMap };
await expect(executeToolsInParallel(options)).rejects.toThrow(
'AIMessage must have tool calls',
);
});
it('should handle tool calls without args', async () => {
const toolMessage = new ToolMessage({
content: 'Success',
tool_call_id: 'call-1',
});
const mockTool = createMockTool(toolMessage);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: {}, // No args
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(mockTool.invoke).toHaveBeenCalledWith(
{}, // Empty object when args is undefined
{
toolCall: {
id: 'call-1',
name: 'test_tool',
args: {},
},
},
);
expect(result.messages).toEqual([toolMessage]);
});
it('should handle multiple state updates with various message types', async () => {
const aiResultMessage = new AIMessage('Result from tool');
const toolResultMessage = new ToolMessage({
content: 'Tool result',
tool_call_id: 'call-1',
});
const command = new MockCommand({
update: {
messages: [aiResultMessage, toolResultMessage],
workflowOperations: [{ type: 'clear' }],
},
});
const mockTool = createMockTool(command);
const aiMessage = new AIMessage('');
aiMessage.tool_calls = [
{
id: 'call-1',
name: 'test_tool',
args: {},
type: 'tool_call',
},
];
const state = createState([aiMessage]);
const toolMap = new Map<string, DynamicStructuredTool>([['test_tool', mockTool]]);
const options: ToolExecutorOptions = { state, toolMap };
const result = await executeToolsInParallel(options);
expect(result.messages).toHaveLength(2);
expect(result.messages).toContain(aiResultMessage);
expect(result.messages).toContain(toolResultMessage);
expect(result.workflowOperations).toHaveLength(1);
});
});
});

View File

@@ -0,0 +1,141 @@
import type { BaseMessage } from '@langchain/core/messages';
import { isAIMessage, ToolMessage } from '@langchain/core/messages';
import { ToolInputParsingException } from '@langchain/core/tools';
import { isCommand } from '@langchain/langgraph';
import { ToolExecutionError, WorkflowStateError } from '../errors';
import type { ToolExecutorOptions } from '../types/config';
import type { WorkflowOperation } from '../types/workflow';
import type { WorkflowState } from '../workflow-state';
/**
* PARALLEL TOOL EXECUTION
*
* This executor handles running multiple tools in parallel and collecting their results.
* All workflow modifications are done through operations that are processed by the
* operations processor node.
*
* This executor:
* 1. Executes all tools in parallel
* 2. Collects their operations and messages
* 3. Returns a single update with all operations to be processed
*/
/**
* Execute multiple tools in parallel and collect their state updates
*
* Tools return operations that will be processed by the operations processor node.
* This function executes tools and collects all their operations and messages.
*
* @param options - Contains the current state and tool map
* @returns Combined state updates from all tool executions
*/
export async function executeToolsInParallel(
options: ToolExecutorOptions,
): Promise<Partial<typeof WorkflowState.State>> {
const { state, toolMap } = options;
const lastMessage = state.messages.at(-1);
if (!lastMessage || !isAIMessage(lastMessage)) {
const error = new WorkflowStateError(
'Most recent message must be an AIMessage with tool calls',
);
throw error;
}
const aiMessage = lastMessage;
if (!aiMessage.tool_calls?.length) {
const error = new WorkflowStateError('AIMessage must have tool calls');
throw error;
}
// Execute all tools in parallel
const toolResults = await Promise.all(
aiMessage.tool_calls.map(async (toolCall) => {
try {
const tool = toolMap.get(toolCall.name);
if (!tool) {
throw new ToolExecutionError(`Tool ${toolCall.name} not found`, {
toolName: toolCall.name,
});
}
const result: unknown = await tool.invoke(toolCall.args ?? {}, {
toolCall: {
id: toolCall.id,
name: toolCall.name,
args: toolCall.args ?? {},
},
});
return result;
} catch (error) {
// Handle tool invocation errors by returning a ToolMessage with error
// This ensures the conversation history remains valid (every tool_use has a tool_result)
const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
// Create error message content
let errorContent: string;
if (
error instanceof ToolInputParsingException ||
errorMessage.includes('expected schema')
) {
errorContent = `Invalid input for tool ${toolCall.name}: ${errorMessage}`;
} else {
errorContent = `Tool ${toolCall.name} failed: ${errorMessage}`;
}
// Return a ToolMessage with the error to maintain conversation continuity
return new ToolMessage({
content: errorContent,
tool_call_id: toolCall.id ?? '',
// Include an error flag so downstream consumers can detect and handle the failure
additional_kwargs: { error: true },
});
}
}),
);
// Collect all messages and state updates
const allMessages: BaseMessage[] = [];
const stateUpdates: Array<Partial<typeof WorkflowState.State>> = [];
toolResults.forEach((result) => {
if (isCommand(result)) {
// Tool returned a Command with state updates
const update = result.update as Partial<typeof WorkflowState.State>;
if (update) {
stateUpdates.push(update);
}
} else {
// Tool returned a regular message
allMessages.push(result as BaseMessage);
}
});
// Collect all messages from state updates
stateUpdates.forEach((update) => {
if (update.messages && Array.isArray(update.messages)) {
allMessages.push(...update.messages);
}
});
// Collect all workflow operations
const allOperations: WorkflowOperation[] = [];
for (const update of stateUpdates) {
if (update.workflowOperations && Array.isArray(update.workflowOperations)) {
allOperations.push(...update.workflowOperations);
}
}
// Return the combined update
const finalUpdate: Partial<typeof WorkflowState.State> = {
messages: allMessages,
};
if (allOperations.length > 0) {
finalUpdate.workflowOperations = allOperations;
}
return finalUpdate;
}
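// Usage sketch (illustrative only): given an AIMessage with pending tool calls in
// `state.messages` and a map of the registered tools, one round of parallel execution
// looks like this. `registeredTools` and `state` are assumed to be supplied by the
// caller; this is not part of the production flow.
//
//   const toolMap = new Map(registeredTools.map((tool) => [tool.name, tool]));
//   const update = await executeToolsInParallel({ state, toolMap });
//   // update.messages contains one ToolMessage per tool call (errors included),
//   // update.workflowOperations contains the combined operations, if any were emitted.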

View File

@@ -0,0 +1,289 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessage, ToolMessage } from '@langchain/core/messages';
import { HumanMessage, RemoveMessage } from '@langchain/core/messages';
import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import { StateGraph, MemorySaver, END } from '@langchain/langgraph';
import type { Logger } from '@n8n/backend-common';
import type {
INodeTypeDescription,
IRunExecutionData,
IWorkflowBase,
NodeExecutionSchema,
} from 'n8n-workflow';
import { conversationCompactChain } from './chains/conversation-compact';
import { LLMServiceError } from './errors';
import { createAddNodeTool } from './tools/add-node.tool';
import { createConnectNodesTool } from './tools/connect-nodes.tool';
import { createNodeDetailsTool } from './tools/node-details.tool';
import { createNodeSearchTool } from './tools/node-search.tool';
import { mainAgentPrompt } from './tools/prompts/main-agent.prompt';
import { createRemoveNodeTool } from './tools/remove-node.tool';
import { createUpdateNodeParametersTool } from './tools/update-node-parameters.tool';
import type { SimpleWorkflow } from './types/workflow';
import { processOperations } from './utils/operations-processor';
import { createStreamProcessor, formatMessages } from './utils/stream-processor';
import { executeToolsInParallel } from './utils/tool-executor';
import { WorkflowState } from './workflow-state';
export interface WorkflowBuilderAgentConfig {
parsedNodeTypes: INodeTypeDescription[];
llmSimpleTask: BaseChatModel;
llmComplexTask: BaseChatModel;
logger?: Logger;
checkpointer?: MemorySaver;
tracer?: LangChainTracer;
}
export interface ChatPayload {
message: string;
workflowContext?: {
executionSchema?: NodeExecutionSchema[];
currentWorkflow?: Partial<IWorkflowBase>;
executionData?: IRunExecutionData['resultData'];
};
}
export class WorkflowBuilderAgent {
private checkpointer: MemorySaver;
private parsedNodeTypes: INodeTypeDescription[];
private llmSimpleTask: BaseChatModel;
private llmComplexTask: BaseChatModel;
private logger?: Logger;
private tracer?: LangChainTracer;
constructor(config: WorkflowBuilderAgentConfig) {
this.parsedNodeTypes = config.parsedNodeTypes;
this.llmSimpleTask = config.llmSimpleTask;
this.llmComplexTask = config.llmComplexTask;
this.logger = config.logger;
this.checkpointer = config.checkpointer ?? new MemorySaver();
this.tracer = config.tracer;
}
private createWorkflow() {
const tools = [
createNodeSearchTool(this.parsedNodeTypes),
createNodeDetailsTool(this.parsedNodeTypes),
createAddNodeTool(this.parsedNodeTypes),
createConnectNodesTool(this.parsedNodeTypes, this.logger),
createRemoveNodeTool(this.logger),
createUpdateNodeParametersTool(this.parsedNodeTypes, this.llmComplexTask, this.logger),
];
// Create a map for quick tool lookup
const toolMap = new Map(tools.map((tool) => [tool.name, tool]));
const callModel = async (state: typeof WorkflowState.State) => {
if (!this.llmSimpleTask) {
throw new LLMServiceError('LLM not setup');
}
if (typeof this.llmSimpleTask.bindTools !== 'function') {
throw new LLMServiceError('LLM does not support tools', {
llmModel: this.llmSimpleTask._llmType(),
});
}
const prompt = await mainAgentPrompt.invoke({
...state,
executionData: state.workflowContext?.executionData ?? {},
executionSchema: state.workflowContext?.executionSchema ?? [],
});
const response = await this.llmSimpleTask.bindTools(tools).invoke(prompt);
return { messages: [response] };
};
const shouldModifyState = ({ messages }: typeof WorkflowState.State) => {
const lastMessage = messages[messages.length - 1] as HumanMessage;
if (lastMessage.content === '/compact') {
return 'compact_messages';
}
if (lastMessage.content === '/clear') {
return 'delete_messages';
}
return 'agent';
};
const shouldContinue = ({ messages }: typeof WorkflowState.State) => {
const lastMessage = messages[messages.length - 1] as AIMessage;
if (lastMessage.tool_calls?.length) {
return 'tools';
}
return END;
};
const customToolExecutor = async (state: typeof WorkflowState.State) => {
return await executeToolsInParallel({ state, toolMap });
};
function deleteMessages(state: typeof WorkflowState.State) {
const messages = state.messages;
const stateUpdate: Partial<typeof WorkflowState.State> = {
workflowOperations: null,
workflowContext: {},
messages: messages.map((m) => new RemoveMessage({ id: m.id! })),
workflowJSON: {
nodes: [],
connections: {},
},
};
return stateUpdate;
}
const compactSession = async (state: typeof WorkflowState.State) => {
if (!this.llmSimpleTask) {
throw new LLMServiceError('LLM not setup');
}
const messages = state.messages;
const compactedMessages = await conversationCompactChain(this.llmSimpleTask, messages);
return {
messages: [
...messages.map((m) => new RemoveMessage({ id: m.id! })),
...compactedMessages.newMessages,
],
};
};
const workflow = new StateGraph(WorkflowState)
.addNode('agent', callModel)
.addNode('tools', customToolExecutor)
.addNode('process_operations', processOperations)
.addNode('delete_messages', deleteMessages)
.addNode('compact_messages', compactSession)
.addConditionalEdges('__start__', shouldModifyState)
.addEdge('tools', 'process_operations')
.addEdge('process_operations', 'agent')
.addEdge('delete_messages', END)
.addEdge('compact_messages', END)
.addConditionalEdges('agent', shouldContinue);
return workflow;
}
async getState(workflowId: string, userId?: string) {
const workflow = this.createWorkflow();
const agent = workflow.compile({ checkpointer: this.checkpointer });
return await agent.getState({
configurable: { thread_id: WorkflowBuilderAgent.generateThreadId(workflowId, userId) },
});
}
static generateThreadId(workflowId?: string, userId?: string) {
return workflowId
? `workflow-${workflowId}-user-${userId ?? new Date().getTime()}`
: crypto.randomUUID();
}
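// Example (illustrative): generateThreadId('wf-123', 'user-1') returns
// 'workflow-wf-123-user-user-1'; when no workflowId is given, a random UUID is returned instead.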
async *chat(payload: ChatPayload, userId?: string) {
const agent = this.createWorkflow().compile({ checkpointer: this.checkpointer });
const workflowId = payload.workflowContext?.currentWorkflow?.id;
// Generate thread ID from workflowId and userId
// This ensures one session per workflow per user
const threadId = WorkflowBuilderAgent.generateThreadId(workflowId, userId);
// Configure thread for checkpointing
const threadConfig = {
configurable: {
thread_id: threadId,
},
};
// Check if this is a subsequent message
// If so, update the workflowJSON with the current editor state
const existingCheckpoint = await this.checkpointer.getTuple(threadConfig);
let stream;
if (!existingCheckpoint?.checkpoint) {
// First message - use initial state
const initialState: typeof WorkflowState.State = {
messages: [new HumanMessage({ content: payload.message })],
workflowJSON: (payload.workflowContext?.currentWorkflow as SimpleWorkflow) ?? {
nodes: [],
connections: {},
},
workflowOperations: [],
workflowContext: payload.workflowContext,
};
stream = await agent.stream(initialState, {
...threadConfig,
streamMode: ['updates', 'custom'],
recursionLimit: 30,
callbacks: this.tracer ? [this.tracer] : undefined,
});
} else {
// Subsequent message - update the state with current workflow
const stateUpdate: Partial<typeof WorkflowState.State> = {
messages: [new HumanMessage({ content: payload.message })],
workflowOperations: [], // Clear any pending operations from previous message
workflowContext: payload.workflowContext,
workflowJSON: { nodes: [], connections: {} }, // Default to empty workflow
};
if (payload.workflowContext?.currentWorkflow) {
stateUpdate.workflowJSON = payload.workflowContext?.currentWorkflow as SimpleWorkflow;
}
// Stream with just the new message
stream = await agent.stream(stateUpdate, {
...threadConfig,
streamMode: ['updates', 'custom'],
recursionLimit: 80,
callbacks: this.tracer ? [this.tracer] : undefined,
});
}
// Use the stream processor utility to handle chunk processing
const streamProcessor = createStreamProcessor(stream);
for await (const output of streamProcessor) {
yield output;
}
}
async getSessions(workflowId: string | undefined, userId?: string) {
// For now, we'll return the current session if we have a workflowId
// MemorySaver doesn't expose a way to list all threads, so we'll need to
// track this differently if we want to list all sessions
const sessions = [];
if (workflowId) {
const threadId = WorkflowBuilderAgent.generateThreadId(workflowId, userId);
const threadConfig = {
configurable: {
thread_id: threadId,
},
};
try {
// Try to get the checkpoint for this thread
const checkpoint = await this.checkpointer.getTuple(threadConfig);
if (checkpoint?.checkpoint) {
const messages =
(checkpoint.checkpoint.channel_values?.messages as Array<
AIMessage | HumanMessage | ToolMessage
>) ?? [];
sessions.push({
sessionId: threadId,
messages: formatMessages(messages),
lastUpdated: checkpoint.checkpoint.ts,
});
}
} catch (error) {
// Thread doesn't exist yet
this.logger?.debug('No session found for workflow:', { workflowId, error });
}
}
return { sessions };
}
}
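// Usage sketch (illustrative; `nodeTypes`, `simpleLlm` and `complexLlm` are assumed to be
// provided by the caller, e.g. via the service layer):
//
//   const agent = new WorkflowBuilderAgent({
//     parsedNodeTypes: nodeTypes,
//     llmSimpleTask: simpleLlm,
//     llmComplexTask: complexLlm,
//   });
//   for await (const chunk of agent.chat({ message: 'Create a webhook that posts to Slack' })) {
//     // stream each chunk back to the client
//   }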

View File

@@ -0,0 +1,57 @@
import type { BaseMessage } from '@langchain/core/messages';
import { Annotation, messagesStateReducer } from '@langchain/langgraph';
import type { SimpleWorkflow, WorkflowOperation } from './types/workflow';
import type { ChatPayload } from './workflow-builder-agent';
/**
* Reducer for collecting workflow operations from parallel tool executions.
* This reducer intelligently merges operations, avoiding duplicates and handling special cases.
*/
function operationsReducer(
current: WorkflowOperation[] | null,
update: WorkflowOperation[] | null | undefined,
): WorkflowOperation[] {
if (update === null) {
return [];
}
if (!update || update.length === 0) {
return current ?? [];
}
// For clear operations, we can reset everything
if (update.some((op) => op.type === 'clear')) {
return update.filter((op) => op.type === 'clear').slice(-1); // Keep only the last clear
}
// Otherwise, append new operations
return [...(current ?? []), ...update];
}
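// Behavior sketch (illustrative values):
//   operationsReducer([{ type: 'addNodes', nodes: [] }], [{ type: 'clear' }])
//     -> [{ type: 'clear' }]                                    // a clear operation resets the queue
//   operationsReducer([{ type: 'clear' }], [{ type: 'addNodes', nodes: [] }])
//     -> [{ type: 'clear' }, { type: 'addNodes', nodes: [] }]   // otherwise operations append
//   operationsReducer([{ type: 'clear' }], null)
//     -> []                                                     // null explicitly resets the accumulated operations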
export const WorkflowState = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: messagesStateReducer,
default: () => [],
}),
// The JSON representation of the workflow being built.
// Kept as a simple last-write-wins field - all updates go through operations
workflowJSON: Annotation<SimpleWorkflow>({
reducer: (x, y) => y ?? x,
default: () => ({ nodes: [], connections: {} }),
}),
// Operations to apply to the workflow - processed by a separate node
workflowOperations: Annotation<WorkflowOperation[] | null>({
reducer: operationsReducer,
default: () => [],
}),
// Latest workflow context
workflowContext: Annotation<ChatPayload['workflowContext'] | undefined>({
reducer: (x, y) => y ?? x,
}),
});
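// Shape sketch (illustrative): a typical state update produced by the tool executor,
// later applied to `workflowJSON` by the operations processor node.
//
//   const update: Partial<typeof WorkflowState.State> = {
//     messages: [],
//     workflowOperations: [
//       { type: 'addNodes', nodes: [/* INode objects */] },
//       { type: 'setConnections', connections: {} },
//     ],
//   };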

View File

@@ -0,0 +1,599 @@
import type { ToolRunnableConfig } from '@langchain/core/tools';
import type { LangGraphRunnableConfig } from '@langchain/langgraph';
import { getCurrentTaskInput } from '@langchain/langgraph';
import type { MockProxy } from 'jest-mock-extended';
import { mock } from 'jest-mock-extended';
import type {
INode,
INodeTypeDescription,
INodeParameters,
IConnection,
NodeConnectionType,
} from 'n8n-workflow';
import { jsonParse } from 'n8n-workflow';
import type { ProgressReporter, ToolProgressMessage } from '../src/types/tools';
import type { SimpleWorkflow } from '../src/types/workflow';
export const mockProgress = (): MockProxy<ProgressReporter> => mock<ProgressReporter>();
// Mock state helpers
export const mockStateHelpers = () => ({
getNodes: jest.fn(() => [] as INode[]),
getConnections: jest.fn(() => ({}) as SimpleWorkflow['connections']),
updateNode: jest.fn((_id: string, _updates: Partial<INode>) => undefined),
addNodes: jest.fn((_nodes: INode[]) => undefined),
removeNode: jest.fn((_id: string) => undefined),
addConnections: jest.fn((_connections: IConnection[]) => undefined),
removeConnection: jest.fn((_sourceId: string, _targetId: string, _type?: string) => undefined),
});
export type MockStateHelpers = ReturnType<typeof mockStateHelpers>;
// Simple node creation helper
export const createNode = (overrides: Partial<INode> = {}): INode => ({
id: 'node1',
name: 'TestNode',
type: 'n8n-nodes-base.code',
typeVersion: 1,
position: [0, 0],
...overrides,
// Ensure parameters are always defined, defaulting to an empty object when not provided
parameters: overrides.parameters ?? {},
});
// Simple workflow builder
export const createWorkflow = (nodes: INode[] = []): SimpleWorkflow => {
const workflow: SimpleWorkflow = { nodes, connections: {} };
return workflow;
};
// Create mock node type description
export const createNodeType = (
overrides: Partial<INodeTypeDescription> = {},
): INodeTypeDescription => ({
displayName: overrides.displayName ?? 'Test Node',
name: overrides.name ?? 'test.node',
group: overrides.group ?? ['transform'],
version: overrides.version ?? 1,
description: overrides.description ?? 'Test node description',
defaults: overrides.defaults ?? { name: 'Test Node' },
inputs: overrides.inputs ?? ['main'],
outputs: overrides.outputs ?? ['main'],
properties: overrides.properties ?? [],
...overrides,
});
// Common node types for testing
export const nodeTypes = {
code: createNodeType({
displayName: 'Code',
name: 'n8n-nodes-base.code',
group: ['transform'],
properties: [
{
displayName: 'JavaScript',
name: 'jsCode',
type: 'string',
typeOptions: {
editor: 'codeNodeEditor',
},
default: '',
},
],
}),
httpRequest: createNodeType({
displayName: 'HTTP Request',
name: 'n8n-nodes-base.httpRequest',
group: ['input'],
properties: [
{
displayName: 'URL',
name: 'url',
type: 'string',
default: '',
},
{
displayName: 'Method',
name: 'method',
type: 'options',
options: [
{ name: 'GET', value: 'GET' },
{ name: 'POST', value: 'POST' },
],
default: 'GET',
},
],
}),
webhook: createNodeType({
displayName: 'Webhook',
name: 'n8n-nodes-base.webhook',
group: ['trigger'],
inputs: [],
outputs: ['main'],
webhooks: [
{
name: 'default',
httpMethod: 'POST',
responseMode: 'onReceived',
path: 'webhook',
},
],
properties: [
{
displayName: 'Path',
name: 'path',
type: 'string',
default: 'webhook',
},
],
}),
agent: createNodeType({
displayName: 'AI Agent',
name: '@n8n/n8n-nodes-langchain.agent',
group: ['output'],
inputs: ['ai_agent'],
outputs: ['main'],
properties: [],
}),
openAiModel: createNodeType({
displayName: 'OpenAI Chat Model',
name: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
group: ['output'],
inputs: [],
outputs: ['ai_languageModel'],
properties: [],
}),
setNode: createNodeType({
displayName: 'Set',
name: 'n8n-nodes-base.set',
group: ['transform'],
properties: [
{
displayName: 'Values to Set',
name: 'values',
type: 'collection',
default: {},
},
],
}),
ifNode: createNodeType({
displayName: 'If',
name: 'n8n-nodes-base.if',
group: ['transform'],
inputs: ['main'],
outputs: ['main', 'main'],
outputNames: ['true', 'false'],
properties: [
{
displayName: 'Conditions',
name: 'conditions',
type: 'collection',
default: {},
},
],
}),
mergeNode: createNodeType({
displayName: 'Merge',
name: 'n8n-nodes-base.merge',
group: ['transform'],
inputs: ['main', 'main'],
outputs: ['main'],
inputNames: ['Input 1', 'Input 2'],
properties: [
{
displayName: 'Mode',
name: 'mode',
type: 'options',
options: [
{ name: 'Append', value: 'append' },
{ name: 'Merge By Index', value: 'mergeByIndex' },
{ name: 'Merge By Key', value: 'mergeByKey' },
],
default: 'append',
},
],
}),
vectorStoreNode: createNodeType({
displayName: 'Vector Store',
name: '@n8n/n8n-nodes-langchain.vectorStore',
subtitle: '={{$parameter["mode"] === "retrieve" ? "Retrieve" : "Insert"}}',
group: ['transform'],
inputs: `={{ ((parameter) => {
function getInputs(parameters) {
const mode = parameters?.mode;
const inputs = [];
if (mode === 'retrieve-as-tool') {
inputs.push({
displayName: 'Embedding',
type: 'ai_embedding',
required: true
});
} else {
inputs.push({
displayName: '',
type: 'main'
});
inputs.push({
displayName: 'Embedding',
type: 'ai_embedding',
required: true
});
}
return inputs;
};
return getInputs(parameter)
})($parameter) }}`,
outputs: `={{ ((parameter) => {
function getOutputs(parameters) {
const mode = parameters?.mode;
if (mode === 'retrieve-as-tool') {
return ['ai_tool'];
} else if (mode === 'retrieve') {
return ['ai_document'];
} else {
return ['main'];
}
};
return getOutputs(parameter)
})($parameter) }}`,
properties: [
{
displayName: 'Mode',
name: 'mode',
type: 'options',
options: [
{ name: 'Insert', value: 'insert' },
{ name: 'Retrieve', value: 'retrieve' },
{ name: 'Retrieve (As Tool)', value: 'retrieve-as-tool' },
],
default: 'insert',
},
// Many more properties would be here in reality
],
}),
};
// Helper to create connections
export const createConnection = (
_fromId: string,
toId: string,
type: NodeConnectionType = 'main',
index: number = 0,
) => ({
node: toId,
type,
index,
});
// Generic chain interface
interface Chain<TInput = Record<string, unknown>, TOutput = Record<string, unknown>> {
invoke: (input: TInput) => Promise<TOutput>;
}
// Generic mock chain factory with proper typing
export const mockChain = <
TInput = Record<string, unknown>,
TOutput = Record<string, unknown>,
>(): MockProxy<Chain<TInput, TOutput>> => {
return mock<Chain<TInput, TOutput>>();
};
// Convenience factory for parameter updater chain
export const mockParameterUpdaterChain = () => {
return mockChain<Record<string, unknown>, { parameters: Record<string, unknown> }>();
};
// Helper to assert node parameters
export const expectNodeToHaveParameters = (
node: INode,
expectedParams: Partial<INodeParameters>,
): void => {
expect(node.parameters).toMatchObject(expectedParams);
};
// Helper to assert connections exist
export const expectConnectionToExist = (
connections: SimpleWorkflow['connections'],
fromId: string,
toId: string,
type: string = 'main',
): void => {
expect(connections[fromId]).toBeDefined();
expect(connections[fromId][type]).toBeDefined();
expect(connections[fromId][type]).toContainEqual(
expect.arrayContaining([expect.objectContaining({ node: toId })]),
);
};
// ========== LangGraph Testing Utilities ==========
// Types for mocked Command results
export type MockedCommandResult = { content: string };
// Common parsed content structure for tool results
export interface ParsedToolContent {
update: {
messages: Array<{ kwargs: { content: string } }>;
workflowOperations?: Array<{
type: string;
nodes?: INode[];
[key: string]: unknown;
}>;
};
}
// Setup LangGraph mocks
export const setupLangGraphMocks = () => {
const mockGetCurrentTaskInput = getCurrentTaskInput as jest.MockedFunction<
typeof getCurrentTaskInput
>;
jest.mock('@langchain/langgraph', () => ({
getCurrentTaskInput: jest.fn(),
Command: jest.fn().mockImplementation((params: Record<string, unknown>) => ({
content: JSON.stringify(params),
})),
}));
return { mockGetCurrentTaskInput };
};
// Parse tool result with double-wrapped content handling
export const parseToolResult = <T = ParsedToolContent>(result: unknown): T => {
const parsed = jsonParse<{ content?: string }>((result as MockedCommandResult).content);
return parsed.content ? jsonParse<T>(parsed.content) : (parsed as T);
};
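// Usage sketch (illustrative): the mocked Command serialises its params into `content`,
// so a tool result can arrive single- or double-wrapped. Both forms resolve to the same
// parsed structure:
//
//   parseToolResult({ content: '{"update":{"messages":[]}}' })
//     -> { update: { messages: [] } }
//   parseToolResult({ content: '{"content":"{\\"update\\":{\\"messages\\":[]}}"}' })
//     -> { update: { messages: [] } }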
// ========== Progress Message Utilities ==========
// Extract progress messages from mockWriter
export const extractProgressMessages = (
mockWriter: jest.Mock,
): Array<ToolProgressMessage<string>> => {
const progressCalls: Array<ToolProgressMessage<string>> = [];
mockWriter.mock.calls.forEach((call) => {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const [arg] = call;
progressCalls.push(arg as ToolProgressMessage<string>);
});
return progressCalls;
};
// Find specific progress message by type
export const findProgressMessage = (
messages: Array<ToolProgressMessage<string>>,
status: 'running' | 'completed' | 'error',
updateType?: string,
): ToolProgressMessage<string> | undefined => {
return messages.find(
(msg) => msg.status === status && (!updateType || msg.updates[0]?.type === updateType),
);
};
// ========== Tool Config Helpers ==========
// Create basic tool config
export const createToolConfig = (
toolName: string,
callId: string = 'test-call',
): ToolRunnableConfig => ({
toolCall: { id: callId, name: toolName, args: {} },
});
// Create tool config with writer for progress tracking
export const createToolConfigWithWriter = (
toolName: string,
callId: string = 'test-call',
): ToolRunnableConfig & LangGraphRunnableConfig & { writer: jest.Mock } => {
const mockWriter = jest.fn();
return {
toolCall: { id: callId, name: toolName, args: {} },
writer: mockWriter,
};
};
// ========== Workflow State Helpers ==========
// Setup workflow state with mockGetCurrentTaskInput
export const setupWorkflowState = (
mockGetCurrentTaskInput: jest.MockedFunction<typeof getCurrentTaskInput>,
workflow: SimpleWorkflow = createWorkflow([]),
) => {
mockGetCurrentTaskInput.mockReturnValue({
workflowJSON: workflow,
});
};
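// Usage sketch (illustrative): a typical tool test wires these helpers together.
//
//   const { mockGetCurrentTaskInput } = setupLangGraphMocks();
//   setupWorkflowState(mockGetCurrentTaskInput, createWorkflow([createNode()]));
//   const config = createToolConfigWithWriter('add_nodes');
//   // ...invoke the tool under test with `config`, then:
//   // const content = parseToolResult(result);
//   // expectToolSuccess(content, /node/i);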
// ========== Common Tool Assertions ==========
// Expect tool success message
export const expectToolSuccess = (
content: ParsedToolContent,
expectedMessage: string | RegExp,
): void => {
const message = content.update.messages[0]?.kwargs.content;
expect(message).toBeDefined();
if (typeof expectedMessage === 'string') {
expect(message).toContain(expectedMessage);
} else {
expect(message).toMatch(expectedMessage);
}
};
// Expect tool error message
export const expectToolError = (
content: ParsedToolContent,
expectedError: string | RegExp,
): void => {
const message = content.update.messages[0]?.kwargs.content;
if (typeof expectedError === 'string') {
expect(message).toBe(expectedError);
} else {
expect(message).toMatch(expectedError);
}
};
// Expect workflow operation of specific type
export const expectWorkflowOperation = (
content: ParsedToolContent,
operationType: string,
matcher?: Record<string, unknown>,
): void => {
const operation = content.update.workflowOperations?.[0];
expect(operation).toBeDefined();
expect(operation?.type).toBe(operationType);
if (matcher) {
expect(operation).toMatchObject(matcher);
}
};
// Expect node was added
export const expectNodeAdded = (content: ParsedToolContent, expectedNode: Partial<INode>): void => {
expectWorkflowOperation(content, 'addNodes');
const addedNode = content.update.workflowOperations?.[0]?.nodes?.[0];
expect(addedNode).toBeDefined();
expect(addedNode).toMatchObject(expectedNode);
};
// Expect node was removed
export const expectNodeRemoved = (content: ParsedToolContent, nodeId: string): void => {
expectWorkflowOperation(content, 'removeNode', { nodeIds: [nodeId] });
};
// Expect connections were added
export const expectConnectionsAdded = (
content: ParsedToolContent,
expectedCount?: number,
): void => {
expectWorkflowOperation(content, 'addConnections');
if (expectedCount !== undefined) {
const connections = content.update.workflowOperations?.[0]?.connections;
expect(connections).toHaveLength(expectedCount);
}
};
// Expect node was updated
export const expectNodeUpdated = (
content: ParsedToolContent,
nodeId: string,
expectedUpdates?: Record<string, unknown>,
): void => {
expectWorkflowOperation(content, 'updateNode', {
nodeId,
...(expectedUpdates ? { updates: expect.objectContaining(expectedUpdates) } : {}),
});
};
// ========== Test Data Builders ==========
// Build add node input
export const buildAddNodeInput = (overrides: {
nodeType: string;
name?: string;
connectionParametersReasoning?: string;
connectionParameters?: Record<string, unknown>;
}) => ({
nodeType: overrides.nodeType,
name: overrides.name ?? 'Test Node',
connectionParametersReasoning:
overrides.connectionParametersReasoning ??
'Standard node with static inputs/outputs, no connection parameters needed',
connectionParameters: overrides.connectionParameters ?? {},
});
// Build connect nodes input
export const buildConnectNodesInput = (overrides: {
sourceNodeId: string;
targetNodeId: string;
sourceOutputIndex?: number;
targetInputIndex?: number;
}) => ({
sourceNodeId: overrides.sourceNodeId,
targetNodeId: overrides.targetNodeId,
sourceOutputIndex: overrides.sourceOutputIndex ?? 0,
targetInputIndex: overrides.targetInputIndex ?? 0,
});
// Build node search query
export const buildNodeSearchQuery = (
queryType: 'name' | 'subNodeSearch',
query?: string,
connectionType?: NodeConnectionType,
) => ({
queryType,
...(query && { query }),
...(connectionType && { connectionType }),
});
// Build update node parameters input
export const buildUpdateNodeInput = (nodeId: string, changes: string[]) => ({
nodeId,
changes,
});
// Build node details input
export const buildNodeDetailsInput = (overrides: {
nodeName: string;
withParameters?: boolean;
withConnections?: boolean;
}) => ({
nodeName: overrides.nodeName,
withParameters: overrides.withParameters ?? false,
withConnections: overrides.withConnections ?? true,
});
// Expect node details in response
export const expectNodeDetails = (
content: ParsedToolContent,
expectedDetails: Partial<{
name: string;
displayName: string;
description: string;
subtitle?: string;
}>,
): void => {
const message = content.update.messages[0]?.kwargs.content;
expect(message).toBeDefined();
// Check for expected XML-like tags in formatted output
if (expectedDetails.name) {
expect(message).toContain(`<name>${expectedDetails.name}</name>`);
}
if (expectedDetails.displayName) {
expect(message).toContain(`<display_name>${expectedDetails.displayName}</display_name>`);
}
if (expectedDetails.description) {
expect(message).toContain(`<description>${expectedDetails.description}</description>`);
}
if (expectedDetails.subtitle) {
expect(message).toContain(`<subtitle>${expectedDetails.subtitle}</subtitle>`);
}
};
// Helper to validate XML-like structure in output
export const expectXMLTag = (
content: string,
tagName: string,
expectedValue?: string | RegExp,
): void => {
const tagRegex = new RegExp(`<${tagName}>([\\s\\S]*?)</${tagName}>`);
const match = content.match(tagRegex);
expect(match).toBeDefined();
if (expectedValue) {
if (typeof expectedValue === 'string') {
expect(match?.[1]?.trim()).toBe(expectedValue);
} else {
expect(match?.[1]).toMatch(expectedValue);
}
}
};
// Common reasoning strings
export const REASONING = {
STATIC_NODE: 'Node has static inputs/outputs, no connection parameters needed',
DYNAMIC_AI_NODE: 'AI node has dynamic inputs, setting connection parameters',
TRIGGER_NODE: 'Trigger node, no connection parameters needed',
WEBHOOK_NODE: 'Webhook is a trigger node, no connection parameters needed',
} as const;

View File

@@ -7,5 +7,5 @@
"tsBuildInfoFile": "dist/build.tsbuildinfo"
},
"include": ["src/**/*.ts"],
"exclude": ["src/**/__tests__/**"]
"exclude": ["src/**/__tests__/**", "src/**/test-utils/**", "**/*.test.ts"]
}

View File

@@ -13,5 +13,5 @@
},
"tsBuildInfoFile": "dist/typecheck.tsbuildinfo"
},
"include": ["src/**/*.ts"]
"include": ["src/**/*.ts", "test/**/*.ts", "evaluations/**/*.ts"]
}

View File

@@ -1,5 +0,0 @@
/** @type {import('jest').Config} */
module.exports = {
...require('../../../jest.config'),
testTimeout: 10_000,
};

View File

@@ -1,43 +0,0 @@
{
"name": "@n8n/ai-workflow-builder",
"version": "0.13.0",
"scripts": {
"clean": "rimraf dist .turbo",
"typecheck": "tsc --noEmit",
"build": "tsc -p ./tsconfig.build.json && tsc-alias -p tsconfig.build.json",
"format": "biome format --write src",
"format:check": "biome ci src",
"test": "jest",
"test:watch": "jest --watch",
"lint": "eslint . --quiet",
"lintfix": "eslint . --fix",
"watch": "tsc-watch -p tsconfig.build.json --onCompilationComplete \"tsc-alias -p tsconfig.build.json\""
},
"main": "dist/index.js",
"module": "src/index.ts",
"types": "dist/index.d.ts",
"files": [
"dist/**/*"
],
"exports": {
".": {
"require": "./dist/index.js",
"import": "./src/index.ts",
"types": "./dist/index.d.ts"
}
},
"dependencies": {
"@langchain/anthropic": "catalog:",
"@langchain/core": "catalog:",
"@langchain/langgraph": "0.2.45",
"@langchain/openai": "catalog:",
"@n8n/config": "workspace:*",
"@n8n/di": "workspace:*",
"@n8n_io/ai-assistant-sdk": "catalog:",
"n8n-workflow": "workspace:*",
"zod": "catalog:"
},
"devDependencies": {
"@n8n/typescript-config": "workspace:*"
}
}

View File

@@ -1,375 +0,0 @@
import { dispatchCustomEvent } from '@langchain/core/callbacks/dispatch';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { RunnableConfig } from '@langchain/core/runnables';
import { StateGraph, END, START } from '@langchain/langgraph';
import { Service } from '@n8n/di';
import { AiAssistantClient } from '@n8n_io/ai-assistant-sdk';
import { OperationalError, assert, INodeTypes } from 'n8n-workflow';
import type { IUser, INodeTypeDescription, INode } from 'n8n-workflow';
import { connectionComposerChain } from './chains/connection-composer';
import { nodesSelectionChain } from './chains/node-selector';
import { nodesComposerChain } from './chains/nodes-composer';
import { plannerChain } from './chains/planner';
import { validatorChain } from './chains/validator';
import { anthropicClaude37Sonnet, gpt41mini } from './llm-config';
import type { MessageResponse } from './types';
import { WorkflowState } from './workflow-state';
@Service()
export class AiWorkflowBuilderService {
private parsedNodeTypes: INodeTypeDescription[] = [];
private llmSimpleTask: BaseChatModel | undefined;
private llmComplexTask: BaseChatModel | undefined;
constructor(
private readonly nodeTypes: INodeTypes,
private readonly client?: AiAssistantClient,
) {
this.parsedNodeTypes = this.getNodeTypes();
}
private async setupModels(user?: IUser) {
if (this.llmSimpleTask && this.llmComplexTask) {
return;
}
// If client is provided, use it for API proxy
if (this.client && user) {
const authHeaders = await this.client.generateApiProxyCredentials(user);
// Extract baseUrl from client configuration
const baseUrl = this.client.getApiProxyBaseUrl();
this.llmSimpleTask = await gpt41mini({
baseUrl: baseUrl + '/openai',
// When using api-proxy the key will be populated automatically, we just need to pass a placeholder
apiKey: '-',
headers: {
Authorization: authHeaders.apiKey,
},
});
this.llmComplexTask = await anthropicClaude37Sonnet({
baseUrl: baseUrl + '/anthropic',
apiKey: '-',
headers: {
Authorization: authHeaders.apiKey,
},
});
return;
}
// If no client provided, use environment variables
this.llmSimpleTask = await gpt41mini({
apiKey: process.env.N8N_AI_OPENAI_API_KEY ?? '',
});
this.llmComplexTask = await anthropicClaude37Sonnet({
apiKey: process.env.N8N_AI_ANTHROPIC_KEY ?? '',
});
}
private getNodeTypes(): INodeTypeDescription[] {
const nodeTypesKeys = Object.keys(this.nodeTypes.getKnownTypes());
const nodeTypes = nodeTypesKeys
.map((nodeName) => {
return { ...this.nodeTypes.getByNameAndVersion(nodeName).description, name: nodeName };
})
.filter((nodeType) => nodeType.hidden !== true);
return nodeTypes;
}
private isWorkflowEvent(eventName: string): boolean {
return [
'prompt_validation',
'generated_steps',
'generated_nodes',
'composed_nodes',
'composed_connections',
'generated_workflow_json',
].includes(eventName);
}
private getAgent() {
const validatorChainNode = async (
state: typeof WorkflowState.State,
config: RunnableConfig,
): Promise<Partial<typeof WorkflowState.State>> => {
assert(this.llmSimpleTask, 'LLM not setup');
const isWorkflowPrompt = await validatorChain(this.llmSimpleTask).invoke(
{
prompt: state.prompt,
},
config,
);
if (!isWorkflowPrompt) {
await dispatchCustomEvent('prompt_validation', {
role: 'assistant',
type: 'prompt-validation',
isWorkflowPrompt,
id: Date.now().toString(),
});
}
return {
isWorkflowPrompt,
};
};
const plannerChainNode = async (
state: typeof WorkflowState.State,
config: RunnableConfig,
): Promise<Partial<typeof WorkflowState.State>> => {
assert(this.llmComplexTask, 'LLM not setup');
const steps = await plannerChain(this.llmComplexTask).invoke(
{
prompt: state.prompt,
},
config,
);
await dispatchCustomEvent('generated_steps', {
role: 'assistant',
type: 'workflow-step',
steps,
id: Date.now().toString(),
read: false,
});
return {
steps,
};
};
const nodeSelectionChainNode = async (
state: typeof WorkflowState.State,
config: RunnableConfig,
) => {
assert(this.llmSimpleTask, 'LLM not setup');
const getNodeMessage = (node: INodeTypeDescription) => {
return `
<node_name>${node.name}</node_name>
<node_description>${node.description}</node_description>
`;
};
const allowedNodes = this.parsedNodeTypes.map(getNodeMessage).join('');
const result = await nodesSelectionChain(this.llmSimpleTask).invoke(
{
allowedNodes,
prompt: state.prompt,
steps: state.steps.join('\n'),
},
config,
);
const nodes = [...new Set(result.map((r) => r.node))];
await dispatchCustomEvent('generated_nodes', {
role: 'assistant',
type: 'workflow-node',
nodes,
id: Date.now().toString(),
read: false,
});
return {
nodes,
};
};
const nodesComposerChainNode = async (
state: typeof WorkflowState.State,
config: RunnableConfig,
) => {
assert(this.llmComplexTask, 'LLM not setup');
const getLatestVersion = (nodeType: string) => {
const node = this.parsedNodeTypes.find((n) => n.name === nodeType);
if (!node) {
throw new OperationalError(`Node type not found: ${nodeType}`);
}
if (node.defaultVersion) {
return node.defaultVersion;
}
return typeof node.version === 'number'
? node.version
: node.version[node.version.length - 1];
};
const getNodeMessage = (nodeName: string) => {
const node = this.parsedNodeTypes.find((n) => n.name === nodeName);
if (!node) {
throw new OperationalError(`Node type not found: ${nodeName}`);
}
return `
<node_name>
${node.name}
</node_name>
<node_description>
${node.description}
</node_description>
<node_parameters>
${JSON.stringify(node.properties)}
</node_parameters>
`;
};
const result = await nodesComposerChain(this.llmComplexTask).invoke(
{
user_workflow_prompt: state.prompt,
nodes: state.nodes.map(getNodeMessage).join('\n\n'),
},
config,
);
const composedNodes = result.map((node, index) => {
const version = getLatestVersion(node.type);
return {
...node,
position: [index * 150, 0],
typeVersion: version,
};
});
await dispatchCustomEvent('composed_nodes', {
role: 'assistant',
type: 'workflow-composed',
nodes: composedNodes,
id: Date.now().toString(),
read: false,
});
return {
workflowJSON: {
nodes: composedNodes,
connections: {},
},
};
};
const connectionComposerChainNode = async (
state: typeof WorkflowState.State,
config: RunnableConfig,
) => {
assert(this.llmComplexTask, 'LLM not setup');
// Pass the selected nodes as input to create connections.
const getNodeMessage = (node: INode) => {
return `
<node>
${JSON.stringify(node)}
</node>
`;
};
const connections = await connectionComposerChain(this.llmComplexTask).invoke(
{
workflowJSON: state.workflowJSON.nodes.map(getNodeMessage).join('\n\n'),
},
config,
);
const workflowJSON = {
...state.workflowJSON,
connections,
};
await dispatchCustomEvent('composed_connections', {
role: 'assistant',
type: 'workflow-connections',
workflowJSON,
id: Date.now().toString(),
read: false,
});
return {
workflowJSON,
};
};
///////////////////// Finalization /////////////////////
// Finalize the workflow JSON by combining nodes and their connections.
async function generateWorkflowJSON(state: typeof WorkflowState.State) {
await dispatchCustomEvent('generated_workflow_json', {
role: 'assistant',
type: 'workflow-generated',
codeSnippet: JSON.stringify(state.workflowJSON, null, 4),
});
return { workflowJSON: JSON.stringify(state.workflowJSON, null, 2) };
}
///////////////////// Workflow Graph Definition /////////////////////
const workflowGraph = new StateGraph(WorkflowState)
.addNode('validator', validatorChainNode)
.addNode('planner', plannerChainNode)
.addNode('node_selector', nodeSelectionChainNode)
.addNode('nodes_composer', nodesComposerChainNode)
.addNode('connection_composer', connectionComposerChainNode)
.addNode('finalize', generateWorkflowJSON);
// Define the graph edges to set the processing order:
// Start with the validator
workflowGraph.addEdge(START, 'validator');
// If validated, continue to planner
workflowGraph.addConditionalEdges('validator', (state) => {
return state.isWorkflowPrompt ? 'planner' : END;
});
// Planner node flows into node selector:
workflowGraph.addEdge('planner', 'node_selector');
// Node selector is followed by nodes composer:
workflowGraph.addEdge('node_selector', 'nodes_composer');
// Nodes composer is followed by connection composer:
workflowGraph.addEdge('nodes_composer', 'connection_composer');
// Connection composer flows to finalization:
workflowGraph.addEdge('connection_composer', 'finalize');
// Finalization flows to end:
workflowGraph.addEdge('finalize', END);
return workflowGraph;
}
async *chat(payload: { question: string }, user?: IUser) {
if (!this.llmComplexTask || !this.llmSimpleTask) {
await this.setupModels(user);
}
const agent = this.getAgent().compile();
const initialState: typeof WorkflowState.State = {
messages: [],
prompt: payload.question,
steps: [],
nodes: [],
workflowJSON: { nodes: [], connections: {} },
isWorkflowPrompt: false,
next: 'PLAN',
};
const stream = agent.streamEvents(initialState, {
streamMode: 'custom',
recursionLimit: 10,
version: 'v2',
});
for await (const chunk of stream) {
let messageChunk: MessageResponse;
if (chunk.event === 'on_custom_event') {
if (this.isWorkflowEvent(chunk.name)) {
messageChunk = chunk.data as MessageResponse;
} else {
messageChunk = {
role: 'assistant',
type: 'intermediate-step',
text: chunk.data as string,
step: chunk.name,
};
}
yield { messages: [messageChunk] };
}
}
}
}

View File

@@ -1,156 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
export const connectionComposerPrompt = new SystemMessage(
`You are an expert in creating n8n workflow connections. Your job is to create a valid n8n workflow by connecting nodes in a logical sequence.
## Your Task
Create connections between nodes that form a coherent, executable workflow based on the user's request.
## Input Format
You will receive a list of n8n nodes with their details in <node> tags:
\`\`\`
<node>
{
"name": "Node display name",
"type": "n8n-nodes-base.nodeType",
"parameters": { ... },
"position": [x, y]
}
</node>
\`\`\`
## n8n Connection Structure
In n8n workflows:
1. Data flows from one node to the next through connections
2. Connections are defined in the "connections" object
3. Each node's output can connect to one or more nodes' inputs
4. Each connection has a source node, target node, and IO indices
## Connection Format
\`\`\`json
{
"connections": {
"Source Node Display Name": {
"main": [
[
{
"node": "Target Node Display Name",
"type": "main",
"index": 0
}
]
]
}
}
}
\`\`\`
## Rules for Creating Connections
1. ALWAYS use the node "name" field (display name) for the connection references
2. Create a logical flow from trigger/input nodes to output/action nodes
3. Each node MUST connect to at least one other node (except terminal nodes)
4. Don't create loops or cycles in the workflow
5. Ensure the output data from one node is compatible with the input expected by the next node
6. For nodes with multiple outputs (like IF nodes), connect each output appropriately:
- For IF nodes, first output (index 0) is the TRUE branch, second output (index 1) is the FALSE branch
- For Switch nodes, each output (starting at index 0) corresponds to a different case
## Common Workflow Patterns
1. Trigger → Process → Action
2. Data Source → Filter/Transform → Destination
3. Scheduled Trigger → HTTP Request → Process Response → Send Notification
4. Conditional Branch: Previous Node → IF Node → [True Branch, False Branch]
## Output
Return ONLY a valid JSON object with the "connections" property following the structure above:
\`\`\`json
{
"connections": {
"NodeName1": {
"main": [[{ "node": "NodeName2", "type": "main", "index": 0 }]]
},
"NodeName2": {
"main": [
[{ "node": "TrueBranchNode", "type": "main", "index": 0 }],
[{ "node": "FalseBranchNode", "type": "main", "index": 0 }]
]
},
...
}
}
\`\`\``,
);
const connectionsSchema = z.object({
connections: z
.record(
z
.string()
.describe(
'The source node\'s display name exactly as specified in the node\'s "name" field',
),
z
.object({
main: z.array(
z.array(
z.object({
node: z
.string()
.describe(
'The target node\'s display name exactly as specified in the node\'s "name" field',
),
type: z
.literal('main')
.describe('The connection type, always use "main" for standard n8n connections'),
index: z
.number()
.describe(
'Output index from the source node, typically 0 for single-output nodes, 0=true/1=false for IF nodes',
),
}),
),
),
})
.describe('The connection configuration for a single source node'),
)
.describe('A mapping of all connections in the workflow, where each key is a source node name'),
});
const connectionComposerTool = new DynamicStructuredTool({
name: 'compose_connections',
description:
"Create valid connections between n8n nodes to form a coherent, executable workflow that implements the user's request.",
schema: connectionsSchema,
func: async (input) => {
return { connections: input.connections };
},
});
const humanTemplate = '{workflowJSON}';
const chatPrompt = ChatPromptTemplate.fromMessages([
connectionComposerPrompt,
HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
export const connectionComposerChain = (llm: BaseChatModel) => {
if (!llm.bindTools) {
throw new OperationalError("LLM doesn't support binding tools");
}
return chatPrompt
.pipe(
llm.bindTools([connectionComposerTool], {
tool_choice: connectionComposerTool.name,
}),
)
.pipe((x: AIMessageChunk) => {
const toolCall = x.tool_calls?.[0];
return (toolCall?.args as z.infer<typeof connectionsSchema>).connections;
});
};
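As a hedged usage sketch (not part of this diff): `llm` is assumed to be any chat model instance that supports bindTools, and the node list is invented for illustration.

// Illustrative sketch — assumes `llm` is a tool-capable BaseChatModel instance.
const composeConnections = connectionComposerChain(llm);
const connections = await composeConnections.invoke({
  workflowJSON: [
    '<node>{"name":"Webhook","type":"n8n-nodes-base.webhook","parameters":{},"position":[0,0]}</node>',
    '<node>{"name":"Set","type":"n8n-nodes-base.set","parameters":{},"position":[200,0]}</node>',
  ].join('\n'),
});
// Expected shape: { Webhook: { main: [[{ node: 'Set', type: 'main', index: 0 }]] } }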

View File

@@ -1,106 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
export const nodeSelectorPrompt = new SystemMessage(
`You are an expert in n8n workflows who selects the optimal n8n nodes to implement workflow steps.
## Your Task
For each workflow step, recommend the most appropriate n8n nodes from the allowed list.
## Input Information
- <user_request>: Original user workflow request
- <steps>: List of workflow steps to implement
- <allowed_n8n_nodes>: List of available n8n nodes with descriptions
## CRITICAL REQUIREMENTS
- ONLY recommend nodes that EXACTLY match names from the <allowed_n8n_nodes> list
- NEVER suggest nodes that are not explicitly defined in <allowed_n8n_nodes>
- ALWAYS use the COMPLETE node name as it appears in <node_name> tags (e.g., "Gmail" is NOT sufficient if the node name is "n8n-nodes-base.gmail")
- VERIFY each recommended node exists in the allowed list before including it
## Selection Criteria
1. Functionality - Node must be able to perform the required action
2. Integration - Prefer nodes that integrate directly with services mentioned in the user request
3. Efficiency - Prefer nodes that accomplish the task with minimal configuration
## Output Requirements
For the planned workflow steps, provide:
1. List of all possibly useful nodes in order of preference
2. Concise reasoning for why each node is suitable
3. Use EXACT, FULL node names from <node_name> tags
4. Pay attention to case sensitivity, e.g. "n8n-nodes-base.mysql" is NOT "n8n-nodes-base.mySql"!
Remember: ONLY use nodes from the <allowed_n8n_nodes> list and ALWAYS use their FULL names exactly as provided.`,
);
const nodeSelectorSchema = z.object({
recommended_nodes: z
.array(
z.object({
      score: z.number().describe('Matching score of the node for all the workflow steps'),
node: z
.string()
.describe(
'The full node type identifier (e.g., "n8n-nodes-base.if") from <allowed_n8n_nodes> list',
),
reasoning: z
.string()
.describe(
'Very short explanation of why this node might be used to implement the workflow step',
),
}),
)
.min(1)
.max(20)
.describe(
'Recommended n8n nodes for implementing any of the workflow steps, in order of descending preference. ONLY use nodes from the <allowed_n8n_nodes> list with EXACT full names from <node_name> tags.',
),
});
const nodeSelectorTool = new DynamicStructuredTool({
name: 'select_n8n_nodes',
description:
'Match each workflow step with the most appropriate n8n nodes from the allowed list, ensuring they can implement the required functionality.',
schema: nodeSelectorSchema,
func: async ({ recommended_nodes }) => {
return { recommended_nodes };
},
});
const humanTemplate = `
<user_request>
{prompt}
</user_request>
<steps>
{steps}
</steps>
<allowed_n8n_nodes>
{allowedNodes}
</allowed_n8n_nodes>
`;
const chatPrompt = ChatPromptTemplate.fromMessages([
nodeSelectorPrompt,
HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
export const nodesSelectionChain = (llm: BaseChatModel) => {
if (!llm.bindTools) {
throw new OperationalError("LLM doesn't support binding tools");
}
return chatPrompt
.pipe(
llm.bindTools([nodeSelectorTool], {
tool_choice: nodeSelectorTool.name,
}),
)
.pipe((x: AIMessageChunk) => {
const toolCall = x.tool_calls?.[0];
return (toolCall?.args as z.infer<typeof nodeSelectorSchema>).recommended_nodes;
});
};
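A rough usage sketch of this chain follows (illustrative only): `llm` is again assumed to be a tool-capable chat model, and the steps and allowed-node list are invented to show the three template variables.

// Illustrative sketch — assumes `llm` is a tool-capable BaseChatModel instance.
const recommendedNodes = await nodesSelectionChain(llm).invoke({
  prompt: 'When a form is submitted, append the answers to a Google Sheet',
  steps: JSON.stringify(['Receive form submission', 'Append row to spreadsheet']),
  allowedNodes: [
    '<node_name>n8n-nodes-base.formTrigger</node_name>',
    '<node_name>n8n-nodes-base.googleSheets</node_name>',
  ].join('\n'),
});
// Expected shape: [{ score: 0.9, node: 'n8n-nodes-base.formTrigger', reasoning: '...' }, ...]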

View File

@@ -1,466 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
// Using SystemMessage directly instead of escapeSingleCurlyBrackets to avoid
// issues with double curly braces in n8n expressions
const systemPrompt = new SystemMessage(`You are an expert n8n workflow architect who creates complete node configurations for complex workflows.
## Your Task
Generate fully-formed n8n node configurations with properly structured parameters for each selected node.
## Reference Information
You will receive:
1. The original user workflow request
2. A list of selected n8n nodes with their descriptions and parameters
## Node Configuration Guidelines
1. CREATE PROPER STRUCTURE: Include all required fields (parameters, name, type)
2. USE DESCRIPTIVE NAMES: Each node name should clearly describe its function
3. POPULATE KEY PARAMETERS: Set values for essential parameters based on node type
4. MAINTAIN LOGICAL FLOW: Node parameters should enable proper data flow
5. FOLLOW NODE PATTERNS: Use the correct structure for each node type
6. ADD DOCUMENTATION: Include at least one sticky note, explaining the workflow. Include additional sticky notes for complex parts of the workflow.
## CRITICAL: Correctly Formatting n8n Expressions
When using expressions to reference data from other nodes:
- ALWAYS use the format: \`={{ $('Node Name').item.json.field }}\`
- NEVER omit the equals sign before the double curly braces
- ALWAYS use DOUBLE curly braces, never single
- NEVER use emojis or special characters inside expressions as they will break the expression
- INCORRECT: \`{ $('Node Name').item.json.field }\` (missing =, single braces)
- INCORRECT: \`{{ $('Node Name').item.json.field }}\` (missing =)
- INCORRECT: \`={{ $('👍 Node').item.json.field }}\` (contains emoji)
- CORRECT: \`={{ $('Previous Node').item.json.field }}\`
This format is essential for n8n to properly process the expression.
## IF Node Configuration (CRITICAL)
The IF node allows conditional branching based on comparing values. It has two outputs:
- Output 0: TRUE branch (when conditions are met)
- Output 1: FALSE branch (when conditions are NOT met)
### Key Points for IF Node:
1. MATCH OPERATOR TYPE TO DATA TYPE - Use the correct operator type that matches your data:
- For string values: use "type": "string" with operations like "equals", "contains", "exists"
- For number values: use "type": "number" with operations like "equals", "gt", "lt"
- For boolean values: use "type": "boolean" with operations like "equals", "true", "false"
- For arrays: use "type": "array" with operations like "empty", "contains"
- For objects: use "type": "object" with operations like "exists", "empty"
- For dates: use "type": "dateTime" with operations like "before", "after"
2. USE SINGLE VALUE OPERATORS CORRECTLY:
- Some operators like "exists", "notExists", "empty" don't need a right value
- For these operators, include "singleValue": true in the operator object
- Example: Checking if a string exists: "operator": { "type": "string", "operation": "exists", "singleValue": true }
3. USE CORRECT DATA TYPES FOR RIGHT VALUES:
- Number comparisons: use actual numbers (without quotes) like 5, not "5"
- Boolean comparisons: use true or false (without quotes), not "true" or "false"
- String comparisons: use quoted strings like "text"
- When using expressions for the right value, include the proper format: "={{ expression }}"
### IF Node Examples
#### Example 1: Check if a number is greater than 5
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.amount }}",
"rightValue": 5,
"operator": {
"type": "number",
"operation": "gt"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 2: Check if a string exists
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.email }}",
"rightValue": "",
"operator": {
"type": "string",
"operation": "exists",
"singleValue": true
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 3: Check if a boolean is true
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.isActive }}",
"rightValue": "",
"operator": {
"type": "boolean",
"operation": "true",
"singleValue": true
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 4: Compare string value
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.status }}",
"rightValue": "active",
"operator": {
"type": "string",
"operation": "equals"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 5: Compare boolean value
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.isVerified }}",
"rightValue": true,
"operator": {
"type": "boolean",
"operation": "equals"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
### Common Operator Types and Operations
#### String Operators:
- "exists", "notExists", "empty", "notEmpty" (use with "singleValue": true)
- "equals", "notEquals", "contains", "notContains", "startsWith", "endsWith", "regex"
#### Number Operators:
- "exists", "notExists" (use with "singleValue": true)
- "equals", "notEquals", "gt" (greater than), "lt" (less than), "gte" (greater than or equal), "lte" (less than or equal)
#### Boolean Operators:
- "exists", "notExists" (use with "singleValue": true)
- "true", "false" (use with "singleValue": true)
- "equals", "notEquals"
#### Array Operators:
- "exists", "notExists", "empty", "notEmpty" (use with "singleValue": true)
- "contains", "notContains", "lengthEquals", "lengthNotEquals"
## Other Important Node Structures
### Set Node Structure
\`\`\`json
{
"parameters": {
"assignments": {
"assignments": [
{
"id": "unique-id-1",
"name": "property_name_1",
"value": "property_value_1",
"type": "string"
}
]
},
"options": {}
}
}
\`\`\`
### HTTP Request Node Structures
#### GET Request
\`\`\`json
{
"parameters": {
"url": "https://example.com",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "header-name",
"value": "header-value"
}
]
},
"options": {}
}
}
\`\`\`
#### POST Request
\`\`\`json
{
"parameters": {
"method": "POST",
"url": "https://example.com",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "header-name",
"value": "header-value"
}
]
},
"sendBody": true,
"bodyParameters": {
"parameters": [
{
"name": "field-name",
"value": "field-value"
}
]
},
"options": {}
}
}
\`\`\`
### Sticky Note Structure
\`\`\`json
{
"parameters": {
"content": "Note content here"
},
"name": "Descriptive Name",
"type": "n8n-nodes-base.stickyNote",
"notes": true
}
\`\`\`
## Expression Examples
1. Reference a field from another node:
\`\`\`
"value": "={{ $('Previous Node').item.json.fieldName }}"
\`\`\`
2. Use an expression with string concatenation:
\`\`\`
"value": "={{ 'Hello ' + $('User Input').item.json.name }}"
\`\`\`
3. Access an array item:
\`\`\`
"value": "={{ $('Data Node').item.json.items[0].id }}"
\`\`\`
4. IMPORTANT: How to properly format text fields with expressions
### PREFERRED METHOD: Embedding expressions directly within text
\`\`\`
"text": "=ALERT: It is currently raining in {{ $('Weather Node').item.json.city }}! Temperature: {{ $('Weather Node').item.json.main.temp }}°C"
\`\`\`
### Alternative method: Using string concatenation (use only when needed for complex operations)
\`\`\`
"text": "={{ 'ALERT: It is currently raining in ' + $('Weather Node').item.json.city + '! Temperature: ' + $('Weather Node').item.json.temp + '°C' }}"
\`\`\`
## CRITICAL: Formatting Text Fields with Expressions
### KEY RULES FOR THE PREFERRED METHOD (Embedding expressions in text):
- Start the string with just "=" (not "={{")
- Place each expression inside {{ }} without the = prefix
- MOST READABLE and RECOMMENDED approach
- Example: "text": "=Status: {{ $('Node').item.json.status }} at {{ $('Node').item.json.time }}"
### KEY RULES FOR THE ALTERNATIVE METHOD (String concatenation):
- Only use when you need complex operations not possible with embedded expressions
- Enclose the entire text in a single expression with "={{ }}"
- Put all static text in quotes and connect with + operators
- Example: "text": "={{ 'Status: ' + $('Node').item.json.status + ' at ' + $('Node').item.json.time }}"
### EXAMPLES OF PREFERRED USAGE:
1. Slack message (PREFERRED):
\`\`\`json
"text": "=ALERT: It is currently raining in {{ $('Weather Node').item.json.city }}! Temperature: {{ $('Weather Node').item.json.main.temp }}°C"
\`\`\`
2. Email subject (PREFERRED):
\`\`\`json
"subject": "=Order #{{ $('Order Node').item.json.orderId }} Status Update"
\`\`\`
3. Image prompt (PREFERRED):
\`\`\`json
"prompt": "=Create an image of {{ $('Location Node').item.json.city }} during {{ $('Weather Node').item.json.weather[0].description }}"
\`\`\`
4. Slack message with multiple data points (PREFERRED):
\`\`\`json
"text": "=Customer {{ $('Customer Data').item.json.name }} has placed order #{{ $('Order Data').item.json.id }} for {{ $('Order Data').item.json.amount }}€"
\`\`\`
5. HTTP request URL (PREFERRED):
\`\`\`json
"url": "=https://api.example.com/users/{{ $('User Data').item.json.id }}/orders?status={{ $('Filter').item.json.status }}"
\`\`\`
### COMMON MISTAKES TO AVOID:
- INCORRECT: "text": "ALERT: Temperature is {{ $('Weather Node').item.json.temp }}°C" (missing = prefix)
- INCORRECT: "text": "={{ $('Weather Node').item.json.temp }}" (using expression for dynamic part only)
- INCORRECT: "text": "={{ $('⚠️ Weather').item.json.temp }}" (emoji in node name)
- INCORRECT: "text": "={{ 'ALERT' }} {{ $('Weather').item.json.city }}" (mixing methods)
## Output Format
Return valid JSON that can be consumed by the n8n platform. Your response must match the tool's required schema.`);
const humanTemplate = `
<user_workflow_prompt>
{user_workflow_prompt}
</user_workflow_prompt>
<selected_n8n_nodes>
{nodes}
</selected_n8n_nodes>
`;
export const nodesComposerPrompt = ChatPromptTemplate.fromMessages([
systemPrompt,
HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
const nodeConfigSchema = z.object({
nodes: z
.array(
z
.object({
parameters: z
.record(z.string(), z.any())
.describe(
"The node's configuration parameters. Must include all required parameters for the node type to function properly. For expressions referencing other nodes, use the format: \"={{ $('Node Name').item.json.field }}\"",
)
.refine((data) => Object.keys(data).length > 0, {
message: 'Parameters cannot be empty',
}),
type: z
.string()
.describe('The full node type identifier (e.g., "n8n-nodes-base.httpRequest")'),
name: z
.string()
.describe(
'A descriptive name for the node that clearly indicates its purpose in the workflow',
),
})
.describe('A complete n8n node configuration'),
)
.describe('Array of all nodes for the workflow with their complete configurations'),
});
const generateNodeConfigTool = new DynamicStructuredTool({
name: 'generate_n8n_nodes',
description:
'Generate fully configured n8n nodes with appropriate parameters based on the workflow requirements and selected node types.',
schema: nodeConfigSchema,
func: async (input) => {
return { nodes: input.nodes };
},
});
export const nodesComposerChain = (llm: BaseChatModel) => {
if (!llm.bindTools) {
throw new OperationalError("LLM doesn't support binding tools");
}
return nodesComposerPrompt
.pipe(
llm.bindTools([generateNodeConfigTool], {
tool_choice: generateNodeConfigTool.name,
}),
)
.pipe((x: AIMessageChunk) => {
const toolCall = x.tool_calls?.[0];
return (toolCall?.args as z.infer<typeof nodeConfigSchema>).nodes;
});
};

View File

@@ -1,75 +0,0 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
const validatorPrompt = new SystemMessage(
`You are a workflow prompt validator for n8n. You need to analyze the user's prompt and determine
if they're actually trying to build a workflow that connects different online services or automates a task.
A workflow prompt should:
- Describe an automation or integration task
- Potentially mention connecting services (like Google Sheets, Slack, etc.)
- Describe a process that could be broken down into steps
- Mention something that could be automated
Examples of VALID workflow prompts:
- "Create a workflow that sends a Slack message when a new row is added to Google Sheets"
- "I want to automatically save Gmail attachments to Dropbox"
- "Build a workflow that posts new Twitter mentions to a Discord channel"
- "When I get a new lead in my CRM, add them to my email marketing list"
Examples of INVALID workflow prompts:
- "What's the weather like today?"
- "Tell me a joke"
- "What is n8n?"
- "Help me fix my computer"
- "What time is it?"
Analyze the prompt and determine if it's a valid workflow prompt. Respond with just true or false.`,
);
const validatorSchema = z.object({
isWorkflowPrompt: z.boolean(),
});
const validatorTool = new DynamicStructuredTool({
name: 'validate_prompt',
description: 'Validate if the user prompt is a workflow prompt',
schema: validatorSchema,
func: async ({ isWorkflowPrompt }) => {
return { isWorkflowPrompt };
},
});
const humanTemplate = `
<user_prompt>
{prompt}
</user_prompt>
`;
const chatPrompt = ChatPromptTemplate.fromMessages([
validatorPrompt,
HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
export const validatorChain = (llm: BaseChatModel) => {
if (!llm.bindTools) {
throw new OperationalError("LLM doesn't support binding tools");
}
return chatPrompt
.pipe(
llm.bindTools([validatorTool], {
tool_choice: validatorTool.name,
}),
)
.pipe((x: AIMessageChunk) => {
const toolCall = x.tool_calls?.[0];
return (toolCall?.args as z.infer<typeof validatorTool.schema>).isWorkflowPrompt;
});
};
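A short, hedged usage sketch (not part of this diff); the boolean result is what drives the validator conditional edge in the graph:

// Illustrative sketch — assumes `llm` is a tool-capable BaseChatModel instance.
const isWorkflowPrompt = await validatorChain(llm).invoke({ prompt: 'Tell me a joke' });
// Expected: false, so the graph's conditional edge routes straight to END.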

View File

@@ -1,113 +0,0 @@
import type { IWorkflowBase } from 'n8n-workflow';
export type SimpleWorkflow = Pick<IWorkflowBase, 'nodes' | 'connections'>;
export interface CodeDiffMessage {
role: 'assistant';
type: 'code-diff';
description?: string;
codeDiff?: string;
suggestionId: string;
solution_count: number;
}
export interface QuickReplyOption {
text: string;
type: string;
isFeedback?: boolean;
}
export interface AssistantChatMessage {
role: 'assistant';
type: 'message';
text: string;
step?: string;
codeSnippet?: string;
}
export interface AssistantSummaryMessage {
role: 'assistant';
type: 'summary';
title: string;
content: string;
}
export interface EndSessionMessage {
role: 'assistant';
type: 'event';
eventName: 'end-session';
}
export interface AgentChatMessage {
role: 'assistant';
type: 'agent-suggestion';
title: string;
text: string;
}
export interface AgentThinkingStep {
role: 'assistant';
type: 'intermediate-step';
text: string;
step: string;
}
export interface WorkflowStepMessage {
role: 'assistant';
type: 'workflow-step';
steps: string[];
id: string;
read: boolean;
}
export interface WorkflowNodeMessage {
role: 'assistant';
type: 'workflow-node';
nodes: string[];
id: string;
read: boolean;
}
export interface WorkflowComposedMessage {
role: 'assistant';
type: 'workflow-composed';
nodes: Array<{
parameters: Record<string, unknown>;
type: string;
name: string;
position: [number, number];
}>;
id: string;
read: boolean;
}
export interface WorkflowConnectionsMessage {
role: 'assistant';
type: 'workflow-connections';
workflowJSON: SimpleWorkflow;
id: string;
read: boolean;
}
export interface PromptValidationMessage {
role: 'assistant';
type: 'prompt-validation';
isWorkflowPrompt: boolean;
id: string;
}
export type MessageResponse =
| ((
| AssistantChatMessage
| CodeDiffMessage
| AssistantSummaryMessage
| AgentChatMessage
| AgentThinkingStep
| WorkflowStepMessage
| WorkflowNodeMessage
| WorkflowComposedMessage
| WorkflowConnectionsMessage
| PromptValidationMessage
) & {
quickReplies?: QuickReplyOption[];
})
| EndSessionMessage;
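A narrowing helper over this union could look like the following sketch; it is illustrative only, and the helper and type names are invented here.

// Illustrative sketch — not part of this PR. Narrows MessageResponse to the
// workflow-building variants defined above.
type WorkflowBuildMessage =
  | WorkflowStepMessage
  | WorkflowNodeMessage
  | WorkflowComposedMessage
  | WorkflowConnectionsMessage;

function isWorkflowBuildMessage(message: MessageResponse): message is WorkflowBuildMessage {
  return (
    message.type === 'workflow-step' ||
    message.type === 'workflow-node' ||
    message.type === 'workflow-composed' ||
    message.type === 'workflow-connections'
  );
}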

View File

@@ -1,24 +0,0 @@
import type { BaseMessage } from '@langchain/core/messages';
import { Annotation, END } from '@langchain/langgraph';
import type { SimpleWorkflow } from './types';
export const WorkflowState = Annotation.Root({
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
}),
// The original prompt from the user.
prompt: Annotation<string>({ reducer: (x, y) => y ?? x ?? '' }),
// The list of logically derived workflow steps.
steps: Annotation<string[]>({ reducer: (x, y) => y ?? x ?? [] }),
// The list of candidate or selected n8n node names.
nodes: Annotation<string[]>({ reducer: (x, y) => y ?? x ?? [] }),
// The JSON representation of the workflow being built.
workflowJSON: Annotation<SimpleWorkflow>({
reducer: (x, y) => y ?? x ?? { nodes: [], connections: {} },
}),
// Whether the user prompt is a workflow prompt.
isWorkflowPrompt: Annotation<boolean>({ reducer: (x, y) => y ?? x ?? false }),
// The next phase to be executed in the workflow graph.
next: Annotation<string>({ reducer: (x, y) => y ?? x ?? END, default: () => END }),
});
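To illustrate the reducer semantics above (a partial node update only overrides what it explicitly sets), a minimal stand-alone sketch, not part of this diff:

// Illustrative sketch — mirrors the "y ?? x ?? fallback" reducers above.
const keepLatest =
  <T>(fallback: T) =>
  (x: T | undefined, y: T | undefined): T =>
    y ?? x ?? fallback;

const stepsReducer = keepLatest<string[]>([]);
console.log(stepsReducer(['old step'], undefined)); // ['old step'] — omitted fields keep the prior value
console.log(stepsReducer(['old step'], ['new step'])); // ['new step'] — explicit updates win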

View File

@@ -1,8 +1,43 @@
import type { IRunExecutionData, IWorkflowBase, NodeExecutionSchema } from 'n8n-workflow';
import { z } from 'zod';
import { Z } from 'zod-class';
export class AiBuilderChatRequestDto extends Z.class({
payload: z.object({
question: z.string(),
role: z.literal('user'),
type: z.literal('message'),
text: z.string(),
workflowContext: z.object({
currentWorkflow: z
.custom<Partial<IWorkflowBase>>((val: Partial<IWorkflowBase>) => {
if (!val.nodes && !val.connections) {
return false;
}
return val;
})
.optional(),
executionData: z
.custom<IRunExecutionData['resultData']>((val: IRunExecutionData['resultData']) => {
if (!val.runData && !val.error) {
return false;
}
return val;
})
.optional(),
executionSchema: z
.custom<NodeExecutionSchema[]>((val: NodeExecutionSchema[]) => {
// Fail validation if the value is not an array or if no item has both nodeName and schema properties
if (!Array.isArray(val) || val.every((item) => !item.nodeName || !item.schema)) {
return false;
}
return val;
})
.optional(),
}),
}),
}) {}
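For orientation, a body that satisfies this DTO might look like the sketch below; all values are invented for illustration and are not part of this diff.

// Hypothetical payload for the builder chat endpoint — shape only, values invented.
const exampleBody = {
  payload: {
    role: 'user' as const,
    type: 'message' as const,
    question: 'Create a workflow that posts new Typeform answers to Slack',
    text: 'Create a workflow that posts new Typeform answers to Slack',
    workflowContext: {
      currentWorkflow: { nodes: [], connections: {} },
    },
  },
};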

View File

@@ -0,0 +1,6 @@
import { z } from 'zod';
import { Z } from 'zod-class';
export class AiSessionRetrievalRequestDto extends Z.class({
workflowId: z.string().optional(),
}) {}

View File

@@ -3,6 +3,7 @@ export { AiChatRequestDto } from './ai/ai-chat-request.dto';
export { AiBuilderChatRequestDto } from './ai/ai-build-request.dto';
export { AiApplySuggestionRequestDto } from './ai/ai-apply-suggestion-request.dto';
export { AiFreeCreditsRequestDto } from './ai/ai-free-credits-request.dto';
export { AiSessionRetrievalRequestDto } from './ai/ai-session-retrieval-request.dto';
export { BinaryDataQueryDto } from './binary-data/binary-data-query.dto';
export { BinaryDataSignedQueryDto } from './binary-data/binary-data-signed-query.dto';

View File

@@ -1,47 +1,34 @@
import { AiWorkflowBuilderService } from '@n8n/ai-workflow-builder';
import { Command } from '@n8n/decorators';
import { Container } from '@n8n/di';
import fs from 'fs';
import { jsonParse, UserError } from 'n8n-workflow';
import { z } from 'zod';
import { NodeTypes } from '@/node-types';
import { WorkerPool } from './worker-pool';
import { BaseCommand } from '../base-command';
interface WorkflowGeneratedMessage {
role: 'assistant';
type: 'workflow-generated';
codeSnippet: string;
}
// interface WorkflowGenerationDatasetItem {
// prompt: string;
// referenceWorkflow: string;
// }
// We'll use this later for evals
// async function _waitForWorkflowGenerated(aiResponse: AsyncGenerator<{ messages: any[] }>) {
// let workflowJson: string | undefined;
interface WorkflowGenerationDatasetItem {
prompt: string;
referenceWorkflow: string;
}
// for await (const chunk of aiResponse) {
// const wfGeneratedMessage = chunk.messages.find(
// (m): m is WorkflowGeneratedMessage =>
// 'type' in m && (m as { type?: string }).type === 'workflow-generated',
// );
async function waitForWorkflowGenerated(aiResponse: AsyncGenerator<{ messages: any[] }>) {
let workflowJson: string | undefined;
// if (wfGeneratedMessage?.codeSnippet) {
// workflowJson = wfGeneratedMessage.codeSnippet;
// }
// }
for await (const chunk of aiResponse) {
const wfGeneratedMessage = chunk.messages.find(
(m): m is WorkflowGeneratedMessage =>
'type' in m && (m as { type?: string }).type === 'workflow-generated',
);
// if (!workflowJson) {
// // FIXME: Use proper error class
// throw new UserError('No workflow generated message found in AI response');
// }
if (wfGeneratedMessage?.codeSnippet) {
workflowJson = wfGeneratedMessage.codeSnippet;
}
}
if (!workflowJson) {
// FIXME: Use proper error class
throw new UserError('No workflow generated message found in AI response');
}
return workflowJson;
}
// return workflowJson;
// }
const flagsSchema = z.object({
prompt: z
@@ -86,135 +73,139 @@ export class TTWFGenerateCommand extends BaseCommand<z.infer<typeof flagsSchema>
/**
* Reads the dataset file in JSONL format
*/
private async readDataset(filePath: string): Promise<WorkflowGenerationDatasetItem[]> {
try {
const data = await fs.promises.readFile(filePath, { encoding: 'utf-8' });
// We'll use this later for evals
// private async readDataset(filePath: string): Promise<WorkflowGenerationDatasetItem[]> {
// try {
// const data = await fs.promises.readFile(filePath, { encoding: 'utf-8' });
const lines = data.split('\n').filter((line) => line.trim() !== '');
// const lines = data.split('\n').filter((line) => line.trim() !== '');
if (lines.length === 0) {
throw new UserError('Dataset file is empty or contains no valid lines');
}
// if (lines.length === 0) {
// throw new UserError('Dataset file is empty or contains no valid lines');
// }
return lines.map((line, index) => {
try {
return jsonParse<WorkflowGenerationDatasetItem>(line);
} catch (error) {
throw new UserError(`Invalid JSON line on index: ${index}`);
}
});
} catch (error) {
throw new UserError(`Failed to read dataset file: ${error}`);
}
}
// return lines.map((line, index) => {
// try {
// return jsonParse<WorkflowGenerationDatasetItem>(line);
// } catch (error) {
// throw new UserError(`Invalid JSON line on index: ${index}`);
// }
// });
// } catch (error) {
// throw new UserError(`Failed to read dataset file: ${error}`);
// }
// }
async run() {
const { flags } = this;
if (!flags.input && !flags.prompt) {
throw new UserError('Either --input or --prompt must be provided.');
}
if (flags.input && flags.prompt) {
throw new UserError('You cannot use --input and --prompt together. Use one or the other.');
}
const nodeTypes = Container.get(NodeTypes);
const wfBuilder = new AiWorkflowBuilderService(nodeTypes);
if (flags.prompt) {
// Single prompt mode
if (flags.output && fs.existsSync(flags.output)) {
if (fs.lstatSync(flags.output).isDirectory()) {
this.logger.info('The parameter --output must be a writeable file');
return;
}
this.logger.warn('The output file already exists. It will be overwritten.');
fs.unlinkSync(flags.output);
}
try {
this.logger.info(`Processing prompt: ${flags.prompt}`);
const aiResponse = wfBuilder.chat({ question: flags.prompt });
const generatedWorkflow = await waitForWorkflowGenerated(aiResponse);
this.logger.info(`Generated workflow for prompt: ${flags.prompt}`);
if (flags.output) {
fs.writeFileSync(flags.output, generatedWorkflow);
this.logger.info(`Workflow saved to ${flags.output}`);
} else {
this.logger.info('Generated Workflow:');
// Pretty print JSON
this.logger.info(JSON.stringify(JSON.parse(generatedWorkflow), null, 2));
}
} catch (e) {
const errorMessage = e instanceof Error ? e.message : 'An error occurred';
this.logger.error(`Error processing prompt "${flags.prompt}": ${errorMessage}`);
}
} else if (flags.input) {
// Batch mode
const output = flags.output ?? 'ttwf-results.jsonl';
if (fs.existsSync(output)) {
if (fs.lstatSync(output).isDirectory()) {
this.logger.info('The parameter --output must be a writeable file');
return;
}
this.logger.warn('The output file already exists. It will be overwritten.');
fs.unlinkSync(output);
}
const pool = new WorkerPool<string>(flags.concurrency ?? 1);
const dataset = await this.readDataset(flags.input);
// Open file for writing results
const outputStream = fs.createWriteStream(output, { flags: 'a' });
const datasetWithLimit = (flags.limit ?? -1) > 0 ? dataset.slice(0, flags.limit) : dataset;
await Promise.allSettled(
datasetWithLimit.map(async (item) => {
try {
const generatedWorkflow = await pool.execute(async () => {
this.logger.info(`Processing prompt: ${item.prompt}`);
const aiResponse = wfBuilder.chat({ question: item.prompt });
return await waitForWorkflowGenerated(aiResponse);
});
this.logger.info(`Generated workflow for prompt: ${item.prompt}`);
// Write the generated workflow to the output file
outputStream.write(
JSON.stringify({
prompt: item.prompt,
generatedWorkflow,
referenceWorkflow: item.referenceWorkflow,
}) + '\n',
);
} catch (e) {
const errorMessage = e instanceof Error ? e.message : 'An error occurred';
this.logger.error(`Error processing prompt "${item.prompt}": ${errorMessage}`);
// Optionally write the error to the output file
outputStream.write(
JSON.stringify({
prompt: item.prompt,
referenceWorkflow: item.referenceWorkflow,
errorMessage,
}) + '\n',
);
}
}),
this.logger.error(
'This command is disabled until all ai-workflow-builder related PRs are merged',
);
// const { flags } = this;
outputStream.end();
}
// if (!flags.input && !flags.prompt) {
// throw new UserError('Either --input or --prompt must be provided.');
// }
// if (flags.input && flags.prompt) {
// throw new UserError('You cannot use --input and --prompt together. Use one or the other.');
// }
// const nodeTypes = Container.get(NodeTypes);
// const wfBuilder = new AiWorkflowBuilderService(nodeTypes);
// if (flags.prompt) {
// // Single prompt mode
// if (flags.output && fs.existsSync(flags.output)) {
// if (fs.lstatSync(flags.output).isDirectory()) {
// this.logger.info('The parameter --output must be a writeable file');
// return;
// }
// this.logger.warn('The output file already exists. It will be overwritten.');
// fs.unlinkSync(flags.output);
// }
// try {
// this.logger.info(`Processing prompt: ${flags.prompt}`);
// const aiResponse = wfBuilder.chat({ question: flags.prompt });
// const generatedWorkflow = await waitForWorkflowGenerated(aiResponse);
// this.logger.info(`Generated workflow for prompt: ${flags.prompt}`);
// if (flags.output) {
// fs.writeFileSync(flags.output, generatedWorkflow);
// this.logger.info(`Workflow saved to ${flags.output}`);
// } else {
// this.logger.info('Generated Workflow:');
// // Pretty print JSON
// this.logger.info(JSON.stringify(JSON.parse(generatedWorkflow), null, 2));
// }
// } catch (e) {
// const errorMessage = e instanceof Error ? e.message : 'An error occurred';
// this.logger.error(`Error processing prompt "${flags.prompt}": ${errorMessage}`);
// }
// } else if (flags.input) {
// // Batch mode
// const output = flags.output ?? 'ttwf-results.jsonl';
// if (fs.existsSync(output)) {
// if (fs.lstatSync(output).isDirectory()) {
// this.logger.info('The parameter --output must be a writeable file');
// return;
// }
// this.logger.warn('The output file already exists. It will be overwritten.');
// fs.unlinkSync(output);
// }
// const pool = new WorkerPool<string>(flags.concurrency ?? 1);
// const dataset = await this.readDataset(flags.input);
// // Open file for writing results
// const outputStream = fs.createWriteStream(output, { flags: 'a' });
// const datasetWithLimit = (flags.limit ?? -1) > 0 ? dataset.slice(0, flags.limit) : dataset;
// await Promise.allSettled(
// datasetWithLimit.map(async (item) => {
// try {
// const generatedWorkflow = await pool.execute(async () => {
// this.logger.info(`Processing prompt: ${item.prompt}`);
// const aiResponse = wfBuilder.chat({ question: item.prompt });
// return await waitForWorkflowGenerated(aiResponse);
// });
// this.logger.info(`Generated workflow for prompt: ${item.prompt}`);
// // Write the generated workflow to the output file
// outputStream.write(
// JSON.stringify({
// prompt: item.prompt,
// generatedWorkflow,
// referenceWorkflow: item.referenceWorkflow,
// }) + '\n',
// );
// } catch (e) {
// const errorMessage = e instanceof Error ? e.message : 'An error occurred';
// this.logger.error(`Error processing prompt "${item.prompt}": ${errorMessage}`);
// // Optionally write the error to the output file
// outputStream.write(
// JSON.stringify({
// prompt: item.prompt,
// referenceWorkflow: item.referenceWorkflow,
// errorMessage,
// }) + '\n',
// );
// }
// }),
// );
// outputStream.end();
// }
}
async catch(error: Error) {

View File

@@ -2,6 +2,7 @@ import type {
AiAskRequestDto,
AiApplySuggestionRequestDto,
AiChatRequestDto,
AiBuilderChatRequestDto,
} from '@n8n/api-types';
import type { AuthenticatedRequest } from '@n8n/db';
import type { AiAssistantSDK } from '@n8n_io/ai-assistant-sdk';
@@ -27,6 +28,7 @@ describe('AiController', () => {
jest.clearAllMocks();
response.header.mockReturnThis();
response.status.mockReturnThis();
});
describe('chat', () => {
@@ -110,4 +112,134 @@ describe('AiController', () => {
);
});
});
describe('build', () => {
const payload: AiBuilderChatRequestDto = {
payload: {
text: 'Create a workflow',
type: 'message',
role: 'user',
workflowContext: {
currentWorkflow: { id: 'workflow123' },
},
},
};
it('should handle build request successfully', async () => {
const mockChunks = [
{ messages: [{ role: 'assistant', type: 'message', text: 'Building...' } as const] },
{ messages: [{ role: 'assistant', type: 'workflow-updated', codeSnippet: '{}' } as const] },
];
// Create an async generator that yields chunks
async function* mockChatGenerator() {
for (const chunk of mockChunks) {
yield chunk;
}
}
workflowBuilderService.chat.mockReturnValue(mockChatGenerator());
await controller.build(request, response, payload);
expect(workflowBuilderService.chat).toHaveBeenCalledWith(
{
message: 'Create a workflow',
workflowContext: {
currentWorkflow: { id: 'workflow123' },
executionData: undefined,
executionSchema: undefined,
},
},
request.user,
);
expect(response.header).toHaveBeenCalledWith('Content-type', 'application/json-lines');
expect(response.flush).toHaveBeenCalled();
expect(response.write).toHaveBeenCalledTimes(2);
expect(response.write).toHaveBeenNthCalledWith(
1,
JSON.stringify(mockChunks[0]) + '⧉⇋⇋➽⌑⧉§§\n',
);
expect(response.write).toHaveBeenNthCalledWith(
2,
JSON.stringify(mockChunks[1]) + '⧉⇋⇋➽⌑⧉§§\n',
);
expect(response.end).toHaveBeenCalled();
});
it('should handle errors during streaming and send error chunk', async () => {
const mockError = new Error('Tool execution failed');
// Create an async generator that throws an error
async function* mockChatGeneratorWithError() {
yield { messages: [{ role: 'assistant', type: 'message', text: 'Starting...' } as const] };
throw mockError;
}
workflowBuilderService.chat.mockReturnValue(mockChatGeneratorWithError());
await controller.build(request, response, payload);
expect(workflowBuilderService.chat).toHaveBeenCalled();
expect(response.header).toHaveBeenCalledWith('Content-type', 'application/json-lines');
expect(response.write).toHaveBeenCalledTimes(2);
// First chunk
expect(response.write).toHaveBeenNthCalledWith(
1,
JSON.stringify({
messages: [{ role: 'assistant', type: 'message', text: 'Starting...' }],
}) + '⧉⇋⇋➽⌑⧉§§\n',
);
// Error chunk
expect(response.write).toHaveBeenNthCalledWith(
2,
JSON.stringify({
messages: [
{
role: 'assistant',
type: 'error',
content: 'Tool execution failed',
},
],
}) + '⧉⇋⇋➽⌑⧉§§\n',
);
expect(response.end).toHaveBeenCalled();
});
it('should handle errors before streaming starts', async () => {
const mockError = new Error('Failed to initialize');
workflowBuilderService.chat.mockImplementation(() => {
throw mockError;
});
response.headersSent = false;
await controller.build(request, response, payload);
expect(response.status).toHaveBeenCalledWith(500);
expect(response.json).toHaveBeenCalledWith({
code: 500,
message: 'Failed to initialize',
});
expect(response.write).not.toHaveBeenCalled();
expect(response.end).not.toHaveBeenCalled();
});
it('should not try to send error response if headers already sent', async () => {
const mockError = new Error('Failed after headers');
workflowBuilderService.chat.mockImplementation(() => {
throw mockError;
});
response.headersSent = true;
await controller.build(request, response, payload);
expect(response.status).not.toHaveBeenCalled();
expect(response.json).not.toHaveBeenCalled();
expect(response.end).toHaveBeenCalled();
});
});
});

View File

@@ -5,6 +5,7 @@ import {
AiAskRequestDto,
AiFreeCreditsRequestDto,
AiBuilderChatRequestDto,
AiSessionRetrievalRequestDto,
} from '@n8n/api-types';
import { AuthenticatedRequest } from '@n8n/db';
import { Body, Post, RestController } from '@n8n/decorators';
@@ -35,32 +36,69 @@ export class AiController {
private readonly userService: UserService,
) {}
@Post('/build', { rateLimit: { limit: 100 } })
// Use usesTemplates flag to bypass the send() wrapper which would cause
// "Cannot set headers after they are sent" error for streaming responses.
// This ensures errors during streaming are handled within the stream itself.
@Post('/build', { rateLimit: { limit: 100 }, usesTemplates: true })
async build(
req: AuthenticatedRequest,
res: FlushableResponse,
@Body payload: AiBuilderChatRequestDto,
) {
try {
const { text, workflowContext } = payload.payload;
const aiResponse = this.workflowBuilderService.chat(
{
question: payload.payload.question ?? '',
message: text,
workflowContext: {
currentWorkflow: workflowContext.currentWorkflow,
executionData: workflowContext.executionData,
executionSchema: workflowContext.executionSchema,
},
},
req.user,
);
res.header('Content-type', 'application/json-lines').flush();
try {
// Handle the stream
for await (const chunk of aiResponse) {
res.flush();
res.write(JSON.stringify(chunk) + '⧉⇋⇋➽⌑⧉§§\n');
}
} catch (streamError) {
// If an error occurs during streaming, send it as part of the stream
// This prevents "Cannot set headers after they are sent" error
assert(streamError instanceof Error);
// Send error as proper error type now that frontend supports it
const errorChunk = {
messages: [
{
role: 'assistant',
type: 'error',
content: streamError.message,
},
],
};
res.write(JSON.stringify(errorChunk) + '⧉⇋⇋➽⌑⧉§§\n');
}
res.end();
} catch (e) {
// This catch block handles errors that occur before streaming starts
// Since headers haven't been sent yet, we can still send a proper error response
assert(e instanceof Error);
throw new InternalServerError(e.message, e);
if (!res.headersSent) {
res.status(500).json({
code: 500,
message: e.message,
});
} else {
// If headers were already sent, don't send a second error response
res.end();
}
}
}
@@ -157,4 +195,19 @@ export class AiController {
throw new InternalServerError(e.message, e);
}
}
@Post('/sessions', { rateLimit: { limit: 100 } })
async getSessions(
req: AuthenticatedRequest,
_: Response,
@Body payload: AiSessionRetrievalRequestDto,
) {
try {
const sessions = await this.workflowBuilderService.getSessions(payload.workflowId, req.user);
return sessions;
} catch (e) {
assert(e instanceof Error);
throw new InternalServerError(e.message, e);
}
}
}
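On the consuming side, the '⧉⇋⇋➽⌑⧉§§' separator used above could be handled with something like the following sketch; the function name is invented and this is not part of the PR.

// Illustrative sketch — splits a buffered /build response on the separator used above.
const STREAM_SEPARATOR = '⧉⇋⇋➽⌑⧉§§\n';

function parseBuilderStream(buffer: string): Array<{ messages: unknown[] }> {
  return buffer
    .split(STREAM_SEPARATOR)
    .filter((part) => part.trim() !== '')
    .map((part) => JSON.parse(part) as { messages: unknown[] });
}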

View File

@@ -1,4 +1,6 @@
import { AiWorkflowBuilderService } from '@n8n/ai-workflow-builder';
import { ChatPayload } from '@n8n/ai-workflow-builder/dist/workflow-builder-agent';
import { Logger } from '@n8n/backend-common';
import { GlobalConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import { AiAssistantClient } from '@n8n_io/ai-assistant-sdk';
@@ -20,6 +22,7 @@ export class WorkflowBuilderService {
private readonly nodeTypes: NodeTypes,
private readonly license: License,
private readonly config: GlobalConfig,
private readonly logger: Logger,
) {}
private async getService(): Promise<AiWorkflowBuilderService> {
@@ -40,13 +43,19 @@ export class WorkflowBuilderService {
});
}
this.service = new AiWorkflowBuilderService(this.nodeTypes, client);
this.service = new AiWorkflowBuilderService(this.nodeTypes, client, this.logger);
}
return this.service;
}
async *chat(payload: { question: string }, user: IUser) {
async *chat(payload: ChatPayload, user: IUser) {
const service = await this.getService();
yield* service.chat(payload, user);
}
async getSessions(workflowId: string | undefined, user: IUser) {
const service = await this.getService();
const sessions = await service.getSessions(workflowId, user);
return sessions;
}
}

View File

@@ -399,73 +399,147 @@ RichTextMessage.args = {
]),
};
export const WorkflowStepsChat = Template.bind({});
WorkflowStepsChat.args = {
export const TextMessageWithRegularRating = Template.bind({});
TextMessageWithRegularRating.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '123',
type: 'workflow-step',
id: '127',
type: 'text',
role: 'assistant',
steps: [
'Create a new HTTP Trigger node',
'Add a Transform node to process the data',
'Connect to your database using PostgreSQL node',
'Send confirmation email with SendGrid node',
],
content:
"I've generated a workflow that automatically processes your CSV files and sends email notifications. The workflow includes error handling and data validation steps.",
read: false,
showRating: true,
ratingStyle: 'regular',
showFeedback: true,
},
]),
};
export const WorkflowNodesChat = Template.bind({});
WorkflowNodesChat.args = {
export const TextMessageWithMinimalRating = Template.bind({});
TextMessageWithMinimalRating.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '124',
type: 'workflow-node',
id: '128',
type: 'text',
role: 'assistant',
nodes: ['HTTP Trigger', 'Transform', 'PostgreSQL', 'SendGrid'],
content:
"Here's a quick tip: You can use the Code node to transform data between different formats.",
read: false,
showRating: true,
ratingStyle: 'minimal',
showFeedback: true,
},
]),
};
export const ComposedNodesChat = Template.bind({});
ComposedNodesChat.args = {
export const MultipleMessagesWithRatings = Template.bind({});
MultipleMessagesWithRatings.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '125',
type: 'workflow-composed',
id: '129',
type: 'text',
role: 'user',
content: 'Can you help me create a workflow for processing webhooks?',
read: true,
},
{
id: '130',
type: 'text',
role: 'assistant',
nodes: [
{
name: 'HTTP Trigger',
type: 'n8n-nodes-base.httpTrigger',
parameters: {
path: '/webhook',
authentication: 'none',
},
position: [100, 100],
content: "I'll help you create a webhook processing workflow. Here are the steps:",
read: true,
showRating: true,
ratingStyle: 'minimal',
showFeedback: true,
},
{
name: 'Transform',
type: 'n8n-nodes-base.set',
parameters: {
values: { field: 'value' },
id: '131',
type: 'text',
role: 'assistant',
content: `Follow these steps:
1. Add a Webhook node to receive incoming data
2. Use a Switch node to route based on webhook type
3. Add data transformation with a Code node
4. Store results in your database`,
read: true,
},
position: [300, 100],
{
id: '132',
type: 'text',
role: 'assistant',
content:
'This workflow will handle incoming webhooks efficiently and store the processed data.',
read: false,
showRating: true,
ratingStyle: 'regular',
showFeedback: true,
},
]),
};
export const CodeDiffWithMinimalRating = Template.bind({});
CodeDiffWithMinimalRating.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '133',
type: 'code-diff',
role: 'assistant',
description: 'Fix the error handling in your code',
codeDiff:
'@@ -1,3 +1,8 @@\\n const data = await fetchData();\\n-return data;\\n+\\n+if (!data || data.error) {\\n+ throw new Error(data?.error || "Failed to fetch data");\\n+}\\n+\\n+return data;',
suggestionId: 'fix_error_handling',
read: false,
showRating: true,
ratingStyle: 'minimal',
showFeedback: true,
},
]),
};
export const ToolMessageRunning = Template.bind({});
ToolMessageRunning.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '127',
type: 'tool',
role: 'assistant',
toolName: 'code_tool',
toolCallId: 'call_123',
status: 'running',
updates: [
{
type: 'progress',
data: { message: 'Analyzing the codebase structure...' },
timestamp: new Date().toISOString(),
},
{
type: 'input',
data: {
query: 'Find all Vue components in the project',
path: '/src/components',
},
timestamp: new Date().toISOString(),
},
],
read: false,
@@ -473,18 +547,169 @@ ComposedNodesChat.args = {
]),
};
export const RateWorkflowMessage = Template.bind({});
RateWorkflowMessage.args = {
export const ToolMessageCompleted = Template.bind({});
ToolMessageCompleted.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '126',
type: 'rate-workflow',
id: '128',
type: 'tool',
role: 'assistant',
content: 'Is this workflow helpful?',
toolName: 'search_files',
toolCallId: 'call_456',
status: 'completed',
updates: [
{
type: 'input',
data: {
pattern: '*.vue',
directory: '/src',
},
timestamp: new Date().toISOString(),
},
{
type: 'progress',
data: { message: 'Searching for Vue files...' },
timestamp: new Date().toISOString(),
},
{
type: 'output',
data: {
files: [
'/src/components/Button.vue',
'/src/components/Modal.vue',
'/src/views/Home.vue',
],
count: 3,
},
timestamp: new Date().toISOString(),
},
],
read: false,
},
]),
};
export const ToolMessageError = Template.bind({});
ToolMessageError.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '129',
type: 'tool',
role: 'assistant',
toolName: 'database_query',
toolCallId: 'call_789',
status: 'error',
updates: [
{
type: 'input',
data: {
query: 'SELECT * FROM users WHERE id = 123',
database: 'production',
},
timestamp: new Date().toISOString(),
},
{
type: 'progress',
data: { message: 'Connecting to database...' },
timestamp: new Date().toISOString(),
},
{
type: 'error',
data: {
error: 'Connection timeout',
details: 'Failed to connect to database after 30 seconds',
},
timestamp: new Date().toISOString(),
},
],
read: false,
},
]),
};
export const MixedMessagesWithTools = Template.bind({});
MixedMessagesWithTools.args = {
user: {
firstName: 'Max',
lastName: 'Test',
},
messages: getMessages([
{
id: '130',
type: 'text',
role: 'user',
content: 'Can you help me analyze my workflow?',
read: true,
},
{
id: '131',
type: 'text',
role: 'assistant',
content: "I'll analyze your workflow now. Let me search for the relevant files.",
read: true,
},
{
id: '132',
type: 'tool',
role: 'assistant',
toolName: 'search_workflow_files',
toolCallId: 'call_999',
status: 'completed',
updates: [
{
type: 'input',
data: {
workflowId: 'wf_123',
includeNodes: true,
},
timestamp: new Date().toISOString(),
},
{
type: 'progress',
data: { message: 'Loading workflow configuration...' },
timestamp: new Date().toISOString(),
},
{
type: 'progress',
data: { message: 'Analyzing node connections...' },
timestamp: new Date().toISOString(),
},
{
type: 'output',
data: {
nodes: 5,
connections: 8,
issues: ['Missing error handling in HTTP node', 'Unused variable in Code node'],
},
timestamp: new Date().toISOString(),
},
],
read: true,
},
{
id: '133',
type: 'text',
role: 'assistant',
content: 'I found some issues in your workflow. Here are my recommendations:',
read: true,
},
{
id: '134',
type: 'code-diff',
role: 'assistant',
description: 'Add error handling to your HTTP node',
codeDiff:
// eslint-disable-next-line n8n-local-rules/no-interpolation-in-regular-string
'@@ -1,3 +1,8 @@\n const response = await $http.request(options);\n-return response.data;\n+\n+if (response.status !== 200) {\n+ throw new Error(`HTTP request failed with status ${response.status}`);\n+}\n+\n+return response.data;',
suggestionId: 'fix_http_error',
read: false,
},
]),

View File

@@ -1,18 +1,9 @@
<script setup lang="ts">
import { computed, ref } from 'vue';
import { computed, nextTick, ref, watch } from 'vue';
import BlockMessage from './messages/BlockMessage.vue';
import CodeDiffMessage from './messages/CodeDiffMessage.vue';
import ErrorMessage from './messages/ErrorMessage.vue';
import EventMessage from './messages/EventMessage.vue';
import TextMessage from './messages/TextMessage.vue';
import ComposedNodesMessage from './messages/workflow/ComposedNodesMessage.vue';
import RateWorkflowMessage from './messages/workflow/RateWorkflowMessage.vue';
import WorkflowGeneratedMessage from './messages/workflow/WorkflowGeneratedMessage.vue';
import WorkflowNodesMessage from './messages/workflow/WorkflowNodesMessage.vue';
import WorkflowStepsMessage from './messages/workflow/WorkflowStepsMessage.vue';
import MessageWrapper from './messages/MessageWrapper.vue';
import { useI18n } from '../../composables/useI18n';
import type { ChatUI } from '../../types/assistant';
import type { ChatUI, RatingFeedback } from '../../types/assistant';
import AssistantIcon from '../AskAssistantIcon/AssistantIcon.vue';
import AssistantLoadingMessage from '../AskAssistantLoadingMessage/AssistantLoadingMessage.vue';
import AssistantText from '../AskAssistantText/AssistantText.vue';
@@ -36,6 +27,7 @@ interface Props {
sessionId?: string;
title?: string;
placeholder?: string;
scrollOnNewMessage?: boolean;
}
const emit = defineEmits<{
@@ -43,9 +35,7 @@ const emit = defineEmits<{
message: [string, string?, boolean?];
codeReplace: [number];
codeUndo: [number];
thumbsUp: [];
thumbsDown: [];
submitFeedback: [string];
feedback: [RatingFeedback];
}>();
const onClose = () => emit('close');
@@ -59,11 +49,22 @@ const props = withDefaults(defineProps<Props>(), {
messages: () => [],
loadingMessage: undefined,
sessionId: undefined,
scrollOnNewMessage: false,
});
// Ensure all messages have required id and read properties
const normalizedMessages = computed(() => {
return props.messages.map((msg, index) => ({
...msg,
id: msg.id || `msg-${index}`,
read: msg.read ?? true,
}));
});
const textInputValue = ref<string>('');
const chatInput = ref<HTMLTextAreaElement | null>(null);
const messagesRef = ref<HTMLDivElement | null>(null);
const sessionEnded = computed(() => {
return isEndOfSessionEvent(props.messages?.[props.messages.length - 1]);
@@ -101,17 +102,36 @@ function growInput() {
chatInput.value.style.height = `${Math.min(scrollHeight, MAX_CHAT_INPUT_HEIGHT)}px`;
}
function onThumbsUp() {
emit('thumbsUp');
function onRateMessage(feedback: RatingFeedback) {
emit('feedback', feedback);
}
function onThumbsDown() {
emit('thumbsDown');
function scrollToBottom() {
if (messagesRef.value) {
messagesRef.value?.scrollTo({
top: messagesRef.value.scrollHeight,
behavior: 'smooth',
});
}
function onSubmitFeedback(feedback: string) {
emit('submitFeedback', feedback);
}
watch(sendDisabled, () => {
chatInput.value?.focus();
});
watch(
() => props.messages,
async (messages) => {
// When new messages arrive and scrolling is enabled, scroll to the bottom of the chat
if (props.scrollOnNewMessage && messages.length > 0) {
// Wait for DOM updates before scrolling
await nextTick();
// Check if messagesRef is available after nextTick
if (messagesRef.value) {
scrollToBottom();
}
}
},
{ immediate: true, deep: true },
);
</script>
<template>
@@ -129,85 +149,28 @@ function onSubmitFeedback(feedback: string) {
</div>
</div>
<div :class="$style.body">
<div v-if="messages?.length || loadingMessage" :class="$style.messages">
<div v-if="messages?.length">
<div
v-if="normalizedMessages?.length || loadingMessage"
ref="messagesRef"
:class="$style.messages"
>
<div v-if="normalizedMessages?.length">
<data
v-for="(message, i) in messages"
:key="i"
v-for="(message, i) in normalizedMessages"
:key="message.id"
:data-test-id="
message.role === 'assistant' ? 'chat-message-assistant' : 'chat-message-user'
"
>
<TextMessage
v-if="message.type === 'text'"
<MessageWrapper
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:is-first-of-role="i === 0 || message.role !== normalizedMessages[i - 1].role"
:user="user"
:streaming="streaming"
:is-last-message="i === messages.length - 1"
/>
<BlockMessage
v-else-if="message.type === 'block'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
:streaming="streaming"
:is-last-message="i === messages.length - 1"
/>
<ErrorMessage
v-else-if="message.type === 'error'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<EventMessage
v-else-if="message.type === 'event'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<CodeDiffMessage
v-else-if="message.type === 'code-diff'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
:streaming="streaming"
:is-last-message="i === messages.length - 1"
:is-last-message="i === normalizedMessages.length - 1"
@code-replace="() => emit('codeReplace', i)"
@code-undo="() => emit('codeUndo', i)"
/>
<WorkflowStepsMessage
v-else-if="message.type === 'workflow-step'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<WorkflowNodesMessage
v-else-if="message.type === 'workflow-node'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<ComposedNodesMessage
v-else-if="message.type === 'workflow-composed'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<WorkflowGeneratedMessage
v-else-if="message.type === 'workflow-generated'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
/>
<RateWorkflowMessage
v-else-if="message.type === 'rate-workflow'"
:message="message"
:is-first-of-role="i === 0 || message.role !== messages[i - 1].role"
:user="user"
@thumbs-up="onThumbsUp"
@thumbs-down="onThumbsDown"
@submit-feedback="onSubmitFeedback"
@feedback="onRateMessage"
/>
<div
@@ -215,7 +178,7 @@ function onSubmitFeedback(feedback: string) {
!streaming &&
'quickReplies' in message &&
message.quickReplies?.length &&
i === messages.length - 1
i === normalizedMessages.length - 1
"
:class="$style.quickReplies"
>
@@ -237,7 +200,7 @@ function onSubmitFeedback(feedback: string) {
</div>
<div
v-if="loadingMessage"
:class="{ [$style.message]: true, [$style.loading]: messages?.length }"
:class="{ [$style.message]: true, [$style.loading]: normalizedMessages?.length }"
>
<AssistantLoadingMessage :message="loadingMessage" />
</div>
@@ -355,6 +318,20 @@ function onSubmitFeedback(feedback: string) {
padding: var(--spacing-xs);
overflow-y: auto;
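/* Descriptive note (editor-added): fall back to the standard scrollbar-width property in engines
   without ::-webkit-scrollbar support (e.g. Firefox); otherwise style the WebKit scrollbar directly */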
@supports not (selector(::-webkit-scrollbar)) {
scrollbar-width: thin;
}
@supports selector(::-webkit-scrollbar) {
&::-webkit-scrollbar {
width: var(--spacing-2xs);
}
&::-webkit-scrollbar-thumb {
border-radius: var(--spacing-xs);
background: var(--color-foreground-dark);
border: var(--spacing-5xs) solid white;
}
}
& + & {
padding-top: 0;
}

View File

@@ -150,6 +150,7 @@ exports[`AskAssistantChat > does not render retry button if no error is present
<!--v-if-->
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -338,6 +339,7 @@ exports[`AskAssistantChat > renders chat with messages correctly 1`] = `
<!--v-if-->
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -614,6 +616,7 @@ exports[`AskAssistantChat > renders chat with messages correctly 1`] = `
</div>
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -658,6 +661,7 @@ exports[`AskAssistantChat > renders chat with messages correctly 1`] = `
<!--v-if-->
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -789,6 +793,7 @@ Testing more code
</div>
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -918,6 +923,7 @@ Testing more code
</div>
</div>
<!--v-if-->
</div>
<div
class="quickReplies"
@@ -1331,6 +1337,7 @@ exports[`AskAssistantChat > renders end of session chat correctly 1`] = `
<!--v-if-->
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -1339,6 +1346,8 @@ exports[`AskAssistantChat > renders end of session chat correctly 1`] = `
>
<div
class="message"
is-last-message="true"
streaming="false"
>
<!--v-if-->
@@ -1407,6 +1416,7 @@ exports[`AskAssistantChat > renders end of session chat correctly 1`] = `
</span>
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -1527,6 +1537,8 @@ exports[`AskAssistantChat > renders error message correctly with retry button 1`
>
<div
class="message"
is-last-message="true"
streaming="false"
>
<div
class="roleName userSection"
@@ -1608,6 +1620,7 @@ exports[`AskAssistantChat > renders error message correctly with retry button 1`
/>
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -1866,6 +1879,7 @@ catch(e) {
<!--v-if-->
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>
@@ -2056,6 +2070,7 @@ exports[`AskAssistantChat > renders streaming chat correctly 1`] = `
/>
</div>
<!--v-if-->
</div>
<!--v-if-->
</data>

View File

@@ -1,8 +1,9 @@
<script setup lang="ts">
import { computed } from 'vue';
import MessageRating from './MessageRating.vue';
import { useI18n } from '../../../composables/useI18n';
import type { ChatUI } from '../../../types/assistant';
import type { ChatUI, RatingFeedback } from '../../../types/assistant';
import AssistantAvatar from '../../AskAssistantAvatar/AssistantAvatar.vue';
import N8nAvatar from '../../N8nAvatar';
@@ -16,9 +17,18 @@ interface Props {
}
const props = defineProps<Props>();
const emit = defineEmits<{
feedback: [RatingFeedback];
}>();
const { t } = useI18n();
const isUserMessage = computed(() => props.message.role === 'user');
function onRate(rating: RatingFeedback) {
emit('feedback', rating);
}
</script>
<template>
@@ -37,6 +47,12 @@ const isUserMessage = computed(() => props.message.role === 'user');
</template>
</div>
<slot></slot>
<MessageRating
v-if="message.showRating && !isUserMessage"
:style="message.ratingStyle"
:show-feedback="message.showFeedback"
@feedback="onRate"
/>
</div>
</template>

View File

@@ -1,7 +1,7 @@
<script setup lang="ts">
import BaseMessage from './BaseMessage.vue';
import { useMarkdown } from './useMarkdown';
import type { ChatUI } from '../../../types/assistant';
import type { ChatUI, RatingFeedback } from '../../../types/assistant';
import BlinkingCursor from '../../BlinkingCursor/BlinkingCursor.vue';
interface Props {
@@ -16,11 +16,19 @@ interface Props {
}
defineProps<Props>();
const emit = defineEmits<{
feedback: [RatingFeedback];
}>();
const { renderMarkdown } = useMarkdown();
</script>
<template>
<BaseMessage :message="message" :is-first-of-role="isFirstOfRole" :user="user">
<BaseMessage
:message="message"
:is-first-of-role="isFirstOfRole"
:user="user"
@feedback="(feedback: RatingFeedback) => emit('feedback', feedback)"
>
<div :class="$style.block">
<div :class="$style.blockTitle">
{{ message.title }}

View File

@@ -1,22 +1,10 @@
<script setup lang="ts">
import BaseMessage from './BaseMessage.vue';
import type { ChatUI } from '../../../types/assistant';
import type { ChatUI, RatingFeedback } from '../../../types/assistant';
import CodeDiff from '../../CodeDiff/CodeDiff.vue';
interface Props {
message: {
role: 'assistant';
type: 'code-diff';
description?: string;
codeDiff?: string;
replacing?: boolean;
replaced?: boolean;
error?: boolean;
suggestionId: string;
id: string;
read: boolean;
quickReplies?: ChatUI.QuickReply[];
};
message: ChatUI.CodeDiffMessage & { id: string; read: boolean };
isFirstOfRole: boolean;
user?: {
firstName: string;
@@ -31,11 +19,17 @@ defineProps<Props>();
const emit = defineEmits<{
codeReplace: [];
codeUndo: [];
feedback: [RatingFeedback];
}>();
</script>
<template>
<BaseMessage :message="message" :is-first-of-role="isFirstOfRole" :user="user">
<BaseMessage
:message="message"
:is-first-of-role="isFirstOfRole"
:user="user"
@feedback="(feedback: RatingFeedback) => emit('feedback', feedback)"
>
<CodeDiff
:title="message.description"
:content="message.codeDiff"

Some files were not shown because too many files have changed in this diff