feat: AI Workflow Builder backend (no-changelog) (#14837)

This commit is contained in:
oleg
2025-04-24 08:43:35 +02:00
committed by GitHub
parent 8c4b9f73f1
commit 1b1d6043d6
24 changed files with 1655 additions and 26 deletions

View File

@@ -0,0 +1,18 @@
const sharedOptions = require('@n8n/eslint-config/shared');

/**
 * ESLint configuration for the @n8n/ai-workflow-builder package.
 *
 * @type {import('@types/eslint').ESLint.ConfigData}
 */
module.exports = {
	extends: ['@n8n/eslint-config/node'],
	// Shared per-package options (e.g. parser project paths) resolved against this directory.
	...sharedOptions(__dirname),
	// jest.config.js is plain JS outside the TS project, so exclude it from linting.
	ignorePatterns: ['jest.config.js'],
	rules: {
		// Enforce kebab-case file names for consistency across the codebase.
		'unicorn/filename-case': ['error', { case: 'kebabCase' }],
		// Flag functions that exceed the default cyclomatic-complexity threshold.
		complexity: 'error',
	},
};

View File

@@ -0,0 +1,6 @@
/** @type {import('jest').Config} */
module.exports = {
	// Inherit the monorepo root jest configuration.
	...require('../../../jest.config'),
	// Run the shared n8n-workflow test setup for each test file.
	setupFilesAfterEnv: ['n8n-workflow/test/setup.ts'],
	// Allow up to 10 seconds per test (default is 5s).
	testTimeout: 10_000,
};

View File

@@ -0,0 +1,43 @@
{
"name": "@n8n/ai-workflow-builder",
"version": "0.1.0",
"scripts": {
"clean": "rimraf dist .turbo",
"typecheck": "tsc --noEmit",
"build": "tsc -p ./tsconfig.build.json && tsc-alias -p tsconfig.build.json",
"format": "biome format --write src",
"format:check": "biome ci src",
"test": "jest",
"test:watch": "jest --watch",
"lint": "eslint . --quiet",
"lintfix": "eslint . --fix",
"watch": "tsc-watch -p tsconfig.build.json --onCompilationComplete \"tsc-alias -p tsconfig.build.json\""
},
"main": "dist/index.js",
"module": "src/index.ts",
"types": "dist/index.d.ts",
"files": [
"dist/**/*"
],
"exports": {
".": {
"require": "./dist/index.js",
"import": "./src/index.ts",
"types": "./dist/index.d.ts"
}
},
"dependencies": {
"@langchain/anthropic": "catalog:",
"@langchain/core": "catalog:",
"@langchain/langgraph": "0.2.45",
"@langchain/openai": "catalog:",
"@n8n/config": "workspace:*",
"@n8n/di": "workspace:*",
"@n8n_io/ai-assistant-sdk": "1.13.0",
"n8n-workflow": "workspace:*",
"zod": "catalog:"
},
"devDependencies": {
"@n8n/typescript-config": "workspace:*"
}
}

View File

@@ -0,0 +1,356 @@
import { dispatchCustomEvent } from '@langchain/core/callbacks/dispatch';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { RunnableConfig } from '@langchain/core/runnables';
import { StateGraph, END, START } from '@langchain/langgraph';
import { GlobalConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import { AiAssistantClient } from '@n8n_io/ai-assistant-sdk';
import { OperationalError, assert, INodeTypes } from 'n8n-workflow';
import type { IUser, INodeTypeDescription, INode } from 'n8n-workflow';
import { connectionComposerChain } from './chains/connection-composer';
import { nodesSelectionChain } from './chains/node-selector';
import { nodesComposerChain } from './chains/nodes-composer';
import { plannerChain } from './chains/planner';
import { ILicenseService } from './interfaces';
import { anthropicClaude37Sonnet, gpt41mini } from './llm-config';
import type { MessageResponse } from './types';
import { WorkflowState } from './workflow-state';
@Service()
export class AiWorkflowBuilderService {
	// Descriptions of all visible (non-hidden) node types, resolved once in the constructor.
	private parsedNodeTypes: INodeTypeDescription[] = [];

	// Lazily-initialized model for cheaper tasks (node selection).
	private llmSimpleTask: BaseChatModel | undefined;

	// Lazily-initialized model for heavier tasks (planning, node/connection composing).
	private llmComplexTask: BaseChatModel | undefined;

	// AI-assistant SDK client; only created when `aiAssistant.baseUrl` is configured.
	private client: AiAssistantClient | undefined;

	constructor(
		private readonly licenseService: ILicenseService,
		private readonly nodeTypes: INodeTypes,
		private readonly globalConfig: GlobalConfig,
		private readonly n8nVersion: string,
	) {
		this.parsedNodeTypes = this.getNodeTypes();
	}

	/**
	 * Lazily creates the two chat models. When an AI-assistant base URL is
	 * configured, requests go through the n8n api-proxy using auth headers from
	 * the assistant SDK; otherwise API keys are read from environment variables.
	 */
	private async setupModels(user: IUser) {
		// Both models already initialized — nothing to do.
		if (this.llmSimpleTask && this.llmComplexTask) {
			return;
		}
		const baseUrl = this.globalConfig.aiAssistant.baseUrl;
		// If base URL is set, use api-proxy to access LLMs
		if (baseUrl) {
			if (!this.client) {
				const licenseCert = await this.licenseService.loadCertStr();
				const consumerId = this.licenseService.getConsumerId();
				this.client = new AiAssistantClient({
					licenseCert,
					consumerId,
					baseUrl,
					n8nVersion: this.n8nVersion,
				});
			}
			assert(this.client, 'Client not setup');
			// @ts-expect-error getProxyHeaders will only be available after `@n8n_io/ai-assistant-sdk` v1.14.0 is released
			// eslint-disable-next-line @typescript-eslint/no-unsafe-call
			const authHeaders = (await this.client?.getProxyHeaders(user)) as Record<string, string>;
			this.llmSimpleTask = gpt41mini({
				baseUrl: baseUrl + '/v1/api-proxy/openai',
				// When using api-proxy the key will be populated automatically, we just need to pass a placeholder
				apiKey: '_',
				headers: authHeaders,
			});
			this.llmComplexTask = anthropicClaude37Sonnet({
				baseUrl: baseUrl + '/v1/api-proxy/anthropic',
				apiKey: '_',
				headers: authHeaders,
			});
			return;
		}
		// If base URL is not set, use environment variables
		this.llmSimpleTask = gpt41mini({
			apiKey: process.env.N8N_AI_OPENAI_API_KEY ?? '',
		});
		this.llmComplexTask = anthropicClaude37Sonnet({
			apiKey: process.env.N8N_AI_ANTHROPIC_KEY ?? '',
		});
	}

	/**
	 * Collects descriptions of all known node types, overriding `name` with the
	 * fully-qualified registry key and filtering out hidden node types.
	 */
	private getNodeTypes(): INodeTypeDescription[] {
		const nodeTypesKeys = Object.keys(this.nodeTypes.getKnownTypes());
		const nodeTypes = nodeTypesKeys
			.map((nodeName) => {
				return { ...this.nodeTypes.getByNameAndVersion(nodeName).description, name: nodeName };
			})
			.filter((nodeType) => nodeType.hidden !== true);
		return nodeTypes;
	}

	/**
	 * Whether a custom stream event was emitted by one of the workflow-building
	 * graph nodes (as opposed to a generic intermediate-step event).
	 */
	private isWorkflowEvent(eventName: string): boolean {
		return [
			'generated_steps',
			'generated_nodes',
			'composed_nodes',
			'composed_connections',
			'generated_workflow_json',
		].includes(eventName);
	}

	/**
	 * Builds the LangGraph state graph:
	 * planner → node_selector → nodes_composer → connection_composer → finalize.
	 * Each graph node invokes its chain and dispatches a custom event with the
	 * partial result so `chat()` can stream progress to the caller.
	 */
	private getAgent() {
		// Plans high-level workflow steps from the user prompt.
		const plannerChainNode = async (
			state: typeof WorkflowState.State,
			config: RunnableConfig,
		): Promise<Partial<typeof WorkflowState.State>> => {
			assert(this.llmComplexTask, 'LLM not setup');
			const steps = await plannerChain(this.llmComplexTask).invoke(
				{
					prompt: state.prompt,
				},
				config,
			);
			await dispatchCustomEvent('generated_steps', {
				role: 'assistant',
				type: 'workflow-step',
				steps,
				id: Date.now().toString(),
				read: false,
			});
			return {
				steps,
			};
		};

		// Selects suitable node types for the planned steps from the allowed list.
		const nodeSelectionChainNode = async (
			state: typeof WorkflowState.State,
			config: RunnableConfig,
		) => {
			assert(this.llmSimpleTask, 'LLM not setup');
			const getNodeMessage = (node: INodeTypeDescription) => {
				return `
<node_name>${node.name}</node_name>
<node_description>${node.description}</node_description>
`;
			};
			const allowedNodes = this.parsedNodeTypes.map(getNodeMessage).join('');
			const result = await nodesSelectionChain(this.llmSimpleTask).invoke(
				{
					allowedNodes,
					prompt: state.prompt,
					steps: state.steps.join('\n'),
				},
				config,
			);
			// De-duplicate node types while keeping recommendation order.
			const nodes = [...new Set(result.map((r) => r.node))];
			await dispatchCustomEvent('generated_nodes', {
				role: 'assistant',
				type: 'workflow-node',
				nodes,
				id: Date.now().toString(),
				read: false,
			});
			return {
				nodes,
			};
		};

		// Generates full node configurations (parameters, names, versions) for the
		// selected node types.
		const nodesComposerChainNode = async (
			state: typeof WorkflowState.State,
			config: RunnableConfig,
		) => {
			assert(this.llmComplexTask, 'LLM not setup');
			// Resolves the version to assign: defaultVersion if present, otherwise
			// the last entry of a version array, or the single numeric version.
			const getLatestVersion = (nodeType: string) => {
				const node = this.parsedNodeTypes.find((n) => n.name === nodeType);
				if (!node) {
					throw new OperationalError(`Node type not found: ${nodeType}`);
				}
				if (node.defaultVersion) {
					return node.defaultVersion;
				}
				return typeof node.version === 'number'
					? node.version
					: node.version[node.version.length - 1];
			};
			const getNodeMessage = (nodeName: string) => {
				const node = this.parsedNodeTypes.find((n) => n.name === nodeName);
				if (!node) {
					throw new OperationalError(`Node type not found: ${nodeName}`);
				}
				return `
<node_name>
${node.name}
</node_name>
<node_description>
${node.description}
</node_description>
<node_parameters>
${JSON.stringify(node.properties)}
</node_parameters>
`;
			};
			const result = await nodesComposerChain(this.llmComplexTask).invoke(
				{
					user_workflow_prompt: state.prompt,
					nodes: state.nodes.map(getNodeMessage).join('\n\n'),
				},
				config,
			);
			// Lay nodes out left-to-right on the canvas, 150px apart.
			const composedNodes = result.map((node, index) => {
				const version = getLatestVersion(node.type);
				return {
					...node,
					position: [index * 150, 0],
					typeVersion: version,
				};
			});
			await dispatchCustomEvent('composed_nodes', {
				role: 'assistant',
				type: 'workflow-composed',
				nodes: composedNodes,
				id: Date.now().toString(),
				read: false,
			});
			return {
				workflowJSON: {
					nodes: composedNodes,
					connections: {},
				},
			};
		};

		// Creates the connections object wiring the composed nodes together.
		const connectionComposerChainNode = async (
			state: typeof WorkflowState.State,
			config: RunnableConfig,
		) => {
			assert(this.llmComplexTask, 'LLM not setup');
			// Pass the selected nodes as input to create connections.
			const getNodeMessage = (node: INode) => {
				return `
<node>
${JSON.stringify(node)}
</node>
`;
			};
			const connections = await connectionComposerChain(this.llmComplexTask).invoke(
				{
					workflowJSON: state.workflowJSON.nodes.map(getNodeMessage).join('\n\n'),
				},
				config,
			);
			const workflowJSON = {
				...state.workflowJSON,
				connections,
			};
			await dispatchCustomEvent('composed_connections', {
				role: 'assistant',
				type: 'workflow-connections',
				workflowJSON,
				id: Date.now().toString(),
				read: false,
			});
			return {
				workflowJSON,
			};
		};

		///////////////////// Finalization /////////////////////
		// Finalize the workflow JSON by combining nodes and their connections.
		async function generateWorkflowJSON(state: typeof WorkflowState.State) {
			await dispatchCustomEvent('generated_workflow_json', {
				role: 'assistant',
				type: 'workflow-generated',
				codeSnippet: JSON.stringify(state.workflowJSON, null, 4),
			});
			// NOTE(review): this replaces the state's workflowJSON object with its
			// stringified form — confirm downstream consumers expect a string here.
			return { workflowJSON: JSON.stringify(state.workflowJSON, null, 2) };
		}

		///////////////////// Workflow Graph Definition /////////////////////
		const workflowGraph = new StateGraph(WorkflowState)
			// .addNode('supervisor', supervisorChainNode)
			.addNode('planner', plannerChainNode)
			.addNode('node_selector', nodeSelectionChainNode)
			.addNode('nodes_composer', nodesComposerChainNode)
			.addNode('connection_composer', connectionComposerChainNode)
			.addNode('finalize', generateWorkflowJSON);
		// Define the graph edges to set the processing order:
		// Start with the planner.
		workflowGraph.addEdge(START, 'planner');
		// Planner node flows into node selector:
		workflowGraph.addEdge('planner', 'node_selector');
		// Node selector is followed by nodes composer:
		workflowGraph.addEdge('node_selector', 'nodes_composer');
		// Nodes composer is followed by connection composer:
		workflowGraph.addEdge('nodes_composer', 'connection_composer');
		// Connection composer flows to finalization:
		workflowGraph.addEdge('connection_composer', 'finalize');
		// Finalization flows to end:
		workflowGraph.addEdge('finalize', END);
		return workflowGraph;
	}

	/**
	 * Runs the workflow-building agent for the given question and yields a
	 * message chunk for every custom event emitted by the graph nodes.
	 */
	async *chat(payload: { question: string }, user: IUser) {
		// Ensure models are ready (lazy, first call only).
		if (!this.llmComplexTask || !this.llmSimpleTask) {
			await this.setupModels(user);
		}
		const agent = this.getAgent().compile();
		const initialState: typeof WorkflowState.State = {
			messages: [],
			prompt: payload.question,
			steps: [],
			nodes: [],
			workflowJSON: { nodes: [], connections: {} },
			next: 'PLAN',
		};
		const stream = agent.streamEvents(initialState, {
			streamMode: 'custom',
			recursionLimit: 10,
			version: 'v2',
		});
		for await (const chunk of stream) {
			let messageChunk: MessageResponse;
			if (chunk.event === 'on_custom_event') {
				if (this.isWorkflowEvent(chunk.name)) {
					// Workflow events already carry a fully-formed message payload.
					messageChunk = chunk.data as MessageResponse;
				} else {
					// Anything else is surfaced as a generic intermediate step.
					messageChunk = {
						role: 'assistant',
						type: 'intermediate-step',
						text: chunk.data as string,
						step: chunk.name,
					};
				}
				yield { messages: [messageChunk] };
			}
		}
	}
}

View File

@@ -0,0 +1,156 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
/**
 * System prompt for the connection-composing step: given the composed nodes,
 * the LLM must return a valid n8n `connections` object wiring them together
 * (referencing nodes by display name, with correct output indices).
 */
export const connectionComposerPrompt = new SystemMessage(
	`You are an expert in creating n8n workflow connections. Your job is to create a valid n8n workflow by connecting nodes in a logical sequence.
## Your Task
Create connections between nodes that form a coherent, executable workflow based on the user's request.
## Input Format
You will receive a list of n8n nodes with their details in <node> tags:
\`\`\`
<node>
{
"name": "Node display name",
"type": "n8n-nodes-base.nodeType",
"parameters": { ... },
"position": [x, y]
}
</node>
\`\`\`
## n8n Connection Structure
In n8n workflows:
1. Data flows from one node to the next through connections
2. Connections are defined in the "connections" object
3. Each node's output can connect to one or more nodes' inputs
4. Each connection has a source node, target node, and IO indices
## Connection Format
\`\`\`json
{
"connections": {
"Source Node Display Name": {
"main": [
[
{
"node": "Target Node Display Name",
"type": "main",
"index": 0
}
]
]
}
}
}
\`\`\`
## Rules for Creating Connections
1. ALWAYS use the node "name" field (display name) for the connection references
2. Create a logical flow from trigger/input nodes to output/action nodes
3. Each node MUST connect to at least one other node (except terminal nodes)
4. Don't create loops or cycles in the workflow
5. Ensure the output data from one node is compatible with the input expected by the next node
6. For nodes with multiple outputs (like IF nodes), connect each output appropriately:
- For IF nodes, first output (index 0) is the TRUE branch, second output (index 1) is the FALSE branch
- For Switch nodes, each output (starting at index 0) corresponds to a different case
## Common Workflow Patterns
1. Trigger → Process → Action
2. Data Source → Filter/Transform → Destination
3. Scheduled Trigger → HTTP Request → Process Response → Send Notification
4. Conditional Branch: Previous Node → IF Node → [True Branch, False Branch]
## Output
Return ONLY a valid JSON object with the "connections" property following the structure above:
\`\`\`json
{
"connections": {
"NodeName1": {
"main": [[{ "node": "NodeName2", "type": "main", "index": 0 }]]
},
"NodeName2": {
"main": [
[{ "node": "TrueBranchNode", "type": "main", "index": 0 }],
[{ "node": "FalseBranchNode", "type": "main", "index": 0 }]
]
},
...
}
}
\`\`\``,
);
// Zod schema for the connection-composer tool output: maps each source node's
// display name to its "main" connection groups (outer array = output index,
// inner array = targets for that output). Mirrors n8n's workflow JSON shape.
const connectionsSchema = z.object({
	connections: z
		.record(
			z
				.string()
				.describe(
					'The source node\'s display name exactly as specified in the node\'s "name" field',
				),
			z
				.object({
					main: z.array(
						z.array(
							z.object({
								node: z
									.string()
									.describe(
										'The target node\'s display name exactly as specified in the node\'s "name" field',
									),
								type: z
									.literal('main')
									.describe('The connection type, always use "main" for standard n8n connections'),
								index: z
									.number()
									.describe(
										'Output index from the source node, typically 0 for single-output nodes, 0=true/1=false for IF nodes',
									),
							}),
						),
					),
				})
				.describe('The connection configuration for a single source node'),
		)
		.describe('A mapping of all connections in the workflow, where each key is a source node name'),
});
// Structured tool forcing the LLM's answer into the connections schema.
// The func simply echoes the validated arguments; the chain extracts the
// tool-call arguments directly from the message.
const connectionComposerTool = new DynamicStructuredTool({
	name: 'compose_connections',
	description:
		"Create valid connections between n8n nodes to form a coherent, executable workflow that implements the user's request.",
	schema: connectionsSchema,
	func: async (input) => {
		return { connections: input.connections };
	},
});
// The human message carries only the serialized nodes (filled in by the caller).
const humanTemplate = '{workflowJSON}';

// System prompt + human message, combined into the chain's input prompt.
const chatPrompt = ChatPromptTemplate.fromMessages([
	connectionComposerPrompt,
	HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
/**
 * Builds the connection-composing chain: prompt → LLM with the forced
 * compose_connections tool → extraction of the connections object from the
 * first tool call. Throws if the model cannot bind tools.
 */
export const connectionComposerChain = (llm: BaseChatModel) => {
	if (!llm.bindTools) {
		throw new OperationalError("LLM doesn't support binding tools");
	}
	// Force the model to respond via the structured tool.
	const boundLlm = llm.bindTools([connectionComposerTool], {
		tool_choice: connectionComposerTool.name,
	});
	// Pull the validated arguments out of the first tool call.
	const extractConnections = (message: AIMessageChunk) => {
		const call = message.tool_calls?.[0];
		return (call?.args as z.infer<typeof connectionsSchema>).connections;
	};
	return chatPrompt.pipe(boundLlm).pipe(extractConnections);
};

View File

@@ -0,0 +1,106 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
/**
 * System prompt for the node-selection step: instructs the LLM to recommend
 * n8n nodes — by EXACT full name — from the allowed list, for the previously
 * planned workflow steps.
 *
 * Fixes vs. original prompt text: "provider:" → "provide:", and the
 * case-sensitivity example now differs only by case ("mysql" vs "mySql";
 * the original "msql" differed by spelling, undermining the example).
 */
export const nodeSelectorPrompt = new SystemMessage(
	`You are an expert in n8n workflows who selects the optimal n8n nodes to implement workflow steps.
## Your Task
For each workflow step, recommend the most appropriate n8n nodes from the allowed list.
## Input Information
- <user_request>: Original user workflow request
- <steps>: List of workflow steps to implement
- <allowed_n8n_nodes>: List of available n8n nodes with descriptions
## CRITICAL REQUIREMENTS
- ONLY recommend nodes that EXACTLY match names from the <allowed_n8n_nodes> list
- NEVER suggest nodes that are not explicitly defined in <allowed_n8n_nodes>
- ALWAYS use the COMPLETE node name as it appears in <node_name> tags (e.g., "Gmail" is NOT sufficient if the node name is "n8n-nodes-base.gmail")
- VERIFY each recommended node exists in the allowed list before including it
## Selection Criteria
1. Functionality - Node must be able to perform the required action
2. Integration - Prefer nodes that integrate directly with services mentioned in the user request
3. Efficiency - Prefer nodes that accomplish the task with minimal configuration
## Output Requirements
For the planned workflow steps, provide:
1. List of all possibly useful nodes in order of preference
2. Concise reasoning for why each node is suitable
3. Use EXACT, FULL node names from <node_name> tags
4. Pay attention to case sensitivity, e.g. "n8n-nodes-base.mysql" is NOT "n8n-nodes-base.mySql"!
Remember: ONLY use nodes from the <allowed_n8n_nodes> list and ALWAYS use their FULL names exactly as provided.`,
);
// Zod schema for the node-selector tool output: 1–20 recommended node types,
// each with a relevance score, the exact full type identifier, and a short
// rationale. Order encodes descending preference.
const nodeSelectorSchema = z.object({
	recommended_nodes: z
		.array(
			z.object({
				score: z.number().describe('Matching score of the node for all the workflows steps'),
				node: z
					.string()
					.describe(
						'The full node type identifier (e.g., "n8n-nodes-base.if") from <allowed_n8n_nodes> list',
					),
				reasoning: z
					.string()
					.describe(
						'Very short explanation of why this node might be used to implement the workflow step',
					),
			}),
		)
		.min(1)
		.max(20)
		.describe(
			'Recommended n8n nodes for implementing any of the workflow steps, in order of descending preference. ONLY use nodes from the <allowed_n8n_nodes> list with EXACT full names from <node_name> tags.',
		),
});
// Structured tool forcing the LLM's answer into the node-selector schema.
// The func echoes the validated arguments; the chain reads the tool-call
// arguments straight off the message.
const nodeSelectorTool = new DynamicStructuredTool({
	name: 'select_n8n_nodes',
	description:
		'Match each workflow step with the most appropriate n8n nodes from the allowed list, ensuring they can implement the required functionality.',
	schema: nodeSelectorSchema,
	func: async ({ recommended_nodes }) => {
		return { recommended_nodes };
	},
});
// Human message template: the original request, the planned steps, and the
// list of allowed nodes, each wrapped in tags referenced by the system prompt.
const humanTemplate = `
<user_request>
{prompt}
</user_request>
<steps>
{steps}
</steps>
<allowed_n8n_nodes>
{allowedNodes}
</allowed_n8n_nodes>
`;

// System prompt + human message, combined into the chain's input prompt.
const chatPrompt = ChatPromptTemplate.fromMessages([
	nodeSelectorPrompt,
	HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
/**
 * Builds the node-selection chain: prompt → LLM with the forced
 * select_n8n_nodes tool → extraction of the recommended-node list from the
 * first tool call. Throws if the model cannot bind tools.
 */
export const nodesSelectionChain = (llm: BaseChatModel) => {
	if (!llm.bindTools) {
		throw new OperationalError("LLM doesn't support binding tools");
	}
	// Force the model to respond via the structured tool.
	const boundLlm = llm.bindTools([nodeSelectorTool], {
		tool_choice: nodeSelectorTool.name,
	});
	// Pull the validated arguments out of the first tool call.
	const extractRecommendations = (message: AIMessageChunk) => {
		const call = message.tool_calls?.[0];
		return (call?.args as z.infer<typeof nodeSelectorSchema>).recommended_nodes;
	};
	return chatPrompt.pipe(boundLlm).pipe(extractRecommendations);
};

View File

@@ -0,0 +1,466 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
// Using SystemMessage directly instead of escapeSingleCurlyBrackets to avoid
// issues with double curly braces in n8n expressions
// System prompt for the node-composing step: instructs the LLM to generate
// complete node configurations (parameters, names, types) for the selected
// node types, with extensive guidance on n8n expression syntax and per-node
// parameter structures (IF, Set, HTTP Request, Sticky Note).
const systemPrompt = new SystemMessage(`You are an expert n8n workflow architect who creates complete node configurations for complex workflows.
## Your Task
Generate fully-formed n8n node configurations with properly structured parameters for each selected node.
## Reference Information
You will receive:
1. The original user workflow request
2. A list of selected n8n nodes with their descriptions and parameters
## Node Configuration Guidelines
1. CREATE PROPER STRUCTURE: Include all required fields (parameters, name, type)
2. USE DESCRIPTIVE NAMES: Each node name should clearly describe its function
3. POPULATE KEY PARAMETERS: Set values for essential parameters based on node type
4. MAINTAIN LOGICAL FLOW: Node parameters should enable proper data flow
5. FOLLOW NODE PATTERNS: Use the correct structure for each node type
6. ADD DOCUMENTATION: Include at least one sticky note, explaining the workflow. Include additional sticky notes for complex parts of the workflow.
## CRITICAL: Correctly Formatting n8n Expressions
When using expressions to reference data from other nodes:
- ALWAYS use the format: \`={{ $('Node Name').item.json.field }}\`
- NEVER omit the equals sign before the double curly braces
- ALWAYS use DOUBLE curly braces, never single
- NEVER use emojis or special characters inside expressions as they will break the expression
- INCORRECT: \`{ $('Node Name').item.json.field }\` (missing =, single braces)
- INCORRECT: \`{{ $('Node Name').item.json.field }}\` (missing =)
- INCORRECT: \`={{ $('👍 Node').item.json.field }}\` (contains emoji)
- CORRECT: \`={{ $('Previous Node').item.json.field }}\`
This format is essential for n8n to properly process the expression.
## IF Node Configuration (CRITICAL)
The IF node allows conditional branching based on comparing values. It has two outputs:
- Output 0: TRUE branch (when conditions are met)
- Output 1: FALSE branch (when conditions are NOT met)
### Key Points for IF Node:
1. MATCH OPERATOR TYPE TO DATA TYPE - Use the correct operator type that matches your data:
- For string values: use "type": "string" with operations like "equals", "contains", "exists"
- For number values: use "type": "number" with operations like "equals", "gt", "lt"
- For boolean values: use "type": "boolean" with operations like "equals", "true", "false"
- For arrays: use "type": "array" with operations like "empty", "contains"
- For objects: use "type": "object" with operations like "exists", "empty"
- For dates: use "type": "dateTime" with operations like "before", "after"
2. USE SINGLE VALUE OPERATORS CORRECTLY:
- Some operators like "exists", "notExists", "empty" don't need a right value
- For these operators, include "singleValue": true in the operator object
- Example: Checking if a string exists: "operator": { "type": "string", "operation": "exists", "singleValue": true }
3. USE CORRECT DATA TYPES FOR RIGHT VALUES:
- Number comparisons: use actual numbers (without quotes) like 5, not "5"
- Boolean comparisons: use true or false (without quotes), not "true" or "false"
- String comparisons: use quoted strings like "text"
- When using expressions for the right value, include the proper format: "={{ expression }}"
### IF Node Examples
#### Example 1: Check if a number is greater than 5
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.amount }}",
"rightValue": 5,
"operator": {
"type": "number",
"operation": "gt"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 2: Check if a string exists
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.email }}",
"rightValue": "",
"operator": {
"type": "string",
"operation": "exists",
"singleValue": true
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 3: Check if a boolean is true
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.isActive }}",
"rightValue": "",
"operator": {
"type": "boolean",
"operation": "true",
"singleValue": true
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 4: Compare string value
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.status }}",
"rightValue": "active",
"operator": {
"type": "string",
"operation": "equals"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
#### Example 5: Compare boolean value
\`\`\`json
{
"parameters": {
"conditions": {
"options": {
"caseSensitive": false,
"leftValue": "",
"typeValidation": "loose"
},
"conditions": [
{
"leftValue": "={{ $('Previous Node').item.json.isVerified }}",
"rightValue": true,
"operator": {
"type": "boolean",
"operation": "equals"
}
}
],
"combinator": "and"
},
"options": {
"ignoreCase": true,
"looseTypeValidation": true
}
}
}
\`\`\`
### Common Operator Types and Operations
#### String Operators:
- "exists", "notExists", "empty", "notEmpty" (use with "singleValue": true)
- "equals", "notEquals", "contains", "notContains", "startsWith", "endsWith", "regex"
#### Number Operators:
- "exists", "notExists" (use with "singleValue": true)
- "equals", "notEquals", "gt" (greater than), "lt" (less than), "gte" (greater than or equal), "lte" (less than or equal)
#### Boolean Operators:
- "exists", "notExists" (use with "singleValue": true)
- "true", "false" (use with "singleValue": true)
- "equals", "notEquals"
#### Array Operators:
- "exists", "notExists", "empty", "notEmpty" (use with "singleValue": true)
- "contains", "notContains", "lengthEquals", "lengthNotEquals"
## Other Important Node Structures
### Set Node Structure
\`\`\`json
{
"parameters": {
"assignments": {
"assignments": [
{
"id": "unique-id-1",
"name": "property_name_1",
"value": "property_value_1",
"type": "string"
}
]
},
"options": {}
}
}
\`\`\`
### HTTP Request Node Structures
#### GET Request
\`\`\`json
{
"parameters": {
"url": "https://example.com",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "header-name",
"value": "header-value"
}
]
},
"options": {}
}
}
\`\`\`
#### POST Request
\`\`\`json
{
"parameters": {
"method": "POST",
"url": "https://example.com",
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "header-name",
"value": "header-value"
}
]
},
"sendBody": true,
"bodyParameters": {
"parameters": [
{
"name": "field-name",
"value": "field-value"
}
]
},
"options": {}
}
}
\`\`\`
### Sticky Note Structure
\`\`\`json
{
"parameters": {
"content": "Note content here"
},
"name": "Descriptive Name",
"type": "n8n-nodes-base.stickyNote",
"notes": true
}
\`\`\`
## Expression Examples
1. Reference a field from another node:
\`\`\`
"value": "={{ $('Previous Node').item.json.fieldName }}"
\`\`\`
2. Use an expression with string concatenation:
\`\`\`
"value": "={{ 'Hello ' + $('User Input').item.json.name }}"
\`\`\`
3. Access an array item:
\`\`\`
"value": "={{ $('Data Node').item.json.items[0].id }}"
\`\`\`
4. IMPORTANT: How to properly format text fields with expressions
### PREFERRED METHOD: Embedding expressions directly within text
\`\`\`
"text": "=ALERT: It is currently raining in {{ $('Weather Node').item.json.city }}! Temperature: {{ $('Weather Node').item.json.main.temp }}°C"
\`\`\`
### Alternative method: Using string concatenation (use only when needed for complex operations)
\`\`\`
"text": "={{ 'ALERT: It is currently raining in ' + $('Weather Node').item.json.city + '! Temperature: ' + $('Weather Node').item.json.temp + '°C' }}"
\`\`\`
## CRITICAL: Formatting Text Fields with Expressions
### KEY RULES FOR THE PREFERRED METHOD (Embedding expressions in text):
- Start the string with just "=" (not "={{")
- Place each expression inside {{ }} without the = prefix
- MOST READABLE and RECOMMENDED approach
- Example: "text": "=Status: {{ $('Node').item.json.status }} at {{ $('Node').item.json.time }}"
### KEY RULES FOR THE ALTERNATIVE METHOD (String concatenation):
- Only use when you need complex operations not possible with embedded expressions
- Enclose the entire text in a single expression with "={{ }}"
- Put all static text in quotes and connect with + operators
- Example: "text": "={{ 'Status: ' + $('Node').item.json.status + ' at ' + $('Node').item.json.time }}"
### EXAMPLES OF PREFERRED USAGE:
1. Slack message (PREFERRED):
\`\`\`json
"text": "=ALERT: It is currently raining in {{ $('Weather Node').item.json.city }}! Temperature: {{ $('Weather Node').item.json.main.temp }}°C"
\`\`\`
2. Email subject (PREFERRED):
\`\`\`json
"subject": "=Order #{{ $('Order Node').item.json.orderId }} Status Update"
\`\`\`
3. Image prompt (PREFERRED):
\`\`\`json
"prompt": "=Create an image of {{ $('Location Node').item.json.city }} during {{ $('Weather Node').item.json.weather[0].description }}"
\`\`\`
4. Slack message with multiple data points (PREFERRED):
\`\`\`json
"text": "=Customer {{ $('Customer Data').item.json.name }} has placed order #{{ $('Order Data').item.json.id }} for {{ $('Order Data').item.json.amount }}€"
\`\`\`
5. HTTP request URL (PREFERRED):
\`\`\`json
"url": "=https://api.example.com/users/{{ $('User Data').item.json.id }}/orders?status={{ $('Filter').item.json.status }}"
\`\`\`
### COMMON MISTAKES TO AVOID:
- INCORRECT: "text": "ALERT: Temperature is {{ $('Weather Node').item.json.temp }}°C" (missing = prefix)
- INCORRECT: "text": "={{ $('Weather Node').item.json.temp }}" (using expression for dynamic part only)
- INCORRECT: "text": "={{ $('⚠️ Weather').item.json.temp }}" (emoji in node name)
- INCORRECT: "text": "={{ 'ALERT' }} {{ $('Weather').item.json.city }}" (mixing methods)
## Output Format
Return valid JSON that can be consumed by the n8n platform. Your response must match the tool's required schema.`);
// Human message template: the original request plus the selected node
// descriptions/parameters, in the tags referenced by the system prompt.
const humanTemplate = `
<user_workflow_prompt>
{user_workflow_prompt}
</user_workflow_prompt>
<selected_n8n_nodes>
{nodes}
</selected_n8n_nodes>
`;

// System prompt + human message, combined into the chain's input prompt.
export const nodesComposerPrompt = ChatPromptTemplate.fromMessages([
	systemPrompt,
	HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
// Zod schema for the node-composer tool output: an array of complete node
// configurations. Each node needs a non-empty parameters object, a full type
// identifier, and a descriptive display name.
const nodeConfigSchema = z.object({
	nodes: z
		.array(
			z
				.object({
					parameters: z
						.record(z.string(), z.any())
						.describe(
							"The node's configuration parameters. Must include all required parameters for the node type to function properly. For expressions referencing other nodes, use the format: \"={{ $('Node Name').item.json.field }}\"",
						)
						.refine((data) => Object.keys(data).length > 0, {
							message: 'Parameters cannot be empty',
						}),
					type: z
						.string()
						.describe('The full node type identifier (e.g., "n8n-nodes-base.httpRequest")'),
					name: z
						.string()
						.describe(
							'A descriptive name for the node that clearly indicates its purpose in the workflow',
						),
				})
				.describe('A complete n8n node configuration'),
		)
		.describe('Array of all nodes for the workflow with their complete configurations'),
});
// Structured tool forcing the LLM's answer into the node-config schema.
// The func echoes the validated arguments; the chain reads the tool-call
// arguments straight off the message.
const generateNodeConfigTool = new DynamicStructuredTool({
	name: 'generate_n8n_nodes',
	description:
		'Generate fully configured n8n nodes with appropriate parameters based on the workflow requirements and selected node types.',
	schema: nodeConfigSchema,
	func: async (input) => {
		return { nodes: input.nodes };
	},
});
/**
 * Builds the node-composing chain: prompt → LLM with the forced
 * generate_n8n_nodes tool → extraction of the composed node list from the
 * first tool call. Throws if the model cannot bind tools.
 */
export const nodesComposerChain = (llm: BaseChatModel) => {
	if (!llm.bindTools) {
		throw new OperationalError("LLM doesn't support binding tools");
	}
	// Force the model to respond via the structured tool.
	const boundLlm = llm.bindTools([generateNodeConfigTool], {
		tool_choice: generateNodeConfigTool.name,
	});
	// Pull the validated arguments out of the first tool call.
	const extractNodes = (message: AIMessageChunk) => {
		const call = message.tool_calls?.[0];
		return (call?.args as z.infer<typeof nodeConfigSchema>).nodes;
	};
	return nodesComposerPrompt.pipe(boundLlm).pipe(extractNodes);
};

View File

@@ -0,0 +1,94 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { AIMessageChunk } from '@langchain/core/messages';
import { SystemMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { OperationalError } from 'n8n-workflow';
import { z } from 'zod';
// System prompt for the planner step: converts a user's automation request into
// an ordered list of implementable workflow steps, returned as bare JSON only.
export const plannerPrompt = new SystemMessage(
	`You are a Workflow Planner for n8n, a platform that helps users automate processes across different services and APIs.
## Your Task
Convert user requests into clear, sequential workflow steps that can be implemented with n8n nodes.
## Guidelines
1. Analyze the user request to understand their end goal and required process
2. Break down the automation into logical steps based on complexity - simpler workflows need fewer steps, complex ones may need more
3. Focus on actions (fetch data, transform, filter, send notification, etc.)
4. Create steps that can be mapped to n8n nodes later
5. Order steps sequentially from trigger to final action
6. Be specific about data transformations needed
7. Include error handling steps when appropriate
8. Only recommend raw HTTP requests if you think there isn't a suitable n8n node
## Output Format
Return ONLY a JSON object with this structure:
\`\`\`json
{
"steps": [
"[Brief action-oriented description]",
"[Brief action-oriented description]",
...
]
}
\`\`\`
## Examples of Good Step Descriptions
- "Trigger when a new email arrives in Gmail inbox"
- "Filter emails to only include those with attachments"
- "Extract data from CSV attachments"
- "Transform data to required format for the API"
- "Send HTTP request to external API with extracted data"
- "Post success message to Slack channel"
IMPORTANT: Do not include HTML tags, markdown formatting, or explanations outside the JSON.`,
);
// Schema for the planner tool output: a non-empty, ordered list of
// action-oriented step descriptions. The .describe() texts also serve as
// instructions to the LLM when the schema is bound as a tool.
const planSchema = z.object({
	steps: z
		.array(
			z
				.string()
				.describe(
					'A clear, action-oriented description of a single workflow step. Do not include "Step N" or similar, just the action',
				),
		)
		.min(1)
		.describe(
			'An ordered list of workflow steps that, when implemented, will fulfill the user request. Each step should be concise, action-oriented, and implementable with n8n nodes.',
		),
});
// Tool bound to the LLM (with forced tool_choice in plannerChain); the func
// echoes the validated steps back, making the tool-call args the chain's
// structured output.
const generatePlanTool = new DynamicStructuredTool({
	name: 'generate_plan',
	description:
		'Convert a user workflow request into a logical sequence of clear, achievable steps that can be implemented with n8n nodes.',
	schema: planSchema,
	func: async (input) => {
		return { steps: input.steps };
	},
});
// The human message is just the raw user prompt, unchanged.
const humanTemplate = '{prompt}';
// Full planner prompt: system instructions followed by the user's request.
const chatPrompt = ChatPromptTemplate.fromMessages([
	plannerPrompt,
	HumanMessagePromptTemplate.fromTemplate(humanTemplate),
]);
/**
 * Builds the planner chain: chat prompt -> LLM forced to call `generate_plan`
 * -> extracted `steps` array.
 *
 * @param llm - Chat model; must support tool binding.
 * @returns A runnable producing the planned workflow steps as string[].
 * @throws OperationalError if the model cannot bind tools or omits the tool call.
 */
export const plannerChain = (llm: BaseChatModel) => {
	if (!llm.bindTools) {
		throw new OperationalError("LLM doesn't support binding tools");
	}
	return chatPrompt
		.pipe(
			llm.bindTools([generatePlanTool], {
				tool_choice: generatePlanTool.name,
			}),
		)
		.pipe((x: AIMessageChunk) => {
			const toolCall = x.tool_calls?.[0];
			// Even with tool_choice forced, a malformed response can omit the call;
			// fail with a clear operational error instead of a TypeError on undefined.
			if (!toolCall) {
				throw new OperationalError('LLM did not return the expected generate_plan tool call');
			}
			return (toolCall.args as z.infer<typeof planSchema>).steps;
		});
};

View File

@@ -0,0 +1,4 @@
export * from './ai-workflow-builder.service';
export * from './types';
export * from './workflow-state';
export * from './interfaces';

View File

@@ -0,0 +1,4 @@
// Minimal license contract required by the AI workflow builder, so this package
// does not have to depend on the full n8n License service implementation.
export interface ILicenseService {
	// Loads the license certificate as a serialized string.
	loadCertStr(): Promise<string>;
	// Returns the license consumer id of this n8n instance.
	getConsumerId(): string;
}

View File

@@ -0,0 +1,41 @@
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatOpenAI } from '@langchain/openai';
// Connection settings shared by all LLM factory helpers in this file.
type LLMConfig = {
	// Provider API key.
	apiKey: string;
	// Optional base URL override (e.g. to route requests through a proxy).
	baseUrl?: string;
	// Extra HTTP headers to send with every request.
	headers?: Record<string, string>;
};
// Factory for an OpenAI o4-mini chat model.
// NOTE(review): unlike the other factories, no temperature is set here —
// presumably because o-series reasoning models reject it; confirm before adding.
export const o4mini = (config: LLMConfig) =>
	new ChatOpenAI({
		modelName: 'o4-mini-2025-04-16',
		apiKey: config.apiKey,
		configuration: {
			baseURL: config.baseUrl,
			defaultHeaders: config.headers,
		},
	});
// Factory for an OpenAI gpt-4.1-mini chat model; temperature 0 for
// deterministic output.
export const gpt41mini = (config: LLMConfig) =>
	new ChatOpenAI({
		modelName: 'gpt-4.1-mini-2025-04-14',
		apiKey: config.apiKey,
		temperature: 0,
		configuration: {
			baseURL: config.baseUrl,
			defaultHeaders: config.headers,
		},
	});
// Factory for an Anthropic Claude 3.7 Sonnet chat model; temperature 0 and a
// 16k output-token cap.
export const anthropicClaude37Sonnet = (config: LLMConfig) =>
	new ChatAnthropic({
		modelName: 'claude-3-7-sonnet-20250219',
		apiKey: config.apiKey,
		temperature: 0,
		maxTokens: 16000,
		anthropicApiUrl: config.baseUrl,
		clientOptions: {
			defaultHeaders: config.headers,
		},
	});

View File

@@ -0,0 +1,105 @@
import type { IWorkflowBase } from 'n8n-workflow';
// A workflow reduced to the parts the builder generates: nodes and connections.
export type SimpleWorkflow = Pick<IWorkflowBase, 'nodes' | 'connections'>;
// Assistant message carrying a code-diff suggestion.
export interface CodeDiffMessage {
	role: 'assistant';
	type: 'code-diff';
	description?: string;
	codeDiff?: string;
	suggestionId: string;
	solution_count: number;
}
// A clickable quick-reply option that can be attached to a message.
export interface QuickReplyOption {
	text: string;
	type: string;
	isFeedback?: boolean;
}
// Plain assistant chat message.
export interface AssistantChatMessage {
	role: 'assistant';
	type: 'message';
	text: string;
	step?: string;
	codeSnippet?: string;
}
// Assistant-provided summary with a title and body.
export interface AssistantSummaryMessage {
	role: 'assistant';
	type: 'summary';
	title: string;
	content: string;
}
// Event message signalling that the chat session has ended.
export interface EndSessionMessage {
	role: 'assistant';
	type: 'event';
	eventName: 'end-session';
}
// Agent suggestion message (title + text).
export interface AgentChatMessage {
	role: 'assistant';
	type: 'agent-suggestion';
	title: string;
	text: string;
}
// Intermediate "thinking" step emitted while the agent works.
export interface AgentThinkingStep {
	role: 'assistant';
	type: 'intermediate-step';
	text: string;
	step: string;
}
// Planner output: the ordered list of workflow steps.
export interface WorkflowStepMessage {
	role: 'assistant';
	type: 'workflow-step';
	steps: string[];
	id: string;
	read: boolean;
}
// Node-selection output: candidate n8n node type names.
export interface WorkflowNodeMessage {
	role: 'assistant';
	type: 'workflow-node';
	nodes: string[];
	id: string;
	read: boolean;
}
// Composer output: fully configured nodes including canvas positions.
export interface WorkflowComposedMessage {
	role: 'assistant';
	type: 'workflow-composed';
	nodes: Array<{
		parameters: Record<string, unknown>;
		type: string;
		name: string;
		position: [number, number];
	}>;
	id: string;
	read: boolean;
}
// Final output: the assembled workflow JSON (nodes + connections).
export interface WorkflowConnectionsMessage {
	role: 'assistant';
	type: 'workflow-connections';
	workflowJSON: SimpleWorkflow;
	id: string;
	read: boolean;
}
// Union (discriminated by `type`) of every message the builder can stream.
// All variants except EndSessionMessage may carry quick-reply options.
export type MessageResponse =
	| ((
			| AssistantChatMessage
			| CodeDiffMessage
			| AssistantSummaryMessage
			| AgentChatMessage
			| AgentThinkingStep
			| WorkflowStepMessage
			| WorkflowNodeMessage
			| WorkflowComposedMessage
			| WorkflowConnectionsMessage
	  ) & {
			quickReplies?: QuickReplyOption[];
	  })
	| EndSessionMessage;

View File

@@ -0,0 +1,22 @@
import type { BaseMessage } from '@langchain/core/messages';
import { Annotation, END } from '@langchain/langgraph';
import type { SimpleWorkflow } from './types';
// Shared LangGraph state for the workflow-builder graph. Each channel's reducer
// describes how node outputs are merged into the accumulated state.
export const WorkflowState = Annotation.Root({
	// Conversation history; new messages are appended, never replaced.
	// NOTE(review): no `default` factory is provided here — confirm LangGraph
	// never calls this reducer with an undefined accumulator on the first update.
	messages: Annotation<BaseMessage[]>({
		reducer: (x, y) => x.concat(y),
	}),
	// The original prompt from the user (last write wins, falls back to '').
	prompt: Annotation<string>({ reducer: (x, y) => y ?? x ?? '' }),
	// The list of logically derived workflow steps.
	steps: Annotation<string[]>({ reducer: (x, y) => y ?? x ?? [] }),
	// The list of candidate or selected n8n node names.
	nodes: Annotation<string[]>({ reducer: (x, y) => y ?? x ?? [] }),
	// The JSON representation of the workflow being built.
	workflowJSON: Annotation<SimpleWorkflow>({
		reducer: (x, y) => y ?? x ?? { nodes: [], connections: {} },
	}),
	// The next phase to be executed in the workflow graph; defaults to END.
	next: Annotation<string>({ reducer: (x, y) => y ?? x ?? END, default: () => END }),
});

View File

@@ -0,0 +1,11 @@
{
"extends": ["./tsconfig.json", "@n8n/typescript-config/tsconfig.build.json"],
"compilerOptions": {
"composite": true,
"rootDir": "src",
"outDir": "dist",
"tsBuildInfoFile": "dist/build.tsbuildinfo"
},
"include": ["src/**/*.ts"],
"exclude": ["src/**/__tests__/**"]
}

View File

@@ -0,0 +1,17 @@
{
"extends": [
"@n8n/typescript-config/tsconfig.common.json",
"@n8n/typescript-config/tsconfig.backend.json"
],
"compilerOptions": {
"rootDir": ".",
"emitDecoratorMetadata": true,
"experimentalDecorators": true,
"baseUrl": "src",
"paths": {
"@/*": ["./*"]
},
"tsBuildInfoFile": "dist/typecheck.tsbuildinfo"
},
"include": ["src/**/*.ts"]
}

View File

@@ -0,0 +1,8 @@
import { z } from 'zod';
import { Z } from 'zod-class';
// Request body for the AI workflow-builder chat endpoint: the user's
// natural-language question/prompt, nested under `payload`.
export class AiBuilderChatRequestDto extends Z.class({
	payload: z.object({
		question: z.string(),
	}),
}) {}

View File

@@ -1,5 +1,6 @@
export { AiAskRequestDto } from './ai/ai-ask-request.dto';
export { AiChatRequestDto } from './ai/ai-chat-request.dto';
export { AiBuilderChatRequestDto } from './ai/ai-build-request.dto';
export { AiApplySuggestionRequestDto } from './ai/ai-apply-suggestion-request.dto';
export { AiFreeCreditsRequestDto } from './ai/ai-free-credits-request.dto';

View File

@@ -155,10 +155,10 @@
"@google-cloud/resource-manager": "5.3.0",
"@google/generative-ai": "0.21.0",
"@huggingface/inference": "2.8.0",
"@langchain/anthropic": "0.3.14",
"@langchain/anthropic": "catalog:",
"@langchain/aws": "0.1.3",
"@langchain/cohere": "0.3.2",
"@langchain/community": "0.3.24",
"@langchain/community": "catalog:",
"@langchain/core": "catalog:",
"@langchain/google-genai": "0.1.6",
"@langchain/google-vertexai": "0.1.8",
@@ -166,7 +166,7 @@
"@langchain/mistralai": "0.2.0",
"@langchain/mongodb": "^0.1.0",
"@langchain/ollama": "0.1.4",
"@langchain/openai": "0.3.17",
"@langchain/openai": "catalog:",
"@langchain/pinecone": "0.1.3",
"@langchain/qdrant": "0.1.1",
"@langchain/redis": "0.1.0",

View File

@@ -100,6 +100,7 @@
"@n8n/n8n-nodes-langchain": "workspace:*",
"@n8n/permissions": "workspace:*",
"@n8n/task-runner": "workspace:*",
"@n8n/ai-workflow-builder": "workspace:*",
"@n8n/typeorm": "0.3.20-12",
"@n8n_io/ai-assistant-sdk": "1.13.0",
"@n8n_io/license-sdk": "2.20.0",

View File

@@ -8,14 +8,15 @@ import { mock } from 'jest-mock-extended';
import { InternalServerError } from '@/errors/response-errors/internal-server.error';
import type { AuthenticatedRequest } from '@/requests';
import type { WorkflowBuilderService } from '@/services/ai-workflow-builder.service';
import type { AiService } from '@/services/ai.service';
import { AiController, type FlushableResponse } from '../ai.controller';
describe('AiController', () => {
const aiService = mock<AiService>();
const controller = new AiController(aiService, mock(), mock());
const workflowBuilderService = mock<WorkflowBuilderService>();
const controller = new AiController(aiService, workflowBuilderService, mock(), mock());
const request = mock<AuthenticatedRequest>({
user: { id: 'user123' },

View File

@@ -4,6 +4,7 @@ import {
AiApplySuggestionRequestDto,
AiAskRequestDto,
AiFreeCreditsRequestDto,
AiBuilderChatRequestDto,
} from '@n8n/api-types';
import { Body, Post, RestController } from '@n8n/decorators';
import type { AiAssistantSDK } from '@n8n_io/ai-assistant-sdk';
@@ -16,6 +17,7 @@ import { FREE_AI_CREDITS_CREDENTIAL_NAME } from '@/constants';
import { CredentialsService } from '@/credentials/credentials.service';
import { InternalServerError } from '@/errors/response-errors/internal-server.error';
import { AuthenticatedRequest } from '@/requests';
import { WorkflowBuilderService } from '@/services/ai-workflow-builder.service';
import { AiService } from '@/services/ai.service';
import { UserService } from '@/services/user.service';
@@ -25,10 +27,40 @@ export type FlushableResponse = Response & { flush: () => void };
export class AiController {
constructor(
private readonly aiService: AiService,
private readonly workflowBuilderService: WorkflowBuilderService,
private readonly credentialsService: CredentialsService,
private readonly userService: UserService,
) {}
/**
 * Streams AI workflow-builder chat responses to the client.
 * Each chunk is serialized as JSON and terminated with the '⧉⇋⇋➽⌑⧉§§'
 * delimiter so the consumer can split the stream into messages.
 */
@Post('/build', { rateLimit: { limit: 100 } })
async build(
req: AuthenticatedRequest,
res: FlushableResponse,
@Body payload: AiBuilderChatRequestDto,
) {
try {
const aiResponse = this.workflowBuilderService.chat(
{
// question is required by the DTO schema; ?? '' is a defensive fallback.
question: payload.payload.question ?? '',
},
req.user,
);
res.header('Content-type', 'application/json-lines').flush();
// Handle the stream
for await (const chunk of aiResponse) {
res.flush();
res.write(JSON.stringify(chunk) + '⧉⇋⇋➽⌑⧉§§\n');
}
res.end();
} catch (e) {
// NOTE(review): once streaming has started the headers are already sent,
// so an error thrown mid-stream cannot reach the client as a proper HTTP
// error response — confirm the frontend handles a truncated stream.
assert(e instanceof Error);
throw new InternalServerError(e.message, e);
}
}
@Post('/chat', { rateLimit: { limit: 100 } })
async chat(req: AuthenticatedRequest, res: FlushableResponse, @Body payload: AiChatRequestDto) {
try {

View File

@@ -0,0 +1,40 @@
import { AiWorkflowBuilderService } from '@n8n/ai-workflow-builder';
import { GlobalConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import type { IUser } from 'n8n-workflow';
import { N8N_VERSION } from '@/constants';
import { License } from '@/license';
import { NodeTypes } from '@/node-types';
/**
* This service wraps the actual AiWorkflowBuilderService to avoid circular dependencies.
* Instead of extending, we're delegating to the real service which is created on-demand.
*/
@Service()
export class WorkflowBuilderService {
	// Lazily instantiated wrapped service; created on first use (see getService).
	private service: AiWorkflowBuilderService | undefined;
	constructor(
		private readonly nodeTypes: NodeTypes,
		private readonly license: License,
		private readonly config: GlobalConfig,
	) {}
	/** Creates the wrapped AiWorkflowBuilderService on first call and caches it. */
	private getService(): AiWorkflowBuilderService {
		if (!this.service) {
			this.service = new AiWorkflowBuilderService(
				this.license,
				this.nodeTypes,
				this.config,
				N8N_VERSION,
			);
		}
		return this.service;
	}
	/**
	 * Streams chat responses from the AI workflow builder.
	 * Delegates to the wrapped service and yields its chunks unchanged.
	 */
	async *chat(payload: { question: string }, user: IUser) {
		const service = this.getService();
		yield* service.chat(payload, user);
	}
}

136
pnpm-lock.yaml generated
View File

@@ -6,9 +6,18 @@ settings:
catalogs:
default:
'@langchain/anthropic':
specifier: 0.3.11
version: 0.3.11
'@langchain/community':
specifier: 0.3.24
version: 0.3.24
'@langchain/core':
specifier: 0.3.30
version: 0.3.30
'@langchain/openai':
specifier: 0.3.17
version: 0.3.17
'@sentry/node':
specifier: 8.52.1
version: 8.52.1
@@ -315,6 +324,40 @@ importers:
specifier: workspace:*
version: link:../packages/workflow
packages/@n8n/ai-workflow-builder:
dependencies:
'@langchain/anthropic':
specifier: 'catalog:'
version: 0.3.11(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/core':
specifier: 'catalog:'
version: 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
'@langchain/langgraph':
specifier: 0.2.45
version: 0.2.45(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(react@18.2.0)
'@langchain/openai':
specifier: 'catalog:'
version: 0.3.17(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@n8n/config':
specifier: workspace:*
version: link:../config
'@n8n/di':
specifier: workspace:*
version: link:../di
'@n8n_io/ai-assistant-sdk':
specifier: 1.13.0
version: 1.13.0
n8n-workflow:
specifier: workspace:*
version: link:../../workflow
zod:
specifier: 'catalog:'
version: 3.24.1
devDependencies:
'@n8n/typescript-config':
specifier: workspace:*
version: link:../typescript-config
packages/@n8n/api-types:
dependencies:
'@n8n/permissions':
@@ -595,7 +638,7 @@ importers:
version: 3.666.0(@aws-sdk/client-sts@3.666.0)
'@getzep/zep-cloud':
specifier: 1.0.12
version: 1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(fd386e1130022c8548c06dd951c5cbf0))
version: 1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(6006e4ec6d742ca755e4775e38dc0b4c))
'@getzep/zep-js':
specifier: 0.9.0
version: 0.9.0
@@ -612,8 +655,8 @@ importers:
specifier: 2.8.0
version: 2.8.0
'@langchain/anthropic':
specifier: 0.3.14
version: 0.3.14(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
specifier: 'catalog:'
version: 0.3.11(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/aws':
specifier: 0.1.3
version: 0.1.3(@aws-sdk/client-sso-oidc@3.666.0(@aws-sdk/client-sts@3.666.0))(@aws-sdk/client-sts@3.666.0)(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))
@@ -621,8 +664,8 @@ importers:
specifier: 0.3.2
version: 0.3.2(@aws-sdk/client-sso-oidc@3.666.0(@aws-sdk/client-sts@3.666.0))(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/community':
specifier: 0.3.24
version: 0.3.24(c5fc7e11d6e6167a46cb8d3fd9b490a5)
specifier: 'catalog:'
version: 0.3.24(c9c4611e1b6dc12df1941bdd7ead1452)
'@langchain/core':
specifier: 'catalog:'
version: 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
@@ -645,7 +688,7 @@ importers:
specifier: 0.1.4
version: 0.1.4(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))
'@langchain/openai':
specifier: 0.3.17
specifier: 'catalog:'
version: 0.3.17(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/pinecone':
specifier: 0.1.3
@@ -721,7 +764,7 @@ importers:
version: 23.0.1
langchain:
specifier: 0.3.11
version: 0.3.11(fd386e1130022c8548c06dd951c5cbf0)
version: 0.3.11(6006e4ec6d742ca755e4775e38dc0b4c)
lodash:
specifier: 'catalog:'
version: 4.17.21
@@ -951,6 +994,9 @@ importers:
'@google-cloud/secret-manager':
specifier: 5.6.0
version: 5.6.0(encoding@0.1.13)
'@n8n/ai-workflow-builder':
specifier: workspace:*
version: link:../@n8n/ai-workflow-builder
'@n8n/api-types':
specifier: workspace:*
version: link:../@n8n/api-types
@@ -2464,8 +2510,8 @@ packages:
'@anthropic-ai/sdk@0.27.3':
resolution: {integrity: sha512-IjLt0gd3L4jlOfilxVXTifn42FnVffMgDC04RJK1KDZpmkBWLv0XC92MVVmkxrFZNS/7l3xWgP/I3nqtX1sQHw==}
'@anthropic-ai/sdk@0.37.0':
resolution: {integrity: sha512-tHjX2YbkUBwEgg0JZU3EFSSAQPoK4qQR/NFYa8Vtzd5UAyXzZksCw2In69Rml4R/TyHPBfRYaLK35XiOe33pjw==}
'@anthropic-ai/sdk@0.32.1':
resolution: {integrity: sha512-U9JwTrDvdQ9iWuABVsMLj8nJVwAyQz6QXvgLsVhryhCEPkLsbcP/MXxm+jYcAwLoV8ESbaTTjnD4kuAFa+Hyjg==}
'@apidevtools/json-schema-ref-parser@11.9.3':
resolution: {integrity: sha512-60vepv88RwcJtSHrD6MjIL6Ta3SOYbgfnkHb+ppAVK+o9mXprRtulx7VlRl3lN3bbvysAfCS7WMVfhUYemB0IQ==}
@@ -4110,8 +4156,8 @@ packages:
'@kwsites/promise-deferred@1.1.1':
resolution: {integrity: sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==}
'@langchain/anthropic@0.3.14':
resolution: {integrity: sha512-zfix+qo/coIkgjTYpadp71IAWGXriIfImYLwMr1HnFsit4/RN9DU+aEOdm0nTwycbaneUpwWs5yfje8IKWHfsA==}
'@langchain/anthropic@0.3.11':
resolution: {integrity: sha512-rYjDZjMwVQ+cYeJd9IoSESdkkG8fc0m3siGRYKNy6qgYMnqCz8sUPKBanXwbZAs6wvspPCGgNK9WONfaCeX97A==}
engines: {node: '>=18'}
peerDependencies:
'@langchain/core': '>=0.2.21 <0.4.0'
@@ -4536,6 +4582,29 @@ packages:
peerDependencies:
'@langchain/core': '>=0.2.21 <0.4.0'
'@langchain/langgraph-checkpoint@0.0.17':
resolution: {integrity: sha512-6b3CuVVYx+7x0uWLG+7YXz9j2iBa+tn2AXvkLxzEvaAsLE6Sij++8PPbS2BZzC+S/FPJdWsz6I5bsrqL0BYrCA==}
engines: {node: '>=18'}
peerDependencies:
'@langchain/core': '>=0.2.31 <0.4.0'
'@langchain/langgraph-sdk@0.0.70':
resolution: {integrity: sha512-O8I12bfeMVz5fOrXnIcK4IdRf50IqyJTO458V56wAIHLNoi4H8/JHM+2M+Y4H2PtslXIGnvomWqlBd0eY5z/Og==}
peerDependencies:
'@langchain/core': '>=0.2.31 <0.4.0'
react: ^18 || ^19
peerDependenciesMeta:
'@langchain/core':
optional: true
react:
optional: true
'@langchain/langgraph@0.2.45':
resolution: {integrity: sha512-yemuA+aTIRLL3WBVQ5TGvFMeEJQm2zoVyjMvHWyekIvg4w7Q4cu3CYB8f+yOXwd6OaxMtnNIX0wGh4hIw/Db+A==}
engines: {node: '>=18'}
peerDependencies:
'@langchain/core': '>=0.2.36 <0.3.0 || >=0.3.9 < 0.4.0'
'@langchain/mistralai@0.2.0':
resolution: {integrity: sha512-VdfbKZopAuSXf/vlXbriGWLK3c7j5s47DoB3S31xpprY2BMSKZZiX9vE9TsgxMfAPuIDPIYcfgU7p1upvTYt8g==}
engines: {node: '>=18'}
@@ -14047,7 +14116,7 @@ snapshots:
- encoding
- supports-color
'@anthropic-ai/sdk@0.37.0(encoding@0.1.13)':
'@anthropic-ai/sdk@0.32.1(encoding@0.1.13)':
dependencies:
'@types/node': 18.16.16
'@types/node-fetch': 2.6.4
@@ -16237,7 +16306,7 @@ snapshots:
'@gar/promisify@1.1.3':
optional: true
'@getzep/zep-cloud@1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(fd386e1130022c8548c06dd951c5cbf0))':
'@getzep/zep-cloud@1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(6006e4ec6d742ca755e4775e38dc0b4c))':
dependencies:
form-data: 4.0.0
node-fetch: 2.7.0(encoding@0.1.13)
@@ -16246,7 +16315,7 @@ snapshots:
zod: 3.24.1
optionalDependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
langchain: 0.3.11(fd386e1130022c8548c06dd951c5cbf0)
langchain: 0.3.11(6006e4ec6d742ca755e4775e38dc0b4c)
transitivePeerDependencies:
- encoding
@@ -16724,9 +16793,9 @@ snapshots:
'@kwsites/promise-deferred@1.1.1': {}
'@langchain/anthropic@0.3.14(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)':
'@langchain/anthropic@0.3.11(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)':
dependencies:
'@anthropic-ai/sdk': 0.37.0(encoding@0.1.13)
'@anthropic-ai/sdk': 0.32.1(encoding@0.1.13)
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
fast-xml-parser: 4.4.1
zod: 3.24.1
@@ -16761,7 +16830,7 @@ snapshots:
- aws-crt
- encoding
'@langchain/community@0.3.24(c5fc7e11d6e6167a46cb8d3fd9b490a5)':
'@langchain/community@0.3.24(c9c4611e1b6dc12df1941bdd7ead1452)':
dependencies:
'@browserbasehq/stagehand': 1.9.0(@playwright/test@1.49.1)(deepmerge@4.3.1)(dotenv@16.4.5)(encoding@0.1.13)(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))(zod@3.24.1)
'@ibm-cloud/watsonx-ai': 1.1.2
@@ -16772,7 +16841,7 @@ snapshots:
flat: 5.0.2
ibm-cloud-sdk-core: 5.1.0
js-yaml: 4.1.0
langchain: 0.3.11(fd386e1130022c8548c06dd951c5cbf0)
langchain: 0.3.11(6006e4ec6d742ca755e4775e38dc0b4c)
langsmith: 0.2.15(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
openai: 4.78.1(encoding@0.1.13)(zod@3.24.1)
uuid: 10.0.0
@@ -16787,7 +16856,7 @@ snapshots:
'@aws-sdk/credential-provider-node': 3.666.0(@aws-sdk/client-sso-oidc@3.666.0(@aws-sdk/client-sts@3.666.0))(@aws-sdk/client-sts@3.666.0)
'@azure/storage-blob': 12.18.0(encoding@0.1.13)
'@browserbasehq/sdk': 2.0.0(encoding@0.1.13)
'@getzep/zep-cloud': 1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(fd386e1130022c8548c06dd951c5cbf0))
'@getzep/zep-cloud': 1.0.12(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)(langchain@0.3.11(6006e4ec6d742ca755e4775e38dc0b4c))
'@getzep/zep-js': 0.9.0
'@google-ai/generativelanguage': 2.6.0(encoding@0.1.13)
'@google-cloud/storage': 7.12.1(encoding@0.1.13)
@@ -16903,6 +16972,31 @@ snapshots:
- encoding
- supports-color
'@langchain/langgraph-checkpoint@0.0.17(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))':
dependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
uuid: 10.0.0
'@langchain/langgraph-sdk@0.0.70(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(react@18.2.0)':
dependencies:
'@types/json-schema': 7.0.15
p-queue: 6.6.2
p-retry: 4.6.2
uuid: 9.0.1
optionalDependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
react: 18.2.0
'@langchain/langgraph@0.2.45(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(react@18.2.0)':
dependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
'@langchain/langgraph-checkpoint': 0.0.17(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))
'@langchain/langgraph-sdk': 0.0.70(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(react@18.2.0)
uuid: 10.0.0
zod: 3.24.1
transitivePeerDependencies:
- react
'@langchain/mistralai@0.2.0(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))':
dependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
@@ -23977,7 +24071,7 @@ snapshots:
kuler@2.0.0: {}
langchain@0.3.11(fd386e1130022c8548c06dd951c5cbf0):
langchain@0.3.11(6006e4ec6d742ca755e4775e38dc0b4c):
dependencies:
'@langchain/core': 0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1))
'@langchain/openai': 0.3.17(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
@@ -23993,7 +24087,7 @@ snapshots:
zod: 3.24.1
zod-to-json-schema: 3.23.3(zod@3.24.1)
optionalDependencies:
'@langchain/anthropic': 0.3.14(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/anthropic': 0.3.11(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/aws': 0.1.3(@aws-sdk/client-sso-oidc@3.666.0(@aws-sdk/client-sts@3.666.0))(@aws-sdk/client-sts@3.666.0)(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))
'@langchain/cohere': 0.3.2(@aws-sdk/client-sso-oidc@3.666.0(@aws-sdk/client-sts@3.666.0))(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(encoding@0.1.13)
'@langchain/google-genai': 0.1.6(@langchain/core@0.3.30(openai@4.78.1(encoding@0.1.13)(zod@3.24.1)))(zod@3.24.1)

View File

@@ -36,6 +36,9 @@ catalog:
zod: 3.24.1
'zod-to-json-schema': 3.23.3
'@langchain/core': 0.3.30
'@langchain/openai': 0.3.17
'@langchain/anthropic': 0.3.11
'@langchain/community': 0.3.24
catalogs:
frontend: