Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-16 09:36:44 +00:00)

feat: Add planning step to AI workflow builder (no-changelog) (#18737)

Co-authored-by: Eugene Molodkin <eugene@n8n.io>
@@ -1,11 +1,13 @@
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

-import type { SimpleWorkflow } from '../../src/types/workflow.js';
-import type { WorkflowBuilderAgent, ChatPayload } from '../../src/workflow-builder-agent.js';
-import { evaluateWorkflow } from '../chains/workflow-evaluator.js';
-import type { EvaluationInput, EvaluationResult, TestCase } from '../types/evaluation.js';
-import { isWorkflowStateValues } from '../types/langsmith.js';
-import type { TestResult } from '../types/test-result.js';
+import { PLAN_APPROVAL_MESSAGE } from '../../src/constants';
+import type { SimpleWorkflow } from '../../src/types/workflow';
+import type { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
+import { evaluateWorkflow } from '../chains/workflow-evaluator';
+import type { EvaluationInput, EvaluationResult, TestCase } from '../types/evaluation';
+import { isWorkflowStateValues } from '../types/langsmith';
+import type { TestResult } from '../types/test-result';
+import { consumeGenerator, getChatPayload } from '../utils/evaluation-helpers';

 /**
  * Creates an error result for a failed test
@@ -48,19 +50,12 @@ export async function runSingleTest(
   userId: string = 'test-user',
 ): Promise<TestResult> {
   try {
-    const chatPayload: ChatPayload = {
-      message: testCase.prompt,
-      workflowContext: {
-        currentWorkflow: { id: testCase.id, nodes: [], connections: {} },
-      },
-    };
-
     // Generate workflow
     const startTime = Date.now();
-    let messageCount = 0;
-    for await (const _output of agent.chat(chatPayload, userId)) {
-      messageCount++;
-    }
+    // First generate plan
+    await consumeGenerator(agent.chat(getChatPayload(testCase.prompt, testCase.id), userId));
+    // Confirm plan
+    await consumeGenerator(agent.chat(getChatPayload(PLAN_APPROVAL_MESSAGE, testCase.id), userId));
     const generationTime = Date.now() - startTime;

     // Get generated workflow with validation
@@ -138,7 +138,8 @@ export function createLangsmithEvaluator(

     for (const metric of usageMetrics) {
       if (metric.value !== undefined) {
-        results.push({ key: metric.key, score: metric.value });
+        // Langsmith has a limitation on large scores (>99999) so we track in thousands
+        results.push({ key: metric.key, score: metric.value / 1000 });
       }
     }

@@ -1,20 +1,20 @@
-import type { BaseChatModel } from '@langchain/core/language_models/chat_models.js';
-import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain.js';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
 import { evaluate } from 'langsmith/evaluation';
 import type { INodeTypeDescription } from 'n8n-workflow';
 import pc from 'picocolors';

-import { createLangsmithEvaluator } from './evaluator.js';
-import type { ChatPayload } from '../../src/workflow-builder-agent.js';
-import type { WorkflowState } from '../../src/workflow-state.js';
-import { setupTestEnvironment, createAgent } from '../core/environment.js';
+import { createLangsmithEvaluator } from './evaluator';
+import { PLAN_APPROVAL_MESSAGE } from '../../src/constants';
+import type { WorkflowState } from '../../src/workflow-state';
+import { setupTestEnvironment, createAgent } from '../core/environment';
 import {
   generateRunId,
   safeExtractUsage,
   isWorkflowStateValues,
   extractMessageContent,
-} from '../types/langsmith.js';
-import { formatHeader } from '../utils/evaluation-helpers.js';
+} from '../types/langsmith';
+import { consumeGenerator, formatHeader, getChatPayload } from '../utils/evaluation-helpers';

 /**
  * Creates a workflow generation function for Langsmith evaluation
@@ -44,18 +44,14 @@ function createWorkflowGenerator(
     // Create agent for this run
     const agent = createAgent(parsedNodeTypes, llm, tracer);

-    const chatPayload: ChatPayload = {
-      message: messageContent,
-      workflowContext: {
-        currentWorkflow: { id: runId, nodes: [], connections: {} },
-      },
-    };
-
-    // Generate workflow
-    let messageCount = 0;
-    for await (const _output of agent.chat(chatPayload, 'langsmith-eval-user')) {
-      messageCount++;
-    }
+    // First generate plan
+    await consumeGenerator(
+      agent.chat(getChatPayload(messageContent, runId), 'langsmith-eval-user'),
+    );
+    // Confirm plan
+    await consumeGenerator(
+      agent.chat(getChatPayload(PLAN_APPROVAL_MESSAGE, runId), 'langsmith-eval-user'),
+    );

     // Get generated workflow with validation
     const state = await agent.getState(runId, 'langsmith-eval-user');
@@ -77,7 +73,7 @@ function createWorkflowGenerator(

     return {
       workflow: generatedWorkflow,
-      prompt: chatPayload.message,
+      prompt: messageContent,
       usage,
     };
   };
@@ -7,10 +7,11 @@ import type { INodeTypeDescription } from 'n8n-workflow';
 import { join } from 'path';
 import pc from 'picocolors';

-import { anthropicClaudeSonnet4 } from '../../src/llm-config.js';
-import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent.js';
-import type { Violation } from '../types/evaluation.js';
-import type { TestResult } from '../types/test-result.js';
+import { anthropicClaudeSonnet4 } from '../../src/llm-config';
+import type { ChatPayload } from '../../src/workflow-builder-agent';
+import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
+import type { Violation } from '../types/evaluation';
+import type { TestResult } from '../types/test-result';

 /**
  * Sets up the LLM with proper configuration
@@ -268,3 +269,18 @@ export function saveEvaluationResults(

   return { reportPath, resultsPath };
 }
+
+export async function consumeGenerator<T>(gen: AsyncGenerator<T>) {
+  for await (const _ of gen) {
+    /* consume all */
+  }
+}
+
+export function getChatPayload(message: string, id: string): ChatPayload {
+  return {
+    message,
+    workflowContext: {
+      currentWorkflow: { id, nodes: [], connections: {} },
+    },
+  };
+}
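With these helpers, an evaluation run now has to drive the builder twice per test case: once with the user prompt to produce a plan, and once with PLAN_APPROVAL_MESSAGE to approve it. A minimal sketch of that two-step call, using the helpers added above (the structural type for `agent` is an assumption for illustration, not part of the change set):

// Sketch only: drive the plan → approve → build flow with the new helpers.
import { PLAN_APPROVAL_MESSAGE } from '../../src/constants';
import { consumeGenerator, getChatPayload } from '../utils/evaluation-helpers';

type ChatAgent = { chat(payload: unknown, userId: string): AsyncGenerator<unknown> };

async function generateWorkflow(agent: ChatAgent, prompt: string, id: string, userId = 'test-user') {
  // First pass: the builder creates a plan and interrupts for review.
  await consumeGenerator(agent.chat(getChatPayload(prompt, id), userId));
  // Second pass: approving the plan resumes the graph and builds the workflow.
  await consumeGenerator(agent.chat(getChatPayload(PLAN_APPROVAL_MESSAGE, id), userId));
}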
@@ -0,0 +1,333 @@
+/* eslint-disable @typescript-eslint/no-unsafe-member-access */
+
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
+import { HumanMessage, SystemMessage } from '@langchain/core/messages';
+import { DynamicStructuredTool } from '@langchain/core/tools';
+import { StateGraph, MessagesAnnotation, END, START } from '@langchain/langgraph';
+import { ToolNode } from '@langchain/langgraph/prebuilt';
+import { jsonParse, type INodeTypeDescription } from 'n8n-workflow';
+import { z } from 'zod';
+
+import { isAIMessage } from '@/types/langchain';
+
+import { LLMServiceError, ToolExecutionError } from '../errors';
+import { createNodeDetailsTool } from '../tools/node-details.tool';
+import { createNodeSearchTool } from '../tools/node-search.tool';
+
+const planNodeSchema = z.object({
+  nodeType: z
+    .string()
+    .describe('The exact n8n node type identifier (e.g., "n8n-nodes-base.httpRequest")'),
+  nodeName: z
+    .string()
+    .describe('A descriptive name for this node instance (e.g., "Get Weather Data")'),
+  reasoning: z
+    .string()
+    .describe('Brief explanation of why this node is needed and what it will do'),
+});
+
+const workflowPlanSchema = z.object({
+  intro: z.string().describe('A concise summary of the workflow plan'),
+  plan: z
+    .array(planNodeSchema)
+    .min(1)
+    .describe('Ordered list of nodes that will be used to build the workflow'),
+});
+
+const generateWorkflowPlanTool = new DynamicStructuredTool({
+  name: 'generate_workflow_plan',
+  description:
+    'Create a structured plan of n8n nodes based on user requirements and available node information',
+  schema: workflowPlanSchema,
+  func: async (input) => {
+    return input;
+  },
+});
+
+export type WorkflowPlanNode = z.infer<typeof planNodeSchema>;
+export type WorkflowPlan = z.infer<typeof workflowPlanSchema>;
+
+const SYSTEM_PROMPT = `You are a Workflow Planning Assistant for n8n. Your task is to analyze user requests and create a detailed plan of n8n nodes that will be used to build the workflow.
+
+## Your Process
+1. Analyze the user's request to understand what they want to automate
+2. Use the search_nodes tool to find relevant n8n nodes for each part of the workflow
+3. Use the get_node_details tool to understand specific nodes' capabilities
+4. Use the generate_workflow_plan tool to create the final structured plan
+
+## Guidelines
+- Be thorough in your search - search for different keywords and concepts
+- Use exact node type identifiers (e.g., "n8n-nodes-base.httpRequest")
+- Order nodes logically from trigger/start to final output
+- Place sub-nodes (e.g., AI tools) immediately after their root node (e.g. AI Agent)
+- Only include nodes that directly fulfill the user's requirements
+- Consider data transformation needs between nodes
+- For AI workflows, search for AI-related nodes and sub-nodes
+- ALWAYS start with a trigger node and workflow configuration node (see Workflow Structure Requirements)
+
+## Workflow Structure Requirements
+CRITICAL: Every workflow MUST follow this structure:
+
+1. **First Node - Trigger (MANDATORY)**
+   - Every workflow MUST start with a trigger node
+   - Choose the appropriate trigger based on user intent (see Trigger Selection Logic)
+   - If no trigger is specified, intelligently select the most appropriate one
+
+2. **Second Node - Workflow Configuration (MANDATORY)**
+   - ALWAYS add an Edit Fields (Set) node immediately after the trigger
+   - Name it "Workflow Configuration"
+   - This node serves as the main configuration point for the workflow
+   - Downstream nodes will reference values from this node via expressions
+   - This centralizes key workflow parameters and makes configuration easier
+
+## Trigger Selection Logic
+Choose the trigger based on the workflow's purpose:
+
+### Manual Trigger (n8n-nodes-base.manualTrigger)
+Use when:
+- User wants to test or debug the workflow
+- It's a one-time or ad-hoc process
+- User hasn't specified any trigger requirements
+- The workflow is for data processing or transformation tasks
+
+### Chat Trigger (n8n-nodes-langchain.chatTrigger)
+Use when:
+- Building conversational AI or chatbot workflows
+- User mentions chat, conversation, or interactive communication
+- The workflow needs to respond to user messages
+- Building AI agents that interact with users
+
+### Webhook Trigger (n8n-nodes-base.webhook)
+Use when:
+- Integrating with external systems or APIs
+- User mentions webhooks, API calls, or external events
+- The workflow needs to be triggered by external applications
+- Building automated responses to system events
+
+## Search Strategy
+- Search for nodes by functionality (e.g., "email", "database", "api")
+- Search for specific service names mentioned by the user
+- For AI workflows, search for sub-nodes using connection types:
+  - NodeConnectionTypes.AiLanguageModel for LLM providers
+  - NodeConnectionTypes.AiTool for AI tools
+  - NodeConnectionTypes.AiMemory for memory nodes
+  - NodeConnectionTypes.AiEmbedding for embeddings
+  - NodeConnectionTypes.AiVectorStore for vector stores
+
+## Connection Parameter Rules
+When planning nodes, consider their connection requirements:
+
+### Static vs Dynamic Nodes
+- **Static nodes** (standard inputs/outputs): HTTP Request, Set, Code
+- **Dynamic nodes** (parameter-dependent connections): AI nodes, Vector Stores, Document Loaders
+
+### Dynamic Node Parameters That Affect Connections
+- AI Agent: hasOutputParser creates additional input for schema
+- Vector Store: mode parameter affects available connections (insert vs retrieve-as-tool)
+- Document Loader: textSplittingMode and dataType affect input structure
+
+## AI Node Connection Patterns
+CRITICAL: AI sub-nodes PROVIDE capabilities, making them the SOURCE in connections:
+
+### Main AI Connections
+- OpenAI Chat Model → AI Agent [ai_languageModel]
+- Calculator Tool → AI Agent [ai_tool]
+- Window Buffer Memory → AI Agent [ai_memory]
+- Token Splitter → Default Data Loader [ai_textSplitter]
+- Default Data Loader → Vector Store [ai_document]
+- Embeddings OpenAI → Vector Store [ai_embedding]
+
+Why: Sub-nodes enhance main nodes with their capabilities
+
+## RAG Workflow Pattern
+CRITICAL: For RAG (Retrieval-Augmented Generation) workflows, follow this specific pattern:
+
+Main data flow:
+- Data source (e.g., HTTP Request) → Vector Store [main connection]
+- The Vector Store receives the actual data through its main input
+
+AI capability connections:
+- Document Loader → Vector Store [ai_document] - provides document processing
+- Embeddings → Vector Store [ai_embedding] - provides embedding generation
+- Text Splitter → Document Loader [ai_textSplitter] - provides text chunking
+
+Common mistake to avoid:
+- NEVER connect Document Loader to main data outputs
+- Document Loader is NOT a data processor in the main flow
+- Document Loader is an AI sub-node that gives Vector Store the ability to process documents
+
+## Agent Node Distinction
+CRITICAL: Distinguish between two different agent node types:
+
+1. **AI Agent** (n8n-nodes-langchain.agent)
+   - Main workflow node that orchestrates AI tasks
+   - Accepts inputs: trigger data, memory, tools, language models
+   - Use for: Primary AI logic, chatbots, autonomous workflows
+
+2. **AI Agent Tool** (n8n-nodes-langchain.agentTool)
+   - Sub-node that acts as a tool for another AI Agent
+   - Provides agent-as-a-tool capability to parent agents
+   - Use for: Multi-agent systems where one agent calls another
+
+Default assumption: When users ask for "an agent" or "AI agent", they mean the main AI Agent node unless they explicitly mention "tool", "sub-agent", or "agent for another agent".
+
+## Output Format
+After searching and analyzing available nodes, use the generate_workflow_plan tool to create a structured plan with:
+- The exact node type to use
+- A descriptive name for the node instance
+- Clear reasoning for why this node is needed AND how it connects to other nodes
+- Consider connection requirements in your reasoning
+
+Your plan MUST always include:
+1. An appropriate trigger node as the first node
+2. An Edit Fields (Set) node named "Workflow Configuration" as the second node
+3. All other nodes needed to fulfill the user's requirements
+
+After using the generate_workflow_plan tool, only respond with a single word "DONE" to indicate the plan is complete.
+Remember: Be precise about node types, understand connection patterns, and always include trigger and configuration nodes.`;
+
+function formatPlanFeedback(previousPlan: WorkflowPlan, feedback: string) {
+  return `Previous plan: ${JSON.stringify(previousPlan, null, 2)}\n\nUser feedback: ${feedback}\n\nPlease adjust the plan based on the feedback.`;
+}
+
+/**
+ * Creates a workflow planner agent that can search for and analyze nodes
+ */
+export function createWorkflowPlannerAgent(llm: BaseChatModel, nodeTypes: INodeTypeDescription[]) {
+  if (!llm.bindTools) {
+    throw new LLMServiceError("LLM doesn't support binding tools", { llmModel: llm._llmType() });
+  }
+
+  // Create the tools for the planner
+  const tools = [
+    createNodeSearchTool(nodeTypes).tool,
+    createNodeDetailsTool(nodeTypes).tool,
+    generateWorkflowPlanTool,
+  ];
+
+  // Create a ToolNode with our tools
+  const toolNode = new ToolNode(tools);
+
+  // Bind tools to the LLM
+  const modelWithTools = llm.bindTools(tools);
+
+  // Define the function that determines whether to continue
+  const shouldContinue = (state: typeof MessagesAnnotation.State) => {
+    const { messages } = state;
+    const lastMessage = messages[messages.length - 1];
+
+    // Check if the last message has tool calls
+    if (
+      'tool_calls' in lastMessage &&
+      Array.isArray(lastMessage.tool_calls) &&
+      lastMessage.tool_calls?.length
+    ) {
+      // Check if one of the tool calls is the final plan generation
+      const hasPlanTool = lastMessage.tool_calls.some((tc) => tc.name === 'generate_workflow_plan');
+
+      if (hasPlanTool) {
+        // If we're generating the plan, still need to execute the tool
+        return 'tools';
+      }
+
+      return 'tools';
+    }
+    return END;
+  };
+
+  // Define the function that calls the model
+  const callModel = async (state: typeof MessagesAnnotation.State) => {
+    const { messages } = state;
+    const response = await modelWithTools.invoke(messages);
+    return { messages: [response] };
+  };
+
+  // Build the graph
+  const workflow = new StateGraph(MessagesAnnotation)
+    .addNode('agent', callModel)
+    .addNode('tools', toolNode)
+    .addEdge(START, 'agent')
+    .addConditionalEdges('agent', shouldContinue, ['tools', END])
+    .addEdge('tools', 'agent');
+
+  const app = workflow.compile();
+
+  return {
+    async plan(
+      userRequest: string,
+      previousPlan?: WorkflowPlan,
+      feedback?: string,
+    ): Promise<
+      | {
+          plan: WorkflowPlan;
+          toolMessages: BaseMessage[];
+        }
+      | { text: string }
+    > {
+      // Prepare the initial messages
+      const systemMessage = new SystemMessage(SYSTEM_PROMPT);

+      let userMessage = userRequest;
+      if (previousPlan && feedback) {
+        userMessage += '\n\n';
+        userMessage += formatPlanFeedback(previousPlan, feedback);
+      }
+
+      const humanMessage = new HumanMessage(userMessage);
+
+      // Invoke the graph
+      const result = await app.invoke({
+        messages: [systemMessage, humanMessage],
+      });
+
+      // Extract tools messages
+      const toolMessages = result.messages.filter((msg) => {
+        if (['system', 'human'].includes(msg.getType())) {
+          return false;
+        }
+
+        // Do not include final AI message
+        if (isAIMessage(msg) && (msg.tool_calls ?? []).length === 0) {
+          return false;
+        }
+
+        return true;
+      });
+
+      const workflowPlanToolCall = result.messages.findLast((msg): msg is ToolMessage => {
+        return msg.name === 'generate_workflow_plan';
+      });
+
+      if (!workflowPlanToolCall) {
+        const lastAiMessage = result.messages.findLast((msg) => {
+          return isAIMessage(msg) && (msg.tool_calls ?? []).length === 0;
+        });
+
+        if (lastAiMessage) {
+          return {
+            text: lastAiMessage.text,
+          };
+        }
+
+        throw new ToolExecutionError('Invalid response from agent - no plan generated');
+      }
+
+      try {
+        if (typeof workflowPlanToolCall.content !== 'string') {
+          throw new ToolExecutionError('Workflow plan tool call content is not a string');
+        }
+
+        const workflowPlan = jsonParse<WorkflowPlan>(workflowPlanToolCall.content);
+        return {
+          plan: workflowPlan,
+          toolMessages,
+        };
+      } catch (error) {
+        throw new ToolExecutionError(
+          `Failed to parse workflow plan: ${error instanceof Error ? error.message : 'Unknown error'}`,
+        );
+      }
+    },
+  };
+}
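The planner's `plan()` returns either a structured plan or plain text, so callers branch on the result shape. A hedged sketch of direct usage, where `llm` (a tool-capable BaseChatModel) and `nodeTypes` are assumed to come from the existing environment setup and the request string is illustrative:

// Sketch only: calling the planner outside the builder graph.
const planner = createWorkflowPlannerAgent(llm, nodeTypes);
const result = await planner.plan('Fetch an API every hour and post new items to Slack');

if ('plan' in result) {
  // Structured plan produced via the generate_workflow_plan tool call.
  for (const node of result.plan.plan) {
    console.log(`${node.nodeName} (${node.nodeType}): ${node.reasoning}`);
  }
} else {
  // The planner replied with free text instead of calling the plan tool.
  console.log(result.text);
}

// Re-planning with feedback reuses the same entry point:
// await planner.plan(userRequest, previousPlan, 'Use a webhook trigger instead');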
@@ -1,102 +0,0 @@
-import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import type { AIMessageChunk } from '@langchain/core/messages';
-import { SystemMessage } from '@langchain/core/messages';
-import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts';
-import { DynamicStructuredTool } from '@langchain/core/tools';
-import { z } from 'zod';
-
-import { LLMServiceError } from '../errors';
-
-export const plannerPrompt = new SystemMessage(
-  `You are a Workflow Planner for n8n, a platform that helps users automate processes across different services and APIs.
-
-## Your Task
-Convert user requests into clear, sequential workflow steps that can be implemented with n8n nodes. ONLY include steps that are explicitly stated or directly implied in the user request.
-
-## Guidelines
-1. Analyze the user request to understand their end goal and required process
-2. Break down the automation into logical steps based on complexity - simpler workflows need fewer steps, complex ones may need more
-3. Focus ONLY on actions mentioned directly in the user prompt
-4. Create steps that can be mapped to n8n nodes later
-5. Order steps sequentially from trigger to final action
-6. Be specific about data transformations needed ONLY if mentioned in the request
-7. NEVER add extra steps like storing data or sending notifications unless explicitly requested
-8. Only recommend raw HTTP requests if you think there isn't a suitable n8n node
-
-## CRITICAL REQUIREMENTS
-- DO NOT add any steps not directly mentioned or implied in the user request
-- DO NOT assume the user wants to store data in a database unless explicitly stated
-- DO NOT assume the user wants to send notifications or emails unless explicitly stated
-- DO NOT add any "nice to have" steps that aren't clearly part of the user's request
-- Keep the workflow EXACTLY focused on what was requested, nothing more
-
-## Output Format
-Return ONLY a JSON object with this structure:
-\`\`\`json
-{
-  "steps": [
-    "[Brief action-oriented description]",
-    "[Brief action-oriented description]",
-    ...
-  ]
-}
-\`\`\`
-
-## Examples of Good Step Descriptions
-- "Trigger when a new email arrives in Gmail inbox"
-- "Filter emails to only include those with attachments"
-- "Extract data from CSV attachments"
-- "Transform data to required format for the API"
-- "Send HTTP request to external API with extracted data"
-- "Post success message to Slack channel"
-
-IMPORTANT: Do not include HTML tags, markdown formatting, or explanations outside the JSON.`,
-);
-
-const planSchema = z.object({
-  steps: z
-    .array(
-      z
-        .string()
-        .describe(
-          'A clear, action-oriented description of a single workflow step. Do not include "Step N" or similar, just the action',
-        ),
-    )
-    .min(1)
-    .describe(
-      'An ordered list of workflow steps that, when implemented, will fulfill the user request. Each step should be concise, action-oriented, and implementable with n8n nodes.',
-    ),
-});
-
-const generatePlanTool = new DynamicStructuredTool({
-  name: 'generate_plan',
-  description:
-    'Convert a user workflow request into a logical sequence of clear, achievable steps that can be implemented with n8n nodes.',
-  schema: planSchema,
-  func: async (input) => {
-    return { steps: input.steps };
-  },
-});
-
-const humanTemplate = '{prompt}';
-const chatPrompt = ChatPromptTemplate.fromMessages([
-  plannerPrompt,
-  HumanMessagePromptTemplate.fromTemplate(humanTemplate),
-]);
-
-export const plannerChain = (llm: BaseChatModel) => {
-  if (!llm.bindTools) {
-    throw new LLMServiceError("LLM doesn't support binding tools", { llmModel: llm._llmType() });
-  }
-
-  return chatPrompt
-    .pipe(
-      llm.bindTools([generatePlanTool], {
-        tool_choice: generatePlanTool.name,
-      }),
-    )
-    .pipe((x: AIMessageChunk) => {
-      const toolCall = x.tool_calls?.[0];
-      return (toolCall?.args as z.infer<typeof planSchema>).steps;
-    });
-};
@@ -34,3 +34,5 @@ export const MAX_WORKFLOW_LENGTH_TOKENS = 30_000; // Tokens
  * Used for rough token count estimation from character counts.
  */
 export const AVG_CHARS_PER_TOKEN_ANTHROPIC = 2.5;
+
+export const PLAN_APPROVAL_MESSAGE = 'Proceed with the plan';
@@ -1,14 +1,17 @@
+/* eslint-disable @typescript-eslint/require-await */
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { ToolMessage } from '@langchain/core/messages';
 import { AIMessage, HumanMessage } from '@langchain/core/messages';
 import type { MemorySaver } from '@langchain/langgraph';
-import { GraphRecursionError } from '@langchain/langgraph';
+import { GraphRecursionError, Command } from '@langchain/langgraph';
 import type { Logger } from '@n8n/backend-common';
 import { mock } from 'jest-mock-extended';
 import type { INodeTypeDescription } from 'n8n-workflow';
 import { ApplicationError } from 'n8n-workflow';

-import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
+import type { WorkflowPlanNode } from '@/agents/workflow-planner-agent';
+import { createWorkflowPlannerAgent } from '@/agents/workflow-planner-agent';
+import { MAX_AI_BUILDER_PROMPT_LENGTH, PLAN_APPROVAL_MESSAGE } from '@/constants';
 import { ValidationError } from '@/errors';
 import type { StreamOutput } from '@/types/streaming';
 import { createStreamProcessor, formatMessages } from '@/utils/stream-processor';
@@ -57,6 +60,12 @@ jest.mock('@/utils/tool-executor', () => ({
 jest.mock('@/chains/conversation-compact', () => ({
   conversationCompactChain: jest.fn(),
 }));
+jest.mock('@/chains/workflow-name', () => ({
+  workflowNameChain: jest.fn(),
+}));
+jest.mock('@/agents/workflow-planner-agent', () => ({
+  createWorkflowPlannerAgent: jest.fn(),
+}));

 const mockRandomUUID = jest.fn();
 Object.defineProperty(global, 'crypto', {
@@ -361,4 +370,420 @@ describe('WorkflowBuilderAgent', () => {
       expect(result.sessions[0].messages).toHaveLength(0);
     });
   });
+
+  describe('Workflow Planning', () => {
+    let mockPlannerAgent: ReturnType<typeof createWorkflowPlannerAgent>;
+    const mockCreateWorkflowPlannerAgent = createWorkflowPlannerAgent as jest.MockedFunction<
+      typeof createWorkflowPlannerAgent
+    >;
+
+    // Helper function to mock stream processor with custom output
+    const mockStreamProcessor = (output: StreamOutput | Error) => {
+      if (output instanceof Error) {
+        mockCreateStreamProcessor.mockImplementation(() => {
+          // eslint-disable-next-line require-yield
+          return (async function* () {
+            throw output;
+          })();
+        });
+      } else {
+        mockCreateStreamProcessor.mockImplementation(() => {
+          return (async function* () {
+            yield output;
+          })();
+        });
+      }
+    };
+
+    // Helper function to run chat and collect results
+    const runChatAndCollectResults = async (payload: ChatPayload) => {
+      const generator = agent.chat(payload);
+      const results = [];
+      for await (const result of generator) {
+        results.push(result);
+      }
+      return results;
+    };
+
+    beforeEach(() => {
+      // Reset the mock stream processor for planning tests
+      mockCreateStreamProcessor.mockReset();
+
+      mockPlannerAgent = {
+        plan: jest.fn(),
+      };
+      mockCreateWorkflowPlannerAgent.mockReturnValue(mockPlannerAgent);
+    });
+
+    describe('create_plan', () => {
+      it('should create a workflow plan from user message', async () => {
+        const payload: ChatPayload = {
+          message: 'Create a workflow to process data',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Creating workflow plan...',
+            },
+          ],
+        } as StreamOutput);
+
+        const results = await runChatAndCollectResults(payload);
+
+        expect(results).toHaveLength(1);
+        expect(results[0]).toHaveProperty('messages');
+      });
+
+      it('should handle planning errors gracefully', async () => {
+        const payload: ChatPayload = {
+          message: 'Create invalid workflow',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor(new ValidationError('Invalid plan request'));
+
+        await expect(runChatAndCollectResults(payload)).rejects.toThrow(ValidationError);
+      });
+    });
+
+    describe('reviewPlan', () => {
+      it('should handle plan approval via interrupt', async () => {
+        const mockPlan: WorkflowPlanNode[] = [
+          {
+            nodeType: 'n8n-nodes-base.manualTrigger',
+            nodeName: 'Manual Trigger',
+            reasoning: 'Start the workflow manually',
+          },
+        ];
+
+        const payload: ChatPayload = {
+          message: PLAN_APPROVAL_MESSAGE,
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        const testAgent = new WorkflowBuilderAgent(config);
+
+        // Mock the agent with a pending interrupt
+        const mockCompiledAgent = {
+          stream: jest.fn().mockImplementation(async function* (input: unknown) {
+            // If it's a Command with resume, it means plan was approved
+            if (input instanceof Command && input.resume) {
+              yield [
+                'agent',
+                {
+                  planStatus: 'approved',
+                  messages: [new AIMessage('Plan approved, executing...')],
+                },
+              ];
+            }
+          }),
+          getState: jest.fn().mockResolvedValue({
+            values: {
+              messages: [],
+              workflowPlan: {
+                intro: 'Test plan',
+                plan: mockPlan,
+              },
+            },
+            tasks: [
+              {
+                interrupts: [
+                  {
+                    value: {
+                      plan: mockPlan,
+                      message: 'Test plan',
+                    },
+                  },
+                ],
+              },
+            ],
+          }),
+          updateState: jest.fn(),
+        };
+
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        jest.spyOn(testAgent as any, 'createWorkflow').mockReturnValue({
+          compile: jest.fn().mockReturnValue(mockCompiledAgent),
+        });
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Processing...',
+            },
+          ],
+        } as StreamOutput);
+
+        const generator = testAgent.chat(payload);
+        const results = [];
+        for await (const result of generator) {
+          results.push(result);
+        }
+
+        // Verify that stream was called with a Command containing approval
+        expect(mockCompiledAgent.stream).toHaveBeenCalledWith(
+          expect.objectContaining({
+            resume: {
+              action: 'approve',
+              feedback: undefined,
+            },
+          }),
+          expect.any(Object),
+        );
+      });
+
+      it('should handle plan rejection with feedback', async () => {
+        const mockPlan: WorkflowPlanNode[] = [
+          {
+            nodeType: 'n8n-nodes-base.manualTrigger',
+            nodeName: 'Manual Trigger',
+            reasoning: 'Start the workflow manually',
+          },
+        ];
+
+        const feedback = 'Please add error handling';
+        const payload: ChatPayload = {
+          message: feedback,
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        const testAgent = new WorkflowBuilderAgent(config);
+
+        // Mock the agent with a pending interrupt
+        const mockCompiledAgent = {
+          stream: jest.fn().mockImplementation(async function* (input: unknown) {
+            // If it's a Command with resume and feedback, it means plan was rejected
+            // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+            if (input instanceof Command && input.resume?.action === 'adjust') {
+              yield [
+                'adjustPlan',
+                {
+                  planStatus: 'rejected',
+                  planFeedback: feedback,
+                  messages: [new HumanMessage(feedback)],
+                },
+              ];
+            }
+          }),
+          getState: jest.fn().mockResolvedValue({
+            values: {
+              messages: [],
+              workflowPlan: {
+                intro: 'Test plan',
+                plan: mockPlan,
+              },
+            },
+            tasks: [
+              {
+                interrupts: [
+                  {
+                    value: {
+                      plan: mockPlan,
+                      message: 'Test plan',
+                    },
+                  },
+                ],
+              },
+            ],
+          }),
+          updateState: jest.fn(),
+        };
+
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        jest.spyOn(testAgent as any, 'createWorkflow').mockReturnValue({
+          compile: jest.fn().mockReturnValue(mockCompiledAgent),
+        });
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Processing...',
+            },
+          ],
+        } as StreamOutput);
+
+        const generator = testAgent.chat(payload);
+        const results = [];
+        for await (const result of generator) {
+          results.push(result);
+        }
+
+        // Verify that stream was called with a Command containing rejection and feedback
+        expect(mockCompiledAgent.stream).toHaveBeenCalledWith(
+          expect.objectContaining({
+            resume: {
+              action: 'adjust',
+              feedback,
+            },
+          }),
+          expect.any(Object),
+        );
+      });
+    });
+
+    describe('adjustPlan', () => {
+      it('should adjust plan based on user feedback', async () => {
+        const payload: ChatPayload = {
+          message: 'Add error handling',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Adjusting plan with error handling...',
+            },
+          ],
+        } as StreamOutput);
+
+        const results = await runChatAndCollectResults(payload);
+
+        expect(results).toHaveLength(1);
+        expect(results[0].messages).toBeDefined();
+        expect(results[0].messages[0]).toHaveProperty('text');
+      });
+
+      it('should remove previous plan tool messages when adjusting', async () => {
+        const payload: ChatPayload = {
+          message: 'Use webhook instead',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Adjusting plan to use webhook...',
+            },
+          ],
+        } as StreamOutput);
+
+        const results = await runChatAndCollectResults(payload);
+
+        expect(results).toHaveLength(1);
+        expect(results[0].messages).toBeDefined();
+      });
+    });
+
+    describe('Plan state routing', () => {
+      it('should route to createPlan when no plan exists', async () => {
+        const payload: ChatPayload = {
+          message: 'Build a workflow',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Creating plan...',
+            },
+          ],
+        } as StreamOutput);
+
+        const results = await runChatAndCollectResults(payload);
+
+        expect(results).toHaveLength(1);
+        expect(results[0]).toHaveProperty('messages');
+      });
+
+      it('should route to agent when plan is approved', async () => {
+        const payload: ChatPayload = {
+          message: 'Continue building',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        mockStreamProcessor({
+          messages: [
+            {
+              role: 'assistant',
+              type: 'message',
+              text: 'Building workflow based on approved plan...',
+            },
+          ],
+        } as StreamOutput);
+
+        const results = await runChatAndCollectResults(payload);

+        expect(results).toHaveLength(1);
+        expect(results[0]).toHaveProperty('messages');
+      });
+    });
+
+    describe('Interrupt handling', () => {
+      it('should properly handle interrupt for plan review', async () => {
+        // This test verifies that the interrupt mechanism is properly set up
+        // The actual interrupt is handled by LangGraph, we just verify the setup
+        const payload: ChatPayload = {
+          message: 'Create workflow to fetch data',
+          workflowContext: {
+            currentWorkflow: { nodes: [] },
+          },
+        };
+
+        // Mock the stream processor to simulate interrupt handling
+        mockCreateStreamProcessor.mockImplementation(() => {
+          return (async function* () {
+            yield {
+              messages: [
+                {
+                  role: 'assistant',
+                  type: 'message',
+                  text: 'Creating plan for review...',
+                },
+              ],
+            } as StreamOutput;
+
+            yield {
+              messages: [
+                {
+                  role: 'assistant',
+                  type: 'message',
+                  text: 'Plan created, awaiting approval...',
+                },
+              ],
+            } as StreamOutput;
+          })();
+        });
+
+        const generator = agent.chat(payload);
+        const results = [];
+        for await (const result of generator) {
+          results.push(result);
+        }
+
+        // Verify we got results from the stream
+        expect(results.length).toBeGreaterThan(1);
+        expect(results[0]).toHaveProperty('messages');
+      });
+    });
+  });
 });
@@ -1,6 +1,7 @@
 import { ChatPromptTemplate } from '@langchain/core/prompts';

-import { instanceUrlPrompt } from '@/chains/prompts/instance-url';
+import type { WorkflowPlan } from '../../agents/workflow-planner-agent';
+import { instanceUrlPrompt } from '../../chains/prompts/instance-url';

 const systemPrompt = `You are an AI assistant specialized in creating and editing n8n workflows. Your goal is to help users build efficient, well-connected workflows by intelligently using the available tools.
 <core_principle>
@@ -177,6 +178,35 @@ Common failures from relying on defaults:
 ALWAYS check node details obtained in Analysis Phase and configure accordingly. Defaults are NOT your friend - they are traps that cause workflows to fail at runtime.
 </node_defaults_warning>

+<workflow_configuration_node>
+CRITICAL: Always include a Workflow Configuration node at the start of every workflow.
+
+The Workflow Configuration node (n8n-nodes-base.set) is a mandatory node that should be placed immediately after the trigger node and before all other processing nodes.
+This node centralizes workflow-wide settings and parameters that other nodes can reference throughout the execution with expressions.
+
+Placement rules:
+- ALWAYS add between trigger and first processing node
+- Connect: Trigger → Workflow Configuration → First processing node
+- This creates a single source of truth for workflow parameters
+
+Configuration approach:
+- Include URLs, thresholds, string constants and any reusable values
+- Other nodes reference these via expressions: {{ $('Workflow Configuration').first().json.variableName }}
+- Add only parameters that are used by other nodes, DO NOT add unnecessary fields
+
+Workflow configuration node usage example:
+1. Schedule Trigger → Workflow Configuration → HTTP Request → Process Data
+2. Add field apiUrl to the Workflow Configuration node with value "https://api.example.com/data"
+3. Reference in HTTP Request node: "{{ $('Workflow Configuration').first().json.apiUrl }}" instead of directly setting the URL
+
+IMPORTANT:
+- Workflow Configuration node is not meant for credentials or sensitive data.
+- Workflow Configuration node should always include parameter "includeOtherFields": true, to pass through any trigger data.
+- Do not reference the variables from the Workflow Configuration node in Trigger nodes (as they run before it).
+
+Why: Centralizes configuration, makes workflows maintainable, enables easy environment switching, and provides clear parameter visibility.
+</workflow_configuration_node>
+
 <configuration_requirements>
 ALWAYS configure nodes after adding and connecting them. This is NOT optional.

@@ -375,6 +405,34 @@ const previousConversationSummary = `
 {previousSummary}
 </previous_summary>`;

+const workflowPlan = '{workflowPlan}';
+
+export const planFormatter = (plan?: WorkflowPlan | null) => {
+  if (!plan) return '<workflow_plan>EMPTY</workflow_plan>';
+
+  const nodesPlan = plan.plan.map((node) => {
+    return `
+<workflow_plan_node>
+  <type>${node.nodeType}</type>
+  <name>${node.nodeName}</name>
+  <reasoning>${node.reasoning}</reasoning>
+</workflow_plan_node>
+`;
+  });
+
+  return `
+<workflow_plan>
+<workflow_plan_intro>
+${plan.intro}
+</workflow_plan_intro>
+
+<workflow_plan_nodes>
+${nodesPlan.join('\n')}
+</workflow_plan_nodes>
+</workflow_plan>
+`;
+};
+
 export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
   [
     'system',
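For reference, planFormatter renders the stored plan into the <workflow_plan> block that gets injected into the main agent's system prompt. A small sketch of its input and output (the plan values below are illustrative only; the node fields mirror the WorkflowPlanNode schema):

// Sketch only: example call to planFormatter with made-up plan values.
const formatted = planFormatter({
  intro: 'Fetch data on a schedule and post it to Slack',
  plan: [
    {
      nodeType: 'n8n-nodes-base.manualTrigger',
      nodeName: 'Manual Trigger',
      reasoning: 'Start the workflow manually',
    },
  ],
});
// `formatted` is a <workflow_plan> block containing the intro and one
// <workflow_plan_node> entry; planFormatter(null) returns
// '<workflow_plan>EMPTY</workflow_plan>'.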
@@ -410,6 +468,11 @@ export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
         text: previousConversationSummary,
         cache_control: { type: 'ephemeral' },
       },
+      {
+        type: 'text',
+        text: workflowPlan,
+        cache_control: { type: 'ephemeral' },
+      },
     ],
   ],
   ['placeholder', '{messages}'],
@@ -0,0 +1,5 @@
+import type { AIMessage, BaseMessage } from '@langchain/core/messages';
+
+export function isAIMessage(msg: BaseMessage): msg is AIMessage {
+  return msg.getType() === 'ai';
+}
@@ -47,26 +47,11 @@ export interface AgentChatMessage {
   text: string;
 }

-/**
- * Prompt validation message
- */
-export interface PromptValidationMessage {
-  role: 'assistant';
-  type: 'prompt-validation';
-  isWorkflowPrompt: boolean;
-  id: string;
-}
-
 /**
  * Union type for all possible message responses
  */
 export type MessageResponse =
-  | ((
-      | AssistantChatMessage
-      | AssistantSummaryMessage
-      | AgentChatMessage
-      | PromptValidationMessage
-    ) & {
+  | ((AssistantChatMessage | AssistantSummaryMessage | AgentChatMessage) & {
       quickReplies?: QuickReplyOption[];
     })
   | EndSessionMessage;
@@ -35,6 +35,15 @@ export interface ExecutionRequestChunk {
   reason: string;
 }

+/**
+ * Plan chunk for streaming
+ */
+export interface PlanChunk {
+  role: 'assistant';
+  type: 'plan';
+  plan: unknown;
+}
+
 /**
  * Union type for all stream chunks
  */
@@ -42,7 +51,8 @@ export type StreamChunk =
   | AgentMessageChunk
   | ToolProgressChunk
   | WorkflowUpdateChunk
-  | ExecutionRequestChunk;
+  | ExecutionRequestChunk
+  | PlanChunk;

 /**
  * Stream output containing messages
@@ -2,6 +2,7 @@ import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
 import type { ToolCall } from '@langchain/core/messages/tool';
 import type { DynamicStructuredTool } from '@langchain/core/tools';

+import type { WorkflowPlan } from '../agents/workflow-planner-agent';
 import type {
   AgentMessageChunk,
   ToolProgressChunk,
@@ -44,6 +45,24 @@ export function processStreamChunk(streamMode: string, chunk: unknown): StreamOu
     workflowJSON?: unknown;
     workflowOperations?: unknown;
   };
+  create_plan?: {
+    workflowPlan?: unknown;
+    planStatus?: string;
+    messages?: Array<{ content: string | Array<{ type: string; text: string }> }>;
+  };
+  review_plan?: {
+    planStatus?: string;
+  };
+  adjust_plan?: {
+    workflowPlan?: unknown;
+    planStatus?: string;
+  };
+  __interrupt__?: Array<{
+    value: unknown;
+    resumable: boolean;
+    ns: string[];
+    when: string;
+  }>;
 };

 if ((agentChunk?.delete_messages?.messages ?? []).length > 0) {
@@ -98,6 +117,40 @@ export function processStreamChunk(streamMode: string, chunk: unknown): StreamOu
   }
 }

+// Handle plan creation
+if (agentChunk?.create_plan?.workflowPlan) {
+  const workflowPlan = agentChunk.create_plan.workflowPlan as WorkflowPlan;
+  const planChunk = {
+    role: 'assistant' as const,
+    type: 'plan' as const,
+    plan: workflowPlan.plan,
+    message: workflowPlan.intro,
+  };
+  return { messages: [planChunk] };
+} else if ((agentChunk?.create_plan?.messages ?? []).length > 0) {
+  // When planner didn't create a plan, but responded with a message
+  const lastMessage =
+    agentChunk.create_plan!.messages![agentChunk.create_plan!.messages!.length - 1];
+  const messageChunk: AgentMessageChunk = {
+    role: 'assistant',
+    type: 'message',
+    text: lastMessage.content as string,
+  };
+
+  return { messages: [messageChunk] };
+}
+
+if (agentChunk?.adjust_plan?.workflowPlan) {
+  const workflowPlan = agentChunk.adjust_plan.workflowPlan as WorkflowPlan;
+  const planChunk = {
+    role: 'assistant' as const,
+    type: 'plan' as const,
+    plan: workflowPlan.plan,
+    message: workflowPlan.intro,
+  };
+  return { messages: [planChunk] };
+}
+
 // Handle process_operations updates - emit workflow update after operations are processed
 if (agentChunk?.process_operations) {
   // Check if operations were processed (indicated by cleared operations array)
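A hedged sketch of how a create_plan state update surfaces as a plan message in the stream. The 'updates' stream-mode string is an assumption based on how agent chunks are handled in this file, and the plan values are illustrative:

// Sketch only: feed a create_plan chunk through processStreamChunk.
const output = processStreamChunk('updates', {
  create_plan: {
    workflowPlan: {
      intro: 'Test plan',
      plan: [
        {
          nodeType: 'n8n-nodes-base.manualTrigger',
          nodeName: 'Manual Trigger',
          reasoning: 'Start the workflow manually',
        },
      ],
    },
  },
});
// Expected shape: { messages: [{ role: 'assistant', type: 'plan',
//   plan: [...], message: 'Test plan' }] }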
@@ -197,6 +250,16 @@ function createToolCallMessage(
   toolCall: ToolCall,
   builderTool?: BuilderTool,
 ): Record<string, unknown> {
+  if (toolCall.name === 'generate_workflow_plan') {
+    const workflowPlan = toolCall.args as WorkflowPlan;
+    return {
+      role: 'assistant',
+      type: 'plan',
+      plan: workflowPlan.plan,
+      message: workflowPlan.intro,
+    };
+  }
+
   return {
     id: toolCall.id,
     toolCallId: toolCall.id,

@@ -388,6 +388,9 @@ describe('operations-processor', () => {
 			messages: [],
 			workflowContext: {},
 			previousSummary: 'EMPTY',
+			workflowPlan: null,
+			planStatus: null,
+			planFeedback: null,
 		});

 	it('should process operations and clear them', () => {

@@ -49,6 +49,9 @@ describe('tool-executor', () => {
 			messages,
 			workflowContext: {},
 			previousSummary: 'EMPTY',
+			planFeedback: null,
+			planStatus: null,
+			workflowPlan: null,
 		});

 	// Helper to create mock tool

@@ -1,9 +1,15 @@
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import type { ToolMessage } from '@langchain/core/messages';
-import { AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
+import { ToolMessage, AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
-import { StateGraph, MemorySaver, END, GraphRecursionError } from '@langchain/langgraph';
+import {
+	StateGraph,
+	MemorySaver,
+	END,
+	GraphRecursionError,
+	Command,
+	interrupt,
+} from '@langchain/langgraph';
 import type { Logger } from '@n8n/backend-common';
 import {
 	ApplicationError,

@@ -17,10 +23,12 @@ import {
 	DEFAULT_AUTO_COMPACT_THRESHOLD_TOKENS,
 	MAX_AI_BUILDER_PROMPT_LENGTH,
 	MAX_INPUT_TOKENS,
+	PLAN_APPROVAL_MESSAGE,
 } from '@/constants';
 import { createGetNodeParameterTool } from '@/tools/get-node-parameter.tool';
 import { trimWorkflowJSON } from '@/utils/trim-workflow-context';

+import { type WorkflowPlanNode, createWorkflowPlannerAgent } from './agents/workflow-planner-agent';
 import { conversationCompactChain } from './chains/conversation-compact';
 import { workflowNameChain } from './chains/workflow-name';
 import { LLMServiceError, ValidationError, WorkflowStateError } from './errors';

@@ -28,7 +36,7 @@ import { createAddNodeTool } from './tools/add-node.tool';
 import { createConnectNodesTool } from './tools/connect-nodes.tool';
 import { createNodeDetailsTool } from './tools/node-details.tool';
 import { createNodeSearchTool } from './tools/node-search.tool';
-import { mainAgentPrompt } from './tools/prompts/main-agent.prompt';
+import { mainAgentPrompt, planFormatter } from './tools/prompts/main-agent.prompt';
 import { createRemoveNodeTool } from './tools/remove-node.tool';
 import { createUpdateNodeParametersTool } from './tools/update-node-parameters.tool';
 import type { SimpleWorkflow } from './types/workflow';

@@ -121,6 +129,7 @@ export class WorkflowBuilderAgent {
 			workflowJSON: trimWorkflowJSON(state.workflowJSON),
 			executionData: state.workflowContext?.executionData ?? {},
 			executionSchema: state.workflowContext?.executionSchema ?? [],
+			workflowPlan: planFormatter(state.workflowPlan),
 			instanceUrl: this.instanceUrl,
 		});

@@ -156,8 +165,145 @@ export class WorkflowBuilderAgent {
 			return tokensUsed > this.autoCompactThresholdTokens;
 		};

+		/**
+		 * Creates a plan for the workflow based on user requirements
+		 */
+		const createPlan = async (state: typeof WorkflowState.State) => {
+			const { messages } = state;
+			const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!;
+
+			if (typeof lastHumanMessage.content !== 'string') {
+				throw new ValidationError('Invalid message content for planning');
+			}
+
+			// Create the planner agent with tools
+			const plannerAgent = createWorkflowPlannerAgent(this.llmSimpleTask, this.parsedNodeTypes);
+
+			// Generate the workflow plan
+			const plannerResult = await plannerAgent.plan(lastHumanMessage.content);
+
+			// If we got a structured plan, return it
+			if ('plan' in plannerResult) {
+				const { plan, toolMessages } = plannerResult;
+				this.logger?.debug('Generated workflow plan: ' + JSON.stringify(plan, null, 2));
+
+				return {
+					workflowPlan: plan,
+					planStatus: 'pending' as const,
+					messages: toolMessages,
+				};
+			}
+
+			// If we didn't get a plan, just return the text response
+			this.logger?.debug('Planner returned text response: ' + plannerResult.text);
+			return {
+				messages: [
+					new AIMessage({
+						content: plannerResult.text,
+					}),
+				],
+			};
+		};
+
+		/**
+		 * Reviews the plan with the user for approval
+		 */
+		const reviewPlan = async (state: typeof WorkflowState.State) => {
+			const { workflowPlan } = state;
+
+			if (!workflowPlan) {
+				throw new ValidationError('No workflow plan to review');
+			}
+
+			// Use interrupt to pause and show the plan to the user
+			// The frontend will display the plan and wait for user action
+			const userResponse = interrupt<
+				{ plan: WorkflowPlanNode[]; message: string },
+				{ action: 'approve' | 'adjust'; feedback?: string }
+			>({
+				plan: workflowPlan.plan,
+				message: workflowPlan.intro,
+			});
+
+			// Process the user's response
+			if (userResponse.action === 'approve') {
+				// User approved the plan, mark as approved and continue
+				return {
+					planStatus: 'approved' as const,
+				};
+			} else if (userResponse.action === 'adjust') {
+				// User wants adjustments, add feedback and mark for adjustment
+				return {
+					planStatus: 'rejected' as const,
+					planFeedback: userResponse.feedback ?? 'Please adjust the plan',
+				};
+			}
+
+			return {};
+		};
+
+		/**
+		 * Adjusts the plan based on user feedback
+		 */
+		const adjustPlan = async (state: typeof WorkflowState.State) => {
+			const { messages, planFeedback, workflowPlan } = state;
+			const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!;
+
+			if (typeof lastHumanMessage.content !== 'string') {
+				throw new ValidationError('Invalid message content for plan adjustment');
+			}
+
+			// Create the planner agent with tools
+			const plannerAgent = createWorkflowPlannerAgent(this.llmSimpleTask, this.parsedNodeTypes);
+
+			// Generate an adjusted plan with feedback
+			const adjustedPlan = await plannerAgent.plan(
+				lastHumanMessage.content,
+				workflowPlan ?? undefined,
+				planFeedback ?? undefined,
+			);
+
+			// If we get a text response instead of a plan, just return that
+			if ('text' in adjustedPlan) {
+				return {
+					messages: [
+						new AIMessage({
+							content: adjustedPlan.text,
+						}),
+					],
+				};
+			}
+
+			// Remove previous plan tool messages to avoid confusion
+			const filteredMessages = messages.map((m) => {
+				if (m instanceof ToolMessage && m.name === 'generate_workflow_plan') {
+					return new RemoveMessage({ id: m.id! });
+				}
+
+				if (m instanceof AIMessage && m.tool_calls && m.tool_calls.length > 0) {
+					const hasPlanCall = m.tool_calls.find((tc) => tc.name === 'generate_workflow_plan');
+					if (hasPlanCall) {
+						return new RemoveMessage({ id: m.id! });
+					}
+				}
+
+				return m;
+			});
+
+			const planAdjustmentMessage = new HumanMessage({ content: planFeedback ?? '' });
+
+			this.logger?.debug('Adjusted workflow plan: ' + JSON.stringify(adjustedPlan, null, 2));
+
+			return {
+				workflowPlan: adjustedPlan.plan,
+				messages: [...filteredMessages, planAdjustmentMessage, ...adjustedPlan.toolMessages],
+				planStatus: 'pending' as const,
+				planFeedback: null,
+			};
+		};
+
 		const shouldModifyState = (state: typeof WorkflowState.State) => {
-			const { messages, workflowContext } = state;
+			const { messages, workflowContext, planStatus } = state;
 			const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!; // There always should be at least one human message in the array

 			if (lastHumanMessage.content === '/compact') {

@@ -178,6 +324,11 @@ export class WorkflowBuilderAgent {
 				return 'auto_compact_messages';
 			}

+			// If we don't have a plan yet, and the workflow is empty, create a plan
+			if (!planStatus && workflowContext?.currentWorkflow?.nodes?.length === 0) {
+				return 'create_plan';
+			}
+
 			return 'agent';
 		};

@@ -205,6 +356,9 @@ export class WorkflowBuilderAgent {
 				connections: {},
 				name: '',
 			},
+			workflowPlan: null,
+			planStatus: null,
+			planFeedback: null,
 		};

 		return stateUpdate;

@@ -293,11 +447,24 @@ export class WorkflowBuilderAgent {
 			.addNode('compact_messages', compactSession)
 			.addNode('auto_compact_messages', compactSession)
 			.addNode('create_workflow_name', createWorkflowName)
+			.addNode('create_plan', createPlan)
+			.addNode('review_plan', reviewPlan)
+			.addNode('adjust_plan', adjustPlan)
 			.addConditionalEdges('__start__', shouldModifyState)
+			// .addEdge('create_plan', 'review_plan')
+			.addConditionalEdges('create_plan', (state) => {
+				// If a plan was created, move to review, otherwise back to planning
+				return state.workflowPlan ? 'review_plan' : END;
+			})
+			.addConditionalEdges('review_plan', (state) => {
+				// Route based on the plan status after review
+				return state.planStatus === 'approved' ? 'agent' : 'adjust_plan';
+			})
+			.addEdge('adjust_plan', 'review_plan')
 			.addEdge('tools', 'process_operations')
 			.addEdge('process_operations', 'agent')
 			.addEdge('auto_compact_messages', 'agent')
-			.addEdge('create_workflow_name', 'agent')
+			.addEdge('create_workflow_name', 'create_plan')
 			.addEdge('delete_messages', END)
 			.addEdge('compact_messages', END)
 			.addConditionalEdges('agent', shouldContinue);

@@ -338,7 +505,7 @@ export class WorkflowBuilderAgent {
 		);

 		try {
-			const stream = await this.createAgentStream(payload, streamConfig, agent);
+			const stream = await this.createAgentStream(payload, streamConfig, agent, threadConfig);
 			yield* this.processAgentStream(stream, agent, threadConfig);
 		} catch (error: unknown) {
 			this.handleStreamError(error);

@@ -384,7 +551,33 @@ export class WorkflowBuilderAgent {
 		payload: ChatPayload,
 		streamConfig: RunnableConfig,
 		agent: ReturnType<ReturnType<typeof this.createWorkflow>['compile']>,
+		threadConfig: RunnableConfig,
 	) {
+		const currentState = await agent.getState(threadConfig);
+
+		// Check if there are pending interrupts (e.g., plan review)
+		const interruptedTask = currentState.tasks.find(
+			(task) => task.interrupts && task.interrupts.length > 0,
+		);
+
+		if (interruptedTask) {
+			// We have a pending interrupt - likely a plan review
+
+			// Check if the message is an approval message, right now we only check for exact match
+			// in the future we might want to use a LLM classifier for more flexibility
+			const action = payload.message.trim() === PLAN_APPROVAL_MESSAGE ? 'approve' : 'adjust';
+
+			// Resume with the appropriate command
+			const resumeCommand = new Command({
+				resume: {
+					action,
+					feedback: action === 'adjust' ? payload.message : undefined,
+				},
+			});
+
+			return await agent.stream(resumeCommand, streamConfig);
+		}
+
 		return await agent.stream(
 			{
 				messages: [new HumanMessage({ content: payload.message })],

@@ -2,6 +2,7 @@ import type { BaseMessage } from '@langchain/core/messages';
 import { HumanMessage } from '@langchain/core/messages';
 import { Annotation, messagesStateReducer } from '@langchain/langgraph';

+import type { WorkflowPlan } from './agents/workflow-planner-agent';
 import type { SimpleWorkflow, WorkflowOperation } from './types/workflow';
 import type { ChatPayload } from './workflow-builder-agent';

@@ -75,7 +76,21 @@ export const WorkflowState = Annotation.Root({
 		reducer: operationsReducer,
 		default: () => [],
 	}),
-	// Whether the user prompt is a workflow prompt.
+	// The planned workflow nodes
+	workflowPlan: Annotation<WorkflowPlan | null>({
+		reducer: (x, y) => y ?? x,
+		default: () => null,
+	}),
+	// Status of the workflow plan
+	planStatus: Annotation<'pending' | 'approved' | 'rejected' | null>({
+		reducer: (x, y) => y ?? x,
+		default: () => null,
+	}),
+	// User feedback on the plan
+	planFeedback: Annotation<string | null>({
+		reducer: (x, y) => y ?? x,
+		default: () => null,
+	}),
 	// Latest workflow context
 	workflowContext: Annotation<ChatPayload['workflowContext'] | undefined>({
 		reducer: (x, y) => y ?? x,

@@ -24,6 +24,7 @@ interface Props {
 	};
 	messages?: ChatUI.AssistantMessage[];
 	streaming?: boolean;
+	disabled?: boolean;
 	loadingMessage?: string;
 	sessionId?: string;
 	title?: string;

@@ -66,7 +67,9 @@ function normalizeMessages(messages: ChatUI.AssistantMessage[]): ChatUI.Assistan

 // filter out these messages so that tool collapsing works correctly
 function filterOutHiddenMessages(messages: ChatUI.AssistantMessage[]): ChatUI.AssistantMessage[] {
-	return messages.filter((message) => Boolean(getSupportedMessageComponent(message.type)));
+	return messages.filter(
+		(message) => Boolean(getSupportedMessageComponent(message.type)) || message.type === 'custom',
+	);
 }

 function collapseToolMessages(messages: ChatUI.AssistantMessage[]): ChatUI.AssistantMessage[] {

@@ -165,7 +168,7 @@ const sessionEnded = computed(() => {
 });

 const sendDisabled = computed(() => {
-	return !textInputValue.value || props.streaming || sessionEnded.value;
+	return !textInputValue.value || props.streaming || sessionEnded.value || props.disabled;
 });

 const showPlaceholder = computed(() => {

@@ -226,6 +229,13 @@ watch(
 	},
 	{ immediate: true, deep: true },
 );
+
+// Expose focusInput method to parent components
+defineExpose({
+	focusInput: () => {
+		chatInput.value?.focus();
+	},
+});
 </script>

 <template>

@@ -265,7 +275,11 @@ watch(
 					@code-replace="() => emit('codeReplace', i)"
 					@code-undo="() => emit('codeUndo', i)"
 					@feedback="onRateMessage"
-				/>
+				>
+					<template v-if="$slots['custom-message']" #custom-message="customMessageProps">
+						<slot name="custom-message" v-bind="customMessageProps" />
+					</template>
+				</MessageWrapper>

 				<div
 					v-if="lastMessageQuickReplies.length && i === normalizedMessages.length - 1"

@@ -336,8 +350,8 @@ watch(
 					ref="chatInput"
 					v-model="textInputValue"
 					class="ignore-key-press-node-creator ignore-key-press-canvas"
-					:class="{ [$style.disabled]: sessionEnded || streaming }"
-					:disabled="sessionEnded || streaming"
+					:class="{ [$style.disabled]: sessionEnded || streaming || disabled }"
+					:disabled="sessionEnded || streaming || disabled"
 					:placeholder="placeholder ?? t('assistantChat.inputPlaceholder')"
 					rows="1"
 					wrap="hard"
File diff suppressed because it is too large

@@ -31,16 +31,27 @@ const messageComponent = computed<Component | null>(() => {
 </script>

 <template>
-	<component
-		:is="messageComponent"
-		v-if="messageComponent"
-		:message="message"
-		:is-first-of-role="isFirstOfRole"
-		:user="user"
-		:streaming="streaming"
-		:is-last-message="isLastMessage"
-		@code-replace="emit('codeReplace')"
-		@code-undo="emit('codeUndo')"
-		@feedback="(feedback: RatingFeedback) => emit('feedback', feedback)"
-	/>
+	<div>
+		<component
+			:is="messageComponent"
+			v-if="messageComponent"
+			:message="message"
+			:is-first-of-role="isFirstOfRole"
+			:user="user"
+			:streaming="streaming"
+			:is-last-message="isLastMessage"
+			@code-replace="emit('codeReplace')"
+			@code-undo="emit('codeUndo')"
+			@feedback="(feedback: RatingFeedback) => emit('feedback', feedback)"
+		/>
+		<slot
+			v-else-if="message.type === 'custom'"
+			name="custom-message"
+			:message="message"
+			:is-first-of-role="isFirstOfRole"
+			:user="user"
+			:streaming="streaming"
+			:is-last-message="isLastMessage"
+		/>
+	</div>
 </template>

@@ -23,6 +23,7 @@ export function getSupportedMessageComponent(type: ChatUI.AssistantMessage['type
 			return ToolMessage;
 		case 'agent-suggestion':
 		case 'workflow-updated':
+		case 'custom':
 			return null;
 		default:
 			return null;

@@ -87,6 +87,15 @@ export namespace ChatUI {
 		}>;
 	}

+	export interface CustomMessage {
+		id?: string;
+		role: 'assistant' | 'user';
+		type: 'custom';
+		message?: string;
+		customType: string;
+		data: unknown;
+	}
+
 	type MessagesWithReplies = (
 		| TextMessage
 		| CodeDiffMessage

@@ -106,6 +115,7 @@ export namespace ChatUI {
 		| AgentSuggestionMessage
 		| WorkflowUpdatedMessage
 		| ToolMessage
+		| CustomMessage
 	) & {
 		id?: string;
 		read?: boolean;

@@ -186,6 +196,12 @@ export function isToolMessage(
 	return msg.type === 'tool';
 }

+export function isCustomMessage(
+	msg: ChatUI.AssistantMessage,
+): msg is ChatUI.CustomMessage & { id?: string; read?: boolean } {
+	return msg.type === 'custom';
+}
+
 // Helper to ensure message has required id and read properties
 export function hasRequiredProps<T extends ChatUI.AssistantMessage>(
 	msg: T,

@@ -199,6 +199,10 @@
 	"aiAssistant.builder.canvasPrompt.startManually.title": "Start manually",
 	"aiAssistant.builder.canvasPrompt.startManually.subTitle": "Add the first node",
 	"aiAssistant.builder.streamAbortedMessage": "[Task aborted]",
+	"aiAssistant.builder.plan.intro": "Do you want to proceed with this plan?",
+	"aiAssistant.builder.plan.approve": "Approve Plan",
+	"aiAssistant.builder.plan.reject": "Request Changes",
+	"aiAssistant.builder.plan.whatToChange": "What would you like to change?",
 	"aiAssistant.assistant": "AI Assistant",
 	"aiAssistant.newSessionModal.title.part1": "Start new",
 	"aiAssistant.newSessionModal.title.part2": "session",

@@ -26,6 +26,7 @@ import { mockedStore } from '@/__tests__/utils';
 import { STORES } from '@n8n/stores';
 import { useWorkflowsStore } from '@/stores/workflows.store';
 import type { INodeUi } from '@/Interface';
+import { PLAN_APPROVAL_MESSAGE } from '@/constants';

 vi.mock('@/event-bus', () => ({
 	nodeViewEventBus: {

@@ -976,4 +977,382 @@ describe('AskAssistantBuild', () => {
 			expect(builderStore.initialGeneration).toBe(false);
 		});
 	});
+
+	describe('NodesPlan message handling', () => {
+		it('should render plan messages with appropriate controls', async () => {
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Here is my plan:',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data from an API',
+					},
+				],
+			};
+
+			const regularMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'text' as const,
+				content: 'Regular message',
+			};
+
+			// First test with regular message - should not show plan controls
+			builderStore.$patch({
+				chatMessages: [regularMessage],
+			});
+
+			const { queryByText } = renderComponent();
+			await flushPromises();
+
+			// Regular message should not have plan controls
+			expect(queryByText('aiAssistant.builder.plan.approve')).not.toBeInTheDocument();
+
+			// Now test with plan message
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+			await flushPromises();
+
+			// Plan message should have controls
+			expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();
+		});
+
+		it('should handle plan approval correctly', async () => {
+			// Setup: empty workflow so initialGeneration is true
+			workflowsStore.$patch({ workflow: { nodes: [], connections: {} } });
+
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Here is my plan:',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data from an API',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			const { getByText } = renderComponent();
+			await flushPromises();
+
+			// Find and click the approve button
+			const approveButton = getByText('aiAssistant.builder.plan.approve');
+			await fireEvent.click(approveButton);
+			await flushPromises();
+
+			// Verify that sendChatMessage was called with approval message
+			expect(builderStore.sendChatMessage).toHaveBeenCalledWith({
+				text: PLAN_APPROVAL_MESSAGE,
+				initialGeneration: true,
+			});
+		});
+
+		it('should handle plan rejection correctly', async () => {
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Here is my plan:',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data from an API',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			const { getByText, queryByText } = renderComponent();
+			await flushPromises();
+
+			// Find and click the reject button
+			const rejectButton = getByText('aiAssistant.builder.plan.reject');
+			await fireEvent.click(rejectButton);
+			await flushPromises();
+
+			// Verify plan controls are hidden after rejection
+			expect(queryByText('aiAssistant.builder.plan.approve')).not.toBeInTheDocument();
+			expect(queryByText('aiAssistant.builder.plan.reject')).not.toBeInTheDocument();
+		});
+
+		it('should show plan controls only for the last plan message', async () => {
+			const planMessage1 = {
+				id: 'plan1',
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'First plan:',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'First plan',
+					},
+				],
+			};
+
+			const textMessage = {
+				id: 'text1',
+				role: 'assistant' as const,
+				type: 'text' as const,
+				content: 'Some text',
+			};
+
+			const planMessage2 = {
+				id: 'plan2',
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Second plan:',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.emailSend',
+						nodeName: 'Send Email',
+						reasoning: 'Second plan',
+					},
+				],
+			};
+
+			// First set just the first plan message
+			builderStore.$patch({
+				chatMessages: [planMessage1],
+			});
+
+			const { queryByText } = renderComponent();
+			await flushPromises();
+
+			// First plan should show controls when it's alone
+			expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();
+
+			// Now add more messages
+			builderStore.$patch({
+				chatMessages: [planMessage1, textMessage, planMessage2],
+			});
+			await flushPromises();
+
+			// Now only the last plan message should have visible controls
+			// We check this by seeing that approve button is still there (from plan2)
+			expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();
+		});
+	});
+
+	describe('Plan status management in workflow messages watcher', () => {
+		it('should disable chat when plan is pending and enable after action', async () => {
+			// Add a nodes plan message as the last message
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Plan message',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			// Also need to mock workflowMessages getter
+			builderStore.workflowMessages = [];
+
+			const { getByText, queryByRole } = renderComponent();
+			await flushPromises();
+
+			// Trigger the watcher by updating workflowMessages
+			builderStore.workflowMessages = [
+				{
+					id: faker.string.uuid(),
+					role: 'assistant' as const,
+					type: 'workflow-updated' as const,
+					codeSnippet: '{}',
+				},
+			];
+
+			await flushPromises();
+
+			// Chat should be disabled while plan is pending
+			const chatInput = queryByRole('textbox');
+			expect(chatInput).toHaveAttribute('disabled');
+
+			// Approve the plan
+			const approveButton = getByText('aiAssistant.builder.plan.approve');
+			await fireEvent.click(approveButton);
+			await flushPromises();
+
+			// Chat should be enabled after approval
+			const chatInputAfter = queryByRole('textbox');
+			expect(chatInputAfter).not.toHaveAttribute('disabled');
+		});
+
+		it('should not disable chat when last message is not a nodes plan', async () => {
+			// Add a regular text message as the last message
+			const textMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'text' as const,
+				content: 'Regular message',
+			};
+
+			builderStore.$patch({
+				chatMessages: [textMessage],
+			});
+
+			// Mock workflowMessages getter
+			builderStore.workflowMessages = [];
+
+			const { queryByRole } = renderComponent();
+			await flushPromises();
+
+			// Trigger the watcher
+			builderStore.workflowMessages = [
+				{
+					id: faker.string.uuid(),
+					role: 'assistant' as const,
+					type: 'workflow-updated' as const,
+					codeSnippet: '{}',
+				},
+			];
+
+			await flushPromises();
+
+			// Chat should remain enabled
+			const chatInput = queryByRole('textbox');
+			expect(chatInput).not.toHaveAttribute('disabled');
+		});
+	});
+
+	describe('Disabled state when plan is pending', () => {
+		it('should disable chat input when plan status is pending', async () => {
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Plan message',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			// Mock workflowMessages getter
+			builderStore.workflowMessages = [];
+
+			const { queryByRole } = renderComponent();
+			await flushPromises();
+
+			// Trigger workflow message update to set planStatus to pending
+			builderStore.workflowMessages = [
+				{
+					id: faker.string.uuid(),
+					role: 'assistant' as const,
+					type: 'workflow-updated' as const,
+					codeSnippet: '{}',
+				},
+			];
+
+			await flushPromises();
+
+			const chatInput = queryByRole('textbox');
+			expect(chatInput).toHaveAttribute('disabled');
+		});
+
+		it('should enable chat input after plan approval', async () => {
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Plan message',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			const { queryByRole, getByText } = renderComponent();
+			await flushPromises();
+
+			// Click approve button
+			const approveButton = getByText('aiAssistant.builder.plan.approve');
+			await fireEvent.click(approveButton);
+			await flushPromises();
+
+			// Chat should be enabled after approval
+			const chatInput = queryByRole('textbox');
+			expect(chatInput).not.toHaveAttribute('disabled');
+		});
+
+		it('should enable chat input after plan rejection', async () => {
+			const nodesPlanMessage = {
+				id: faker.string.uuid(),
+				role: 'assistant' as const,
+				type: 'custom' as const,
+				customType: 'nodesPlan',
+				message: 'Plan message',
+				data: [
+					{
+						nodeType: 'n8n-nodes-base.httpRequest',
+						nodeName: 'HTTP Request',
+						reasoning: 'To fetch data',
+					},
+				],
+			};
+
+			builderStore.$patch({
+				chatMessages: [nodesPlanMessage],
+			});
+
+			const { queryByRole, getByText } = renderComponent();
+			await flushPromises();
+
+			// Click reject button
+			const rejectButton = getByText('aiAssistant.builder.plan.reject');
+			await fireEvent.click(rejectButton);
+			await flushPromises();
+
+			// Chat should be enabled after rejection
+			const chatInput = queryByRole('textbox');
+			expect(chatInput).not.toHaveAttribute('disabled');
+		});
+	});
 });

@@ -1,16 +1,19 @@
 <script lang="ts" setup>
 import { useBuilderStore } from '@/stores/builder.store';
 import { useUsersStore } from '@/stores/users.store';
-import { computed, watch, ref } from 'vue';
+import { computed, watch, ref, nextTick } from 'vue';
 import AskAssistantChat from '@n8n/design-system/components/AskAssistantChat/AskAssistantChat.vue';
 import { useTelemetry } from '@/composables/useTelemetry';
 import { useI18n } from '@n8n/i18n';
 import { useWorkflowsStore } from '@/stores/workflows.store';
 import { useRoute, useRouter } from 'vue-router';
 import { useWorkflowSaving } from '@/composables/useWorkflowSaving';
-import type { RatingFeedback } from '@n8n/design-system/types/assistant';
+import type { ChatUI, RatingFeedback } from '@n8n/design-system/types/assistant';
 import { isWorkflowUpdatedMessage } from '@n8n/design-system/types/assistant';
 import { nodeViewEventBus } from '@/event-bus';
+import type { NodesPlanMessageType } from './NodesPlanMessage.vue';
+import NodesPlanMessage from './NodesPlanMessage.vue';
+import { PLAN_APPROVAL_MESSAGE } from '@/constants';

 const emit = defineEmits<{
 	close: [];

@@ -28,6 +31,8 @@ const workflowSaver = useWorkflowSaving({ router });
 // Track processed workflow updates
 const processedWorkflowUpdates = ref(new Set<string>());
 const trackedTools = ref(new Set<string>());
+const planStatus = ref<'pending' | 'approved' | 'rejected'>();
+const assistantChatRef = ref<InstanceType<typeof AskAssistantChat> | null>(null);

 const user = computed(() => ({
 	firstName: usersStore.currentUser?.firstName ?? '',

@@ -51,6 +56,63 @@ async function onUserMessage(content: string) {
 	builderStore.sendChatMessage({ text: content, initialGeneration: isInitialGeneration });
 }

+function onNewWorkflow() {
+	builderStore.resetBuilderChat();
+	processedWorkflowUpdates.value.clear();
+	trackedTools.value.clear();
+}
+
+function onFeedback(feedback: RatingFeedback) {
+	if (feedback.rating) {
+		telemetry.track('User rated workflow generation', {
+			helpful: feedback.rating === 'up',
+			workflow_id: workflowsStore.workflowId,
+			session_id: builderStore.trackingSessionId,
+		});
+	}
+	if (feedback.feedback) {
+		telemetry.track('User submitted workflow generation feedback', {
+			feedback: feedback.feedback,
+			workflow_id: workflowsStore.workflowId,
+			session_id: builderStore.trackingSessionId,
+		});
+	}
+}
+
+function isNodesPlanMessage(message: ChatUI.AssistantMessage): message is NodesPlanMessageType {
+	return (
+		message.type === 'custom' && message.customType === 'nodesPlan' && Array.isArray(message.data)
+	);
+}
+
+function onApprovePlan() {
+	planStatus.value = 'approved';
+
+	telemetry.track('User clicked Approve plan', {
+		session_id: builderStore.trackingSessionId,
+	});
+
+	void onUserMessage(PLAN_APPROVAL_MESSAGE);
+}
+
+function onRequestChanges() {
+	planStatus.value = 'rejected';
+
+	telemetry.track('User clicked Request changes', {
+		session_id: builderStore.trackingSessionId,
+	});
+
+	// Focus the input after rejecting the plan
+	void nextTick(() => {
+		assistantChatRef.value?.focusInput();
+	});
+}
+
+function shouldShowPlanControls(message: NodesPlanMessageType) {
+	const planMessageIndex = builderStore.chatMessages.findIndex((msg) => msg.id === message.id);
+	return planMessageIndex === builderStore.chatMessages.length - 1;
+}
+
 // Watch for workflow updates and apply them
 watch(
 	() => builderStore.workflowMessages,

@@ -95,6 +157,12 @@ watch(
 			}
 		}
 		});
+
+		// Check if last message is a plan message and if so, whether to show controls
+		const lastMessage = builderStore.chatMessages[builderStore.chatMessages.length - 1];
+		if (lastMessage && isNodesPlanMessage(lastMessage)) {
+			planStatus.value = 'pending';
+		}
 	},
 	{ deep: true },
 );

@@ -126,29 +194,6 @@ watch(
 	},
 );

-function onNewWorkflow() {
-	builderStore.resetBuilderChat();
-	processedWorkflowUpdates.value.clear();
-	trackedTools.value.clear();
-}
-
-function onFeedback(feedback: RatingFeedback) {
-	if (feedback.rating) {
-		telemetry.track('User rated workflow generation', {
-			helpful: feedback.rating === 'up',
-			workflow_id: workflowsStore.workflowId,
-			session_id: builderStore.trackingSessionId,
-		});
-	}
-	if (feedback.feedback) {
-		telemetry.track('User submitted workflow generation feedback', {
-			feedback: feedback.feedback,
-			workflow_id: workflowsStore.workflowId,
-			session_id: builderStore.trackingSessionId,
-		});
-	}
-}
-
 // Reset on route change
 watch(currentRoute, () => {
 	onNewWorkflow();

@@ -158,9 +203,11 @@ watch(currentRoute, () => {
 <template>
 	<div data-test-id="ask-assistant-chat" tabindex="0" :class="$style.container" @keydown.stop>
 		<AskAssistantChat
+			ref="assistantChatRef"
 			:user="user"
 			:messages="builderStore.chatMessages"
 			:streaming="builderStore.streaming"
+			:disabled="planStatus === 'pending'"
 			:loading-message="loadingMessage"
 			:mode="i18n.baseText('aiAssistant.builder.mode')"
 			:title="'n8n AI'"

@@ -176,6 +223,16 @@ watch(currentRoute, () => {
 			<template #header>
 				<slot name="header" />
 			</template>
+			<template #custom-message="{ message, ...props }">
+				<NodesPlanMessage
+					v-if="message.customType === 'nodesPlan' && isNodesPlanMessage(message)"
+					:message="message"
+					:show-controls="shouldShowPlanControls(message)"
+					v-bind="props"
+					@approve-plan="onApprovePlan"
+					@request-changes="onRequestChanges"
+				/>
+			</template>
 			<template #placeholder>
 				<n8n-text :class="$style.topText">{{
 					i18n.baseText('aiAssistant.builder.placeholder')

@@ -0,0 +1,134 @@
+<!-- eslint-disable import-x/extensions -->
+<script setup lang="ts">
+import NodeIcon from '@/components/NodeIcon.vue';
+import { useNodeTypesStore } from '@/stores/nodeTypes.store';
+import type { ChatUI, RatingFeedback } from '@n8n/design-system';
+import BaseMessage from '@n8n/design-system/components/AskAssistantChat/messages/BaseMessage.vue';
+import { useI18n } from '@n8n/i18n';
+import { ref } from 'vue';
+
+interface NodesPlan {
+	nodeType: string;
+	nodeName: string;
+	reasoning: string;
+}
+
+export type NodesPlanMessageType = ChatUI.CustomMessage & {
+	data: NodesPlan[];
+};
+
+interface Props {
+	message: NodesPlanMessageType;
+	showControls?: boolean;
+	isFirstOfRole: boolean;
+	user?: {
+		firstName: string;
+		lastName: string;
+	};
+	streaming?: boolean;
+	isLastMessage?: boolean;
+}
+
+const props = defineProps<Props>();
+
+const nodeTypesStore = useNodeTypesStore();
+const i18n = useI18n();
+const showControlsLocal = ref(props.showControls ?? true);
+const changesRequested = ref(false);
+
+const emit = defineEmits<{
+	approvePlan: [];
+	requestChanges: [];
+	feedback: [RatingFeedback];
+}>();
+
+function onApprovePlan() {
+	showControlsLocal.value = false;
+	emit('approvePlan');
+}
+
+function onRequestChanges() {
+	showControlsLocal.value = false;
+	changesRequested.value = true;
+	emit('requestChanges');
+}
+</script>
+
+<template>
+	<BaseMessage
+		:class="$style.message"
+		:message="message"
+		:is-first-of-role="true"
+		:user="user"
+		@feedback="(feedback) => emit('feedback', feedback)"
+	>
+		{{ message.message }}
+		<ol :class="$style.nodes">
+			<li v-for="(node, index) in message.data" :key="index" :class="$style.node">
+				<n8n-tooltip placement="left" :show-after="300">
+					<template #content>
+						{{ node.reasoning }}
+					</template>
+					<div :class="$style.node">
+						<NodeIcon
+							:class="$style.nodeIcon"
+							:node-type="nodeTypesStore.getNodeType(node.nodeType)"
+							:node-name="node.nodeName"
+							:show-tooltip="false"
+							:size="12"
+						/>
+						<span>{{ node.nodeName }}</span>
+					</div>
+				</n8n-tooltip>
+			</li>
+		</ol>
+		<template v-if="showControls && showControlsLocal && !streaming">
+			{{ i18n.baseText('aiAssistant.builder.plan.intro') }}
+			<div :class="$style.controls">
+				<n8n-button type="primary" @click="onApprovePlan">{{
+					i18n.baseText('aiAssistant.builder.plan.approve')
+				}}</n8n-button>
+				<n8n-button type="secondary" @click="onRequestChanges">{{
+					i18n.baseText('aiAssistant.builder.plan.reject')
+				}}</n8n-button>
+			</div>
+		</template>
+		<template v-if="changesRequested">
+			<span class="mb-m">{{ i18n.baseText('aiAssistant.builder.plan.whatToChange') }}</span>
+		</template>
+	</BaseMessage>
+</template>
+
+<style lang="scss" module>
+.message {
+	display: flex;
+	flex-direction: column;
+}
+.nodes {
+	list-style: none;
+	padding: var(--spacing-2xs);
+	display: flex;
+	flex-direction: column;
+	gap: var(--spacing-2xs);
+}
+.nodeIcon {
+	padding: var(--spacing-3xs);
+	border-radius: var(--border-radius-base);
+	border: 1px solid var(--color-foreground-base);
+	display: inline-flex;
+}
+.node {
+	display: flex;
+	align-items: center;
+	gap: var(--spacing-3xs);
+	font-size: var(--font-size-2xs);
+}
+.controls {
+	display: flex;
+	gap: var(--spacing-2xs);
+	margin-top: var(--spacing-xs);
+}
+.followUpMessage {
+	margin-top: var(--spacing-m);
+}
+</style>
@@ -1,7 +1,12 @@
 import type { ChatUI } from '@n8n/design-system/types/assistant';
 import type { ChatRequest } from '@/types/assistant.types';
 import { useI18n } from '@n8n/i18n';
-import { isTextMessage, isWorkflowUpdatedMessage, isToolMessage } from '@/types/assistant.types';
+import {
+	isTextMessage,
+	isWorkflowUpdatedMessage,
+	isToolMessage,
+	isPlanMessage,
+} from '@/types/assistant.types';

 export interface MessageProcessingResult {
 	messages: ChatUI.AssistantMessage[];
@@ -119,6 +124,18 @@ export function useBuilderMessages() {
 			// Don't clear thinking for workflow updates - they're just state changes
 		} else if (isToolMessage(msg)) {
 			processToolMessage(messages, msg, messageId);
+		} else if (isPlanMessage(msg)) {
+			// Add new plan message
+			messages.push({
+				id: messageId,
+				role: 'assistant',
+				type: 'custom',
+				customType: 'nodesPlan',
+				message: msg.message,
+				data: msg.plan,
+			} satisfies ChatUI.CustomMessage);
+			// Plan messages are informational, clear thinking
+			shouldClearThinking = true;
 		} else if ('type' in msg && msg.type === 'error' && 'content' in msg) {
 			// Handle error messages from the API
 			// API sends error messages with type: 'error' and content field
@@ -145,7 +162,7 @@ export function useBuilderMessages() {
 		messageId: string,
 	): void {
 		// Use toolCallId as the message ID for consistency across updates
-		const toolMessageId = msg.toolCallId || messageId;
+		const toolMessageId = msg.toolCallId ?? messageId;

 		// Check if we already have this tool message
 		const existingIndex = msg.toolCallId
@@ -214,14 +231,15 @@ export function useBuilderMessages() {
 			}
 		}

-		// Check if there's any text message after the last completed tool
+		// Check if there's any text or custom message after the last completed tool
 		// Note: workflow-updated messages shouldn't count as they're just canvas state updates
-		let hasTextAfterTools = false;
+		// Custom messages (like plan messages) should count as responses
+		let hasResponseAfterTools = false;
 		if (lastCompletedToolIndex !== -1) {
 			for (let i = lastCompletedToolIndex + 1; i < messages.length; i++) {
 				const msg = messages[i];
-				if (msg.type === 'text') {
-					hasTextAfterTools = true;
+				if (msg.type === 'text' || msg.type === 'custom') {
+					hasResponseAfterTools = true;
 					break;
 				}
 			}
@@ -229,7 +247,7 @@ export function useBuilderMessages() {

 		return {
 			hasAnyRunningTools: false,
-			isStillThinking: hasCompletedTools && !hasTextAfterTools,
+			isStillThinking: hasCompletedTools && !hasResponseAfterTools,
 		};
 	}

@@ -372,6 +390,17 @@ export function useBuilderMessages() {
 		} satisfies ChatUI.AssistantMessage;
 	}

+	if (isPlanMessage(message)) {
+		return {
+			id,
+			role: 'assistant',
+			type: 'custom',
+			customType: 'nodesPlan',
+			data: message.plan,
+			message: message.message,
+		} satisfies ChatUI.CustomMessage;
+	}
+
 	// Handle event messages
 	if ('type' in message && message.type === 'event') {
 		return {
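For reference, a minimal sketch of the mapping this composable performs: a streamed plan message (its shape appears in the types diff further below) becomes a 'nodesPlan' custom chat message. The sample node entries and IDs here are invented for illustration and are not part of this change.

// Invented example of a plan message as it might arrive from the builder API stream.
const planMessage = {
	role: 'assistant' as const,
	type: 'plan' as const,
	message: 'Proposed plan for your workflow:',
	plan: [
		{ nodeType: 'n8n-nodes-base.webhook', nodeName: 'Webhook', reasoning: 'Receives the incoming request' },
		{ nodeType: 'n8n-nodes-base.set', nodeName: 'Set Fields', reasoning: 'Shapes the data for later steps' },
	],
};

// The corresponding UI message pushed above: a custom message rendered as a nodes plan.
const uiMessage = {
	id: 'example-message-id',
	role: 'assistant' as const,
	type: 'custom' as const,
	customType: 'nodesPlan',
	message: planMessage.message,
	data: planMessage.plan,
};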
@@ -886,6 +886,7 @@ export const ASK_AI_MAX_PROMPT_LENGTH = 600;
 export const ASK_AI_MIN_PROMPT_LENGTH = 15;
 export const ASK_AI_LOADING_DURATION_MS = 12000;
 export const ASK_AI_SLIDE_OUT_DURATION_MS = 200;
+export const PLAN_APPROVAL_MESSAGE = 'Proceed with the plan';

 export const APPEND_ATTRIBUTION_DEFAULT_PATH = 'parameters.options.appendAttribution';

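A rough sketch of the flow this constant enables, assuming a hypothetical sendChatMessage helper and the usual '@/constants' alias (neither is defined by this diff): the client first requests a workflow, the agent answers with a plan for review, and approval is expressed by sending the constant back as an ordinary chat message.

import { PLAN_APPROVAL_MESSAGE } from '@/constants';

// Hypothetical helper standing in for however the chat panel submits a user message.
declare function sendChatMessage(text: string): Promise<void>;

async function buildWithPlanApproval(prompt: string) {
	// Step 1: ask for a workflow; the agent responds with a plan message for review.
	await sendChatMessage(prompt);
	// Step 2: once the user accepts the plan, send the approval phrase so the agent
	// continues from planning to actually building the workflow.
	await sendChatMessage(PLAN_APPROVAL_MESSAGE);
}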
@@ -94,6 +94,10 @@ export const useBuilderStore = defineStore(STORES.BUILDER, () => {

 	const workflowMessages = computed(() => chatMessages.value.filter(isWorkflowUpdatedMessage));

+	const assistantMessages = computed(() =>
+		chatMessages.value.filter((msg) => msg.role === 'assistant'),
+	);
+
 	// Chat management functions
 	/**
 	 * Resets the entire chat session to initial state.
@@ -439,6 +443,7 @@ export const useBuilderStore = defineStore(STORES.BUILDER, () => {
 		workflowPrompt,
 		toolMessages,
 		workflowMessages,
+		assistantMessages,
 		trackingSessionId,
 		streamingAbortController,
 		initialGeneration,
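As a consumption sketch (the component code and store import path are assumed, not part of this change), the new assistantMessages getter can be pulled out with Pinia's storeToRefs to tell whether the builder agent has replied yet:

import { computed } from 'vue';
import { storeToRefs } from 'pinia';
import { useBuilderStore } from '@/stores/builder.store';

const builderStore = useBuilderStore();
const { assistantMessages } = storeToRefs(builderStore);

// True once the builder agent has produced at least one reply of any kind.
const hasAssistantReplied = computed(() => assistantMessages.value.length > 0);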
@@ -166,6 +166,16 @@ export namespace ChatRequest {
 	}

 	// API-only types
+	export interface PlanMessage {
+		role: 'assistant';
+		type: 'plan';
+		plan: Array<{
+			nodeType: string;
+			nodeName: string;
+			reasoning: string;
+		}>;
+		message?: string; // For plan-review messages
+	}

 	export type MessageResponse =
 		| ((
@@ -177,6 +187,7 @@ export namespace ChatRequest {
 				| ChatUI.WorkflowUpdatedMessage
 				| ToolMessage
 				| ChatUI.ErrorMessage
+				| PlanMessage
 		  ) & {
 				quickReplies?: ChatUI.QuickReply[];
 		  })
@@ -266,3 +277,7 @@ export function isEndSessionMessage(
 ): msg is ChatUI.EndSessionMessage {
 	return 'type' in msg && msg.type === 'event' && msg.eventName === 'end-session';
 }
+
+export function isPlanMessage(msg: ChatRequest.MessageResponse): msg is ChatRequest.PlanMessage {
+	return 'type' in msg && msg.type === 'plan' && 'plan' in msg && Array.isArray(msg.plan);
+}
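A small usage sketch of the new type guard; the handler below is illustrative only and simply narrows a streamed response to PlanMessage before reading its plan steps.

import { isPlanMessage } from '@/types/assistant.types';
import type { ChatRequest } from '@/types/assistant.types';

function describePlan(msg: ChatRequest.MessageResponse): string {
	if (isPlanMessage(msg)) {
		// Narrowed to ChatRequest.PlanMessage: plan entries are fully typed here.
		return msg.plan.map((step) => `${step.nodeName} (${step.nodeType}): ${step.reasoning}`).join('\n');
	}
	return 'Not a plan message';
}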
@@ -1882,7 +1882,9 @@ watch(
 		};

 		fallbackNodes.value =
-			builderStore.isAIBuilderEnabled && builderStore.isAssistantEnabled
+			builderStore.isAIBuilderEnabled &&
+			builderStore.isAssistantEnabled &&
+			builderStore.assistantMessages.length === 0
 				? [aiPromptItem]
 				: [addNodesItem];
 	},