Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git, synced 2025-12-16 09:36:44 +00:00
chore: Remove AI WF Builder planner step (no-changelog) (#19343)
@@ -1,6 +1,5 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

import { PLAN_APPROVAL_MESSAGE } from '../../src/constants';
import type { SimpleWorkflow } from '../../src/types/workflow';
import type { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import { evaluateWorkflow } from '../chains/workflow-evaluator';
@@ -52,10 +51,7 @@ export async function runSingleTest(
try {
// Generate workflow
const startTime = Date.now();
// First generate plan
await consumeGenerator(agent.chat(getChatPayload(testCase.prompt, testCase.id), userId));
// Confirm plan
await consumeGenerator(agent.chat(getChatPayload(PLAN_APPROVAL_MESSAGE, testCase.id), userId));
const generationTime = Date.now() - startTime;

// Get generated workflow with validation
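For orientation, here is a minimal sketch (not part of the diff) of what the generation step in runSingleTest reduces to once the plan/approval round-trip above is removed; it assumes the surrounding helpers (consumeGenerator, getChatPayload) keep their current signatures:

// Only the single generation call remains; the plan and approval turns are gone
const startTime = Date.now();
await consumeGenerator(agent.chat(getChatPayload(testCase.prompt, testCase.id), userId));
const generationTime = Date.now() - startTime;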
@@ -5,7 +5,6 @@ import type { INodeTypeDescription } from 'n8n-workflow';
import pc from 'picocolors';

import { createLangsmithEvaluator } from './evaluator';
import { PLAN_APPROVAL_MESSAGE } from '../../src/constants';
import type { WorkflowState } from '../../src/workflow-state';
import { setupTestEnvironment, createAgent } from '../core/environment';
import {
@@ -43,15 +42,9 @@ function createWorkflowGenerator(

// Create agent for this run
const agent = createAgent(parsedNodeTypes, llm, tracer);

// First generate plan
await consumeGenerator(
agent.chat(getChatPayload(messageContent, runId), 'langsmith-eval-user'),
);
// Confirm plan
await consumeGenerator(
agent.chat(getChatPayload(PLAN_APPROVAL_MESSAGE, runId), 'langsmith-eval-user'),
);

// Get generated workflow with validation
const state = await agent.getState(runId, 'langsmith-eval-user');
@@ -1,333 +0,0 @@
|
||||
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
|
||||
|
||||
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
|
||||
import type { BaseMessage, ToolMessage } from '@langchain/core/messages';
|
||||
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
|
||||
import { DynamicStructuredTool } from '@langchain/core/tools';
|
||||
import { StateGraph, MessagesAnnotation, END, START } from '@langchain/langgraph';
|
||||
import { ToolNode } from '@langchain/langgraph/prebuilt';
|
||||
import { jsonParse, type INodeTypeDescription } from 'n8n-workflow';
|
||||
import { z } from 'zod';
|
||||
|
||||
import { isAIMessage } from '@/types/langchain';
|
||||
|
||||
import { LLMServiceError, ToolExecutionError } from '../errors';
|
||||
import { createNodeDetailsTool } from '../tools/node-details.tool';
|
||||
import { createNodeSearchTool } from '../tools/node-search.tool';
|
||||
|
||||
const planNodeSchema = z.object({
|
||||
nodeType: z
|
||||
.string()
|
||||
.describe('The exact n8n node type identifier (e.g., "n8n-nodes-base.httpRequest")'),
|
||||
nodeName: z
|
||||
.string()
|
||||
.describe('A descriptive name for this node instance (e.g., "Get Weather Data")'),
|
||||
reasoning: z
|
||||
.string()
|
||||
.describe('Brief explanation of why this node is needed and what it will do'),
|
||||
});
|
||||
|
||||
const workflowPlanSchema = z.object({
|
||||
intro: z.string().describe('A concise summary of the workflow plan'),
|
||||
plan: z
|
||||
.array(planNodeSchema)
|
||||
.min(1)
|
||||
.describe('Ordered list of nodes that will be used to build the workflow'),
|
||||
});
|
||||
|
||||
const generateWorkflowPlanTool = new DynamicStructuredTool({
|
||||
name: 'generate_workflow_plan',
|
||||
description:
|
||||
'Create a structured plan of n8n nodes based on user requirements and available node information',
|
||||
schema: workflowPlanSchema,
|
||||
func: async (input) => {
|
||||
return input;
|
||||
},
|
||||
});
|
||||
|
||||
export type WorkflowPlanNode = z.infer<typeof planNodeSchema>;
|
||||
export type WorkflowPlan = z.infer<typeof workflowPlanSchema>;
|
||||
|
||||
const SYSTEM_PROMPT = `You are a Workflow Planning Assistant for n8n. Your task is to analyze user requests and create a detailed plan of n8n nodes that will be used to build the workflow.
|
||||
|
||||
## Your Process
|
||||
1. Analyze the user's request to understand what they want to automate
|
||||
2. Use the search_nodes tool to find relevant n8n nodes for each part of the workflow
|
||||
3. Use the get_node_details tool to understand specific nodes' capabilities
|
||||
4. Use the generate_workflow_plan tool to create the final structured plan
|
||||
|
||||
## Guidelines
|
||||
- Be thorough in your search - search for different keywords and concepts
|
||||
- Use exact node type identifiers (e.g., "n8n-nodes-base.httpRequest")
|
||||
- Order nodes logically from trigger/start to final output
|
||||
- Place sub-nodes (e.g., AI tools) immediately after their root node (e.g. AI Agent)
|
||||
- Only include nodes that directly fulfill the user's requirements
|
||||
- Consider data transformation needs between nodes
|
||||
- For AI workflows, search for AI-related nodes and sub-nodes
|
||||
- ALWAYS start with a trigger node and workflow configuration node (see Workflow Structure Requirements)
|
||||
|
||||
## Workflow Structure Requirements
|
||||
CRITICAL: Every workflow MUST follow this structure:
|
||||
|
||||
1. **First Node - Trigger (MANDATORY)**
|
||||
- Every workflow MUST start with a trigger node
|
||||
- Choose the appropriate trigger based on user intent (see Trigger Selection Logic)
|
||||
- If no trigger is specified, intelligently select the most appropriate one
|
||||
|
||||
2. **Second Node - Workflow Configuration (MANDATORY)**
|
||||
- ALWAYS add an Edit Fields (Set) node immediately after the trigger
|
||||
- Name it "Workflow Configuration"
|
||||
- This node serves as the main configuration point for the workflow
|
||||
- Downstream nodes will reference values from this node via expressions
|
||||
- This centralizes key workflow parameters and makes configuration easier
|
||||
|
||||
## Trigger Selection Logic
|
||||
Choose the trigger based on the workflow's purpose:
|
||||
|
||||
### Manual Trigger (n8n-nodes-base.manualTrigger)
|
||||
Use when:
|
||||
- User wants to test or debug the workflow
|
||||
- It's a one-time or ad-hoc process
|
||||
- User hasn't specified any trigger requirements
|
||||
- The workflow is for data processing or transformation tasks
|
||||
|
||||
### Chat Trigger (n8n-nodes-langchain.chatTrigger)
|
||||
Use when:
|
||||
- Building conversational AI or chatbot workflows
|
||||
- User mentions chat, conversation, or interactive communication
|
||||
- The workflow needs to respond to user messages
|
||||
- Building AI agents that interact with users
|
||||
|
||||
### Webhook Trigger (n8n-nodes-base.webhook)
|
||||
Use when:
|
||||
- Integrating with external systems or APIs
|
||||
- User mentions webhooks, API calls, or external events
|
||||
- The workflow needs to be triggered by external applications
|
||||
- Building automated responses to system events
|
||||
|
||||
## Search Strategy
|
||||
- Search for nodes by functionality (e.g., "email", "database", "api")
|
||||
- Search for specific service names mentioned by the user
|
||||
- For AI workflows, search for sub-nodes using connection types:
|
||||
- NodeConnectionTypes.AiLanguageModel for LLM providers
|
||||
- NodeConnectionTypes.AiTool for AI tools
|
||||
- NodeConnectionTypes.AiMemory for memory nodes
|
||||
- NodeConnectionTypes.AiEmbedding for embeddings
|
||||
- NodeConnectionTypes.AiVectorStore for vector stores
|
||||
|
||||
## Connection Parameter Rules
|
||||
When planning nodes, consider their connection requirements:
|
||||
|
||||
### Static vs Dynamic Nodes
|
||||
- **Static nodes** (standard inputs/outputs): HTTP Request, Set, Code
|
||||
- **Dynamic nodes** (parameter-dependent connections): AI nodes, Vector Stores, Document Loaders
|
||||
|
||||
### Dynamic Node Parameters That Affect Connections
|
||||
- AI Agent: hasOutputParser creates additional input for schema
|
||||
- Vector Store: mode parameter affects available connections (insert vs retrieve-as-tool)
|
||||
- Document Loader: textSplittingMode and dataType affect input structure
|
||||
|
||||
## AI Node Connection Patterns
|
||||
CRITICAL: AI sub-nodes PROVIDE capabilities, making them the SOURCE in connections:
|
||||
|
||||
### Main AI Connections
|
||||
- OpenAI Chat Model → AI Agent [ai_languageModel]
|
||||
- Calculator Tool → AI Agent [ai_tool]
|
||||
- Window Buffer Memory → AI Agent [ai_memory]
|
||||
- Token Splitter → Default Data Loader [ai_textSplitter]
|
||||
- Default Data Loader → Vector Store [ai_document]
|
||||
- Embeddings OpenAI → Vector Store [ai_embedding]
|
||||
|
||||
Why: Sub-nodes enhance main nodes with their capabilities
|
||||
|
||||
## RAG Workflow Pattern
|
||||
CRITICAL: For RAG (Retrieval-Augmented Generation) workflows, follow this specific pattern:
|
||||
|
||||
Main data flow:
|
||||
- Data source (e.g., HTTP Request) → Vector Store [main connection]
|
||||
- The Vector Store receives the actual data through its main input
|
||||
|
||||
AI capability connections:
|
||||
- Document Loader → Vector Store [ai_document] - provides document processing
|
||||
- Embeddings → Vector Store [ai_embedding] - provides embedding generation
|
||||
- Text Splitter → Document Loader [ai_textSplitter] - provides text chunking
|
||||
|
||||
Common mistake to avoid:
|
||||
- NEVER connect Document Loader to main data outputs
|
||||
- Document Loader is NOT a data processor in the main flow
|
||||
- Document Loader is an AI sub-node that gives Vector Store the ability to process documents
|
||||
|
||||
## Agent Node Distinction
|
||||
CRITICAL: Distinguish between two different agent node types:
|
||||
|
||||
1. **AI Agent** (n8n-nodes-langchain.agent)
|
||||
- Main workflow node that orchestrates AI tasks
|
||||
- Accepts inputs: trigger data, memory, tools, language models
|
||||
- Use for: Primary AI logic, chatbots, autonomous workflows
|
||||
|
||||
2. **AI Agent Tool** (n8n-nodes-langchain.agentTool)
|
||||
- Sub-node that acts as a tool for another AI Agent
|
||||
- Provides agent-as-a-tool capability to parent agents
|
||||
- Use for: Multi-agent systems where one agent calls another
|
||||
|
||||
Default assumption: When users ask for "an agent" or "AI agent", they mean the main AI Agent node unless they explicitly mention "tool", "sub-agent", or "agent for another agent".
|
||||
|
||||
## Output Format
|
||||
After searching and analyzing available nodes, use the generate_workflow_plan tool to create a structured plan with:
|
||||
- The exact node type to use
|
||||
- A descriptive name for the node instance
|
||||
- Clear reasoning for why this node is needed AND how it connects to other nodes
|
||||
- Consider connection requirements in your reasoning
|
||||
|
||||
Your plan MUST always include:
|
||||
1. An appropriate trigger node as the first node
|
||||
2. An Edit Fields (Set) node named "Workflow Configuration" as the second node
|
||||
3. All other nodes needed to fulfill the user's requirements
|
||||
|
||||
After using the generate_workflow_plan tool, only respond with a single word "DONE" to indicate the plan is complete.
|
||||
Remember: Be precise about node types, understand connection patterns, and always include trigger and configuration nodes.`;
|
||||
|
||||
function formatPlanFeedback(previousPlan: WorkflowPlan, feedback: string) {
|
||||
return `Previous plan: ${JSON.stringify(previousPlan, null, 2)}\n\nUser feedback: ${feedback}\n\nPlease adjust the plan based on the feedback.`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a workflow planner agent that can search for and analyze nodes
|
||||
*/
|
||||
export function createWorkflowPlannerAgent(llm: BaseChatModel, nodeTypes: INodeTypeDescription[]) {
|
||||
if (!llm.bindTools) {
|
||||
throw new LLMServiceError("LLM doesn't support binding tools", { llmModel: llm._llmType() });
|
||||
}
|
||||
|
||||
// Create the tools for the planner
|
||||
const tools = [
|
||||
createNodeSearchTool(nodeTypes).tool,
|
||||
createNodeDetailsTool(nodeTypes).tool,
|
||||
generateWorkflowPlanTool,
|
||||
];
|
||||
|
||||
// Create a ToolNode with our tools
|
||||
const toolNode = new ToolNode(tools);
|
||||
|
||||
// Bind tools to the LLM
|
||||
const modelWithTools = llm.bindTools(tools);
|
||||
|
||||
// Define the function that determines whether to continue
|
||||
const shouldContinue = (state: typeof MessagesAnnotation.State) => {
|
||||
const { messages } = state;
|
||||
const lastMessage = messages[messages.length - 1];
|
||||
|
||||
// Check if the last message has tool calls
|
||||
if (
|
||||
'tool_calls' in lastMessage &&
|
||||
Array.isArray(lastMessage.tool_calls) &&
|
||||
lastMessage.tool_calls?.length
|
||||
) {
|
||||
// Check if one of the tool calls is the final plan generation
|
||||
const hasPlanTool = lastMessage.tool_calls.some((tc) => tc.name === 'generate_workflow_plan');
|
||||
|
||||
if (hasPlanTool) {
|
||||
// If we're generating the plan, still need to execute the tool
|
||||
return 'tools';
|
||||
}
|
||||
|
||||
return 'tools';
|
||||
}
|
||||
return END;
|
||||
};
|
||||
|
||||
// Define the function that calls the model
|
||||
const callModel = async (state: typeof MessagesAnnotation.State) => {
|
||||
const { messages } = state;
|
||||
const response = await modelWithTools.invoke(messages);
|
||||
return { messages: [response] };
|
||||
};
|
||||
|
||||
// Build the graph
|
||||
const workflow = new StateGraph(MessagesAnnotation)
|
||||
.addNode('agent', callModel)
|
||||
.addNode('tools', toolNode)
|
||||
.addEdge(START, 'agent')
|
||||
.addConditionalEdges('agent', shouldContinue, ['tools', END])
|
||||
.addEdge('tools', 'agent');
|
||||
|
||||
const app = workflow.compile();
|
||||
|
||||
return {
|
||||
async plan(
|
||||
userRequest: string,
|
||||
previousPlan?: WorkflowPlan,
|
||||
feedback?: string,
|
||||
): Promise<
|
||||
| {
|
||||
plan: WorkflowPlan;
|
||||
toolMessages: BaseMessage[];
|
||||
}
|
||||
| { text: string }
|
||||
> {
|
||||
// Prepare the initial messages
|
||||
const systemMessage = new SystemMessage(SYSTEM_PROMPT);
|
||||
|
||||
let userMessage = userRequest;
|
||||
if (previousPlan && feedback) {
|
||||
userMessage += '\n\n';
|
||||
userMessage += formatPlanFeedback(previousPlan, feedback);
|
||||
}
|
||||
|
||||
const humanMessage = new HumanMessage(userMessage);
|
||||
|
||||
// Invoke the graph
|
||||
const result = await app.invoke({
|
||||
messages: [systemMessage, humanMessage],
|
||||
});
|
||||
|
||||
// Extract tools messages
|
||||
const toolMessages = result.messages.filter((msg) => {
|
||||
if (['system', 'human'].includes(msg.getType())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Do not include final AI message
|
||||
if (isAIMessage(msg) && (msg.tool_calls ?? []).length === 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
const workflowPlanToolCall = result.messages.findLast((msg): msg is ToolMessage => {
|
||||
return msg.name === 'generate_workflow_plan';
|
||||
});
|
||||
|
||||
if (!workflowPlanToolCall) {
|
||||
const lastAiMessage = result.messages.findLast((msg) => {
|
||||
return isAIMessage(msg) && (msg.tool_calls ?? []).length === 0;
|
||||
});
|
||||
|
||||
if (lastAiMessage) {
|
||||
return {
|
||||
text: lastAiMessage.text,
|
||||
};
|
||||
}
|
||||
|
||||
throw new ToolExecutionError('Invalid response from agent - no plan generated');
|
||||
}
|
||||
|
||||
try {
|
||||
if (typeof workflowPlanToolCall.content !== 'string') {
|
||||
throw new ToolExecutionError('Workflow plan tool call content is not a string');
|
||||
}
|
||||
|
||||
const workflowPlan = jsonParse<WorkflowPlan>(workflowPlanToolCall.content);
|
||||
return {
|
||||
plan: workflowPlan,
|
||||
toolMessages,
|
||||
};
|
||||
} catch (error) {
|
||||
throw new ToolExecutionError(
|
||||
`Failed to parse workflow plan: ${error instanceof Error ? error.message : 'Unknown error'}`,
|
||||
);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
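For context, a minimal usage sketch (not part of the diff) of the planner API deleted above, mirroring how createPlan in workflow-builder-agent.ts consumed it; the prompt string is only an illustrative example, and llm / parsedNodeTypes are assumed to be in scope:

// Hypothetical caller of the removed planner
const planner = createWorkflowPlannerAgent(llm, parsedNodeTypes);
const result = await planner.plan('Fetch weather data every morning and email me a summary');

if ('plan' in result) {
	// Structured outcome: intro text plus an ordered list of planned nodes
	console.log(result.plan.intro);
	for (const node of result.plan.plan) {
		console.log(`${node.nodeType} (${node.nodeName}): ${node.reasoning}`);
	}
} else {
	// The model answered with plain text instead of calling generate_workflow_plan
	console.log(result.text);
}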
|
||||
@@ -34,5 +34,3 @@ export const MAX_WORKFLOW_LENGTH_TOKENS = 30_000; // Tokens
* Used for rough token count estimation from character counts.
*/
export const AVG_CHARS_PER_TOKEN_ANTHROPIC = 2.5;

export const PLAN_APPROVAL_MESSAGE = 'Proceed with the plan';
@@ -3,15 +3,13 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
import type { ToolMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import type { MemorySaver } from '@langchain/langgraph';
import { GraphRecursionError, Command } from '@langchain/langgraph';
import { GraphRecursionError } from '@langchain/langgraph';
import type { Logger } from '@n8n/backend-common';
import { mock } from 'jest-mock-extended';
import type { INodeTypeDescription } from 'n8n-workflow';
import { ApplicationError } from 'n8n-workflow';

import type { WorkflowPlanNode } from '@/agents/workflow-planner-agent';
import { createWorkflowPlannerAgent } from '@/agents/workflow-planner-agent';
import { MAX_AI_BUILDER_PROMPT_LENGTH, PLAN_APPROVAL_MESSAGE } from '@/constants';
import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
import { ValidationError } from '@/errors';
import type { StreamOutput } from '@/types/streaming';
import { createStreamProcessor, formatMessages } from '@/utils/stream-processor';
@@ -63,9 +61,6 @@ jest.mock('@/chains/conversation-compact', () => ({
jest.mock('@/chains/workflow-name', () => ({
workflowNameChain: jest.fn(),
}));
jest.mock('@/agents/workflow-planner-agent', () => ({
createWorkflowPlannerAgent: jest.fn(),
}));

const mockRandomUUID = jest.fn();
Object.defineProperty(global, 'crypto', {
@@ -370,420 +365,4 @@ describe('WorkflowBuilderAgent', () => {
|
||||
expect(result.sessions[0].messages).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Workflow Planning', () => {
|
||||
let mockPlannerAgent: ReturnType<typeof createWorkflowPlannerAgent>;
|
||||
const mockCreateWorkflowPlannerAgent = createWorkflowPlannerAgent as jest.MockedFunction<
|
||||
typeof createWorkflowPlannerAgent
|
||||
>;
|
||||
|
||||
// Helper function to mock stream processor with custom output
|
||||
const mockStreamProcessor = (output: StreamOutput | Error) => {
|
||||
if (output instanceof Error) {
|
||||
mockCreateStreamProcessor.mockImplementation(() => {
|
||||
// eslint-disable-next-line require-yield
|
||||
return (async function* () {
|
||||
throw output;
|
||||
})();
|
||||
});
|
||||
} else {
|
||||
mockCreateStreamProcessor.mockImplementation(() => {
|
||||
return (async function* () {
|
||||
yield output;
|
||||
})();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Helper function to run chat and collect results
|
||||
const runChatAndCollectResults = async (payload: ChatPayload) => {
|
||||
const generator = agent.chat(payload);
|
||||
const results = [];
|
||||
for await (const result of generator) {
|
||||
results.push(result);
|
||||
}
|
||||
return results;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset the mock stream processor for planning tests
|
||||
mockCreateStreamProcessor.mockReset();
|
||||
|
||||
mockPlannerAgent = {
|
||||
plan: jest.fn(),
|
||||
};
|
||||
mockCreateWorkflowPlannerAgent.mockReturnValue(mockPlannerAgent);
|
||||
});
|
||||
|
||||
describe('create_plan', () => {
|
||||
it('should create a workflow plan from user message', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Create a workflow to process data',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Creating workflow plan...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const results = await runChatAndCollectResults(payload);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0]).toHaveProperty('messages');
|
||||
});
|
||||
|
||||
it('should handle planning errors gracefully', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Create invalid workflow',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor(new ValidationError('Invalid plan request'));
|
||||
|
||||
await expect(runChatAndCollectResults(payload)).rejects.toThrow(ValidationError);
|
||||
});
|
||||
});
|
||||
|
||||
describe('reviewPlan', () => {
|
||||
it('should handle plan approval via interrupt', async () => {
|
||||
const mockPlan: WorkflowPlanNode[] = [
|
||||
{
|
||||
nodeType: 'n8n-nodes-base.manualTrigger',
|
||||
nodeName: 'Manual Trigger',
|
||||
reasoning: 'Start the workflow manually',
|
||||
},
|
||||
];
|
||||
|
||||
const payload: ChatPayload = {
|
||||
message: PLAN_APPROVAL_MESSAGE,
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
const testAgent = new WorkflowBuilderAgent(config);
|
||||
|
||||
// Mock the agent with a pending interrupt
|
||||
const mockCompiledAgent = {
|
||||
stream: jest.fn().mockImplementation(async function* (input: unknown) {
|
||||
// If it's a Command with resume, it means plan was approved
|
||||
if (input instanceof Command && input.resume) {
|
||||
yield [
|
||||
'agent',
|
||||
{
|
||||
planStatus: 'approved',
|
||||
messages: [new AIMessage('Plan approved, executing...')],
|
||||
},
|
||||
];
|
||||
}
|
||||
}),
|
||||
getState: jest.fn().mockResolvedValue({
|
||||
values: {
|
||||
messages: [],
|
||||
workflowPlan: {
|
||||
intro: 'Test plan',
|
||||
plan: mockPlan,
|
||||
},
|
||||
},
|
||||
tasks: [
|
||||
{
|
||||
interrupts: [
|
||||
{
|
||||
value: {
|
||||
plan: mockPlan,
|
||||
message: 'Test plan',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}),
|
||||
updateState: jest.fn(),
|
||||
};
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
jest.spyOn(testAgent as any, 'createWorkflow').mockReturnValue({
|
||||
compile: jest.fn().mockReturnValue(mockCompiledAgent),
|
||||
});
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Processing...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const generator = testAgent.chat(payload);
|
||||
const results = [];
|
||||
for await (const result of generator) {
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
// Verify that stream was called with a Command containing approval
|
||||
expect(mockCompiledAgent.stream).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
resume: {
|
||||
action: 'approve',
|
||||
feedback: undefined,
|
||||
},
|
||||
}),
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle plan rejection with feedback', async () => {
|
||||
const mockPlan: WorkflowPlanNode[] = [
|
||||
{
|
||||
nodeType: 'n8n-nodes-base.manualTrigger',
|
||||
nodeName: 'Manual Trigger',
|
||||
reasoning: 'Start the workflow manually',
|
||||
},
|
||||
];
|
||||
|
||||
const feedback = 'Please add error handling';
|
||||
const payload: ChatPayload = {
|
||||
message: feedback,
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
const testAgent = new WorkflowBuilderAgent(config);
|
||||
|
||||
// Mock the agent with a pending interrupt
|
||||
const mockCompiledAgent = {
|
||||
stream: jest.fn().mockImplementation(async function* (input: unknown) {
|
||||
// If it's a Command with resume and feedback, it means plan was rejected
|
||||
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
|
||||
if (input instanceof Command && input.resume?.action === 'adjust') {
|
||||
yield [
|
||||
'adjustPlan',
|
||||
{
|
||||
planStatus: 'rejected',
|
||||
planFeedback: feedback,
|
||||
messages: [new HumanMessage(feedback)],
|
||||
},
|
||||
];
|
||||
}
|
||||
}),
|
||||
getState: jest.fn().mockResolvedValue({
|
||||
values: {
|
||||
messages: [],
|
||||
workflowPlan: {
|
||||
intro: 'Test plan',
|
||||
plan: mockPlan,
|
||||
},
|
||||
},
|
||||
tasks: [
|
||||
{
|
||||
interrupts: [
|
||||
{
|
||||
value: {
|
||||
plan: mockPlan,
|
||||
message: 'Test plan',
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}),
|
||||
updateState: jest.fn(),
|
||||
};
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
jest.spyOn(testAgent as any, 'createWorkflow').mockReturnValue({
|
||||
compile: jest.fn().mockReturnValue(mockCompiledAgent),
|
||||
});
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Processing...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const generator = testAgent.chat(payload);
|
||||
const results = [];
|
||||
for await (const result of generator) {
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
// Verify that stream was called with a Command containing rejection and feedback
|
||||
expect(mockCompiledAgent.stream).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
resume: {
|
||||
action: 'adjust',
|
||||
feedback,
|
||||
},
|
||||
}),
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('adjustPlan', () => {
|
||||
it('should adjust plan based on user feedback', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Add error handling',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Adjusting plan with error handling...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const results = await runChatAndCollectResults(payload);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].messages).toBeDefined();
|
||||
expect(results[0].messages[0]).toHaveProperty('text');
|
||||
});
|
||||
|
||||
it('should remove previous plan tool messages when adjusting', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Use webhook instead',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Adjusting plan to use webhook...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const results = await runChatAndCollectResults(payload);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0].messages).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Plan state routing', () => {
|
||||
it('should route to createPlan when no plan exists', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Build a workflow',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Creating plan...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const results = await runChatAndCollectResults(payload);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0]).toHaveProperty('messages');
|
||||
});
|
||||
|
||||
it('should route to agent when plan is approved', async () => {
|
||||
const payload: ChatPayload = {
|
||||
message: 'Continue building',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
mockStreamProcessor({
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Building workflow based on approved plan...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput);
|
||||
|
||||
const results = await runChatAndCollectResults(payload);
|
||||
|
||||
expect(results).toHaveLength(1);
|
||||
expect(results[0]).toHaveProperty('messages');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Interrupt handling', () => {
|
||||
it('should properly handle interrupt for plan review', async () => {
|
||||
// This test verifies that the interrupt mechanism is properly set up
|
||||
// The actual interrupt is handled by LangGraph, we just verify the setup
|
||||
const payload: ChatPayload = {
|
||||
message: 'Create workflow to fetch data',
|
||||
workflowContext: {
|
||||
currentWorkflow: { nodes: [] },
|
||||
},
|
||||
};
|
||||
|
||||
// Mock the stream processor to simulate interrupt handling
|
||||
mockCreateStreamProcessor.mockImplementation(() => {
|
||||
return (async function* () {
|
||||
yield {
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Creating plan for review...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput;
|
||||
|
||||
yield {
|
||||
messages: [
|
||||
{
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: 'Plan created, awaiting approval...',
|
||||
},
|
||||
],
|
||||
} as StreamOutput;
|
||||
})();
|
||||
});
|
||||
|
||||
const generator = agent.chat(payload);
|
||||
const results = [];
|
||||
for await (const result of generator) {
|
||||
results.push(result);
|
||||
}
|
||||
|
||||
// Verify we got results from the stream
|
||||
expect(results.length).toBeGreaterThan(1);
|
||||
expect(results[0]).toHaveProperty('messages');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,5 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';

import type { WorkflowPlan } from '../../agents/workflow-planner-agent';
import { instanceUrlPrompt } from '../../chains/prompts/instance-url';

const systemPrompt = `You are an AI assistant specialized in creating and editing n8n workflows. Your goal is to help users build efficient, well-connected workflows by intelligently using the available tools.
@@ -408,34 +407,6 @@ const previousConversationSummary = `
{previousSummary}
</previous_summary>`;

const workflowPlan = '{workflowPlan}';

export const planFormatter = (plan?: WorkflowPlan | null) => {
if (!plan) return '<workflow_plan>EMPTY</workflow_plan>';

const nodesPlan = plan.plan.map((node) => {
return `
<workflow_plan_node>
<type>${node.nodeType}</type>
<name>${node.nodeName}</name>
<reasoning>${node.reasoning}</reasoning>
</workflow_plan_node>
`;
});

return `
<workflow_plan>
<workflow_plan_intro>
${plan.intro}
</workflow_plan_intro>

<workflow_plan_nodes>
${nodesPlan.join('\n')}
</workflow_plan_nodes>
</workflow_plan>
`;
};

export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
[
'system',
@@ -471,11 +442,6 @@ export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
text: previousConversationSummary,
cache_control: { type: 'ephemeral' },
},
{
type: 'text',
text: workflowPlan,
cache_control: { type: 'ephemeral' },
},
],
],
['placeholder', '{messages}'],
@@ -35,15 +35,6 @@ export interface ExecutionRequestChunk {
reason: string;
}

/**
* Plan chunk for streaming
*/
export interface PlanChunk {
role: 'assistant';
type: 'plan';
plan: unknown;
}

/**
* Union type for all stream chunks
*/
@@ -51,8 +42,7 @@ export type StreamChunk =
| AgentMessageChunk
| ToolProgressChunk
| WorkflowUpdateChunk
| ExecutionRequestChunk
| PlanChunk;
| ExecutionRequestChunk;

/**
* Stream output containing messages
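For clarity, the chunk union after this hunk applies (reconstructed from the lines above, not a line of the diff itself):

export type StreamChunk =
	| AgentMessageChunk
	| ToolProgressChunk
	| WorkflowUpdateChunk
	| ExecutionRequestChunk;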
@@ -2,7 +2,6 @@ import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
import type { ToolCall } from '@langchain/core/messages/tool';
import type { DynamicStructuredTool } from '@langchain/core/tools';

import type { WorkflowPlan } from '../agents/workflow-planner-agent';
import type {
AgentMessageChunk,
ToolProgressChunk,
@@ -45,24 +44,6 @@ export function processStreamChunk(streamMode: string, chunk: unknown): StreamOu
workflowJSON?: unknown;
workflowOperations?: unknown;
};
create_plan?: {
workflowPlan?: unknown;
planStatus?: string;
messages?: Array<{ content: string | Array<{ type: string; text: string }> }>;
};
review_plan?: {
planStatus?: string;
};
adjust_plan?: {
workflowPlan?: unknown;
planStatus?: string;
};
__interrupt__?: Array<{
value: unknown;
resumable: boolean;
ns: string[];
when: string;
}>;
};

if ((agentChunk?.delete_messages?.messages ?? []).length > 0) {
@@ -117,40 +98,6 @@ export function processStreamChunk(streamMode: string, chunk: unknown): StreamOu
|
||||
}
|
||||
}
|
||||
|
||||
// Handle plan creation
|
||||
if (agentChunk?.create_plan?.workflowPlan) {
|
||||
const workflowPlan = agentChunk.create_plan.workflowPlan as WorkflowPlan;
|
||||
const planChunk = {
|
||||
role: 'assistant' as const,
|
||||
type: 'plan' as const,
|
||||
plan: workflowPlan.plan,
|
||||
message: workflowPlan.intro,
|
||||
};
|
||||
return { messages: [planChunk] };
|
||||
} else if ((agentChunk?.create_plan?.messages ?? []).length > 0) {
|
||||
// When planner didn't create a plan, but responded with a message
|
||||
const lastMessage =
|
||||
agentChunk.create_plan!.messages![agentChunk.create_plan!.messages!.length - 1];
|
||||
const messageChunk: AgentMessageChunk = {
|
||||
role: 'assistant',
|
||||
type: 'message',
|
||||
text: lastMessage.content as string,
|
||||
};
|
||||
|
||||
return { messages: [messageChunk] };
|
||||
}
|
||||
|
||||
if (agentChunk?.adjust_plan?.workflowPlan) {
|
||||
const workflowPlan = agentChunk.adjust_plan.workflowPlan as WorkflowPlan;
|
||||
const planChunk = {
|
||||
role: 'assistant' as const,
|
||||
type: 'plan' as const,
|
||||
plan: workflowPlan.plan,
|
||||
message: workflowPlan.intro,
|
||||
};
|
||||
return { messages: [planChunk] };
|
||||
}
|
||||
|
||||
// Handle process_operations updates - emit workflow update after operations are processed
|
||||
if (agentChunk?.process_operations) {
|
||||
// Check if operations were processed (indicated by cleared operations array)
|
||||
@@ -250,16 +197,6 @@ function createToolCallMessage(
|
||||
toolCall: ToolCall,
|
||||
builderTool?: BuilderTool,
|
||||
): Record<string, unknown> {
|
||||
if (toolCall.name === 'generate_workflow_plan') {
|
||||
const workflowPlan = toolCall.args as WorkflowPlan;
|
||||
return {
|
||||
role: 'assistant',
|
||||
type: 'plan',
|
||||
plan: workflowPlan.plan,
|
||||
message: workflowPlan.intro,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
id: toolCall.id,
|
||||
toolCallId: toolCall.id,
|
||||
|
||||
@@ -388,9 +388,6 @@ describe('operations-processor', () => {
messages: [],
workflowContext: {},
previousSummary: 'EMPTY',
workflowPlan: null,
planStatus: null,
planFeedback: null,
});

it('should process operations and clear them', () => {

@@ -49,9 +49,6 @@ describe('tool-executor', () => {
messages,
workflowContext: {},
previousSummary: 'EMPTY',
planFeedback: null,
planStatus: null,
workflowPlan: null,
});

// Helper to create mock tool

@@ -127,7 +127,7 @@ describe('trimWorkflowJSON', () => {
type: 'n8n-nodes-base.test',
parameters: {
nullValue: null,
// eslint-disable-next-line no-undefined

undefinedValue: undefined,
validValue: 'test',
},
@@ -1,15 +1,9 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { ToolMessage, AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
import type { ToolMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import {
StateGraph,
MemorySaver,
END,
GraphRecursionError,
Command,
interrupt,
} from '@langchain/langgraph';
import { StateGraph, MemorySaver, END, GraphRecursionError } from '@langchain/langgraph';
import type { Logger } from '@n8n/backend-common';
import {
ApplicationError,
@@ -23,12 +17,10 @@ import {
DEFAULT_AUTO_COMPACT_THRESHOLD_TOKENS,
MAX_AI_BUILDER_PROMPT_LENGTH,
MAX_INPUT_TOKENS,
PLAN_APPROVAL_MESSAGE,
} from '@/constants';
import { createGetNodeParameterTool } from '@/tools/get-node-parameter.tool';
import { trimWorkflowJSON } from '@/utils/trim-workflow-context';

import { type WorkflowPlanNode, createWorkflowPlannerAgent } from './agents/workflow-planner-agent';
import { conversationCompactChain } from './chains/conversation-compact';
import { workflowNameChain } from './chains/workflow-name';
import { LLMServiceError, ValidationError, WorkflowStateError } from './errors';
@@ -36,7 +28,7 @@ import { createAddNodeTool } from './tools/add-node.tool';
import { createConnectNodesTool } from './tools/connect-nodes.tool';
import { createNodeDetailsTool } from './tools/node-details.tool';
import { createNodeSearchTool } from './tools/node-search.tool';
import { mainAgentPrompt, planFormatter } from './tools/prompts/main-agent.prompt';
import { mainAgentPrompt } from './tools/prompts/main-agent.prompt';
import { createRemoveNodeTool } from './tools/remove-node.tool';
import { createUpdateNodeParametersTool } from './tools/update-node-parameters.tool';
import type { SimpleWorkflow } from './types/workflow';
@@ -129,7 +121,6 @@ export class WorkflowBuilderAgent {
workflowJSON: trimWorkflowJSON(state.workflowJSON),
executionData: state.workflowContext?.executionData ?? {},
executionSchema: state.workflowContext?.executionSchema ?? [],
workflowPlan: planFormatter(state.workflowPlan),
instanceUrl: this.instanceUrl,
});
@@ -165,145 +156,8 @@ export class WorkflowBuilderAgent {
|
||||
return tokensUsed > this.autoCompactThresholdTokens;
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates a plan for the workflow based on user requirements
|
||||
*/
|
||||
const createPlan = async (state: typeof WorkflowState.State) => {
|
||||
const { messages } = state;
|
||||
const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!;
|
||||
|
||||
if (typeof lastHumanMessage.content !== 'string') {
|
||||
throw new ValidationError('Invalid message content for planning');
|
||||
}
|
||||
|
||||
// Create the planner agent with tools
|
||||
const plannerAgent = createWorkflowPlannerAgent(this.llmSimpleTask, this.parsedNodeTypes);
|
||||
|
||||
// Generate the workflow plan
|
||||
const plannerResult = await plannerAgent.plan(lastHumanMessage.content);
|
||||
|
||||
// If we got a structured plan, return it
|
||||
if ('plan' in plannerResult) {
|
||||
const { plan, toolMessages } = plannerResult;
|
||||
this.logger?.debug('Generated workflow plan: ' + JSON.stringify(plan, null, 2));
|
||||
|
||||
return {
|
||||
workflowPlan: plan,
|
||||
planStatus: 'pending' as const,
|
||||
messages: toolMessages,
|
||||
};
|
||||
}
|
||||
|
||||
// If we didn't get a plan, just return the text response
|
||||
this.logger?.debug('Planner returned text response: ' + plannerResult.text);
|
||||
return {
|
||||
messages: [
|
||||
new AIMessage({
|
||||
content: plannerResult.text,
|
||||
}),
|
||||
],
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* Reviews the plan with the user for approval
|
||||
*/
|
||||
const reviewPlan = async (state: typeof WorkflowState.State) => {
|
||||
const { workflowPlan } = state;
|
||||
|
||||
if (!workflowPlan) {
|
||||
throw new ValidationError('No workflow plan to review');
|
||||
}
|
||||
|
||||
// Use interrupt to pause and show the plan to the user
|
||||
// The frontend will display the plan and wait for user action
|
||||
const userResponse = interrupt<
|
||||
{ plan: WorkflowPlanNode[]; message: string },
|
||||
{ action: 'approve' | 'adjust'; feedback?: string }
|
||||
>({
|
||||
plan: workflowPlan.plan,
|
||||
message: workflowPlan.intro,
|
||||
});
|
||||
|
||||
// Process the user's response
|
||||
if (userResponse.action === 'approve') {
|
||||
// User approved the plan, mark as approved and continue
|
||||
return {
|
||||
planStatus: 'approved' as const,
|
||||
};
|
||||
} else if (userResponse.action === 'adjust') {
|
||||
// User wants adjustments, add feedback and mark for adjustment
|
||||
return {
|
||||
planStatus: 'rejected' as const,
|
||||
planFeedback: userResponse.feedback ?? 'Please adjust the plan',
|
||||
};
|
||||
}
|
||||
|
||||
return {};
|
||||
};
|
||||
|
||||
/**
|
||||
* Adjusts the plan based on user feedback
|
||||
*/
|
||||
const adjustPlan = async (state: typeof WorkflowState.State) => {
|
||||
const { messages, planFeedback, workflowPlan } = state;
|
||||
const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!;
|
||||
|
||||
if (typeof lastHumanMessage.content !== 'string') {
|
||||
throw new ValidationError('Invalid message content for plan adjustment');
|
||||
}
|
||||
|
||||
// Create the planner agent with tools
|
||||
const plannerAgent = createWorkflowPlannerAgent(this.llmSimpleTask, this.parsedNodeTypes);
|
||||
|
||||
// Generate an adjusted plan with feedback
|
||||
const adjustedPlan = await plannerAgent.plan(
|
||||
lastHumanMessage.content,
|
||||
workflowPlan ?? undefined,
|
||||
planFeedback ?? undefined,
|
||||
);
|
||||
|
||||
// If we get a text response instead of a plan, just return that
|
||||
if ('text' in adjustedPlan) {
|
||||
return {
|
||||
messages: [
|
||||
new AIMessage({
|
||||
content: adjustedPlan.text,
|
||||
}),
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
// Remove previous plan tool messages to avoid confusion
|
||||
const filteredMessages = messages.map((m) => {
|
||||
if (m instanceof ToolMessage && m.name === 'generate_workflow_plan') {
|
||||
return new RemoveMessage({ id: m.id! });
|
||||
}
|
||||
|
||||
if (m instanceof AIMessage && m.tool_calls && m.tool_calls.length > 0) {
|
||||
const hasPlanCall = m.tool_calls.find((tc) => tc.name === 'generate_workflow_plan');
|
||||
if (hasPlanCall) {
|
||||
return new RemoveMessage({ id: m.id! });
|
||||
}
|
||||
}
|
||||
|
||||
return m;
|
||||
});
|
||||
|
||||
const planAdjustmentMessage = new HumanMessage({ content: planFeedback ?? '' });
|
||||
|
||||
this.logger?.debug('Adjusted workflow plan: ' + JSON.stringify(adjustedPlan, null, 2));
|
||||
|
||||
return {
|
||||
workflowPlan: adjustedPlan.plan,
|
||||
messages: [...filteredMessages, planAdjustmentMessage, ...adjustedPlan.toolMessages],
|
||||
planStatus: 'pending' as const,
|
||||
planFeedback: null,
|
||||
};
|
||||
};
|
||||
|
||||
const shouldModifyState = (state: typeof WorkflowState.State) => {
|
||||
const { messages, workflowContext, planStatus } = state;
|
||||
const { messages, workflowContext } = state;
|
||||
const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!; // There always should be at least one human message in the array
|
||||
|
||||
if (lastHumanMessage.content === '/compact') {
|
||||
@@ -324,11 +178,6 @@ export class WorkflowBuilderAgent {
|
||||
return 'auto_compact_messages';
|
||||
}
|
||||
|
||||
// If we don't have a plan yet, and the workflow is empty, create a plan
|
||||
if (!planStatus && workflowContext?.currentWorkflow?.nodes?.length === 0) {
|
||||
return 'create_plan';
|
||||
}
|
||||
|
||||
return 'agent';
|
||||
};
|
||||
|
||||
@@ -356,9 +205,6 @@ export class WorkflowBuilderAgent {
|
||||
connections: {},
|
||||
name: '',
|
||||
},
|
||||
workflowPlan: null,
|
||||
planStatus: null,
|
||||
planFeedback: null,
|
||||
};
|
||||
|
||||
return stateUpdate;
|
||||
@@ -447,24 +293,11 @@ export class WorkflowBuilderAgent {
|
||||
.addNode('compact_messages', compactSession)
|
||||
.addNode('auto_compact_messages', compactSession)
|
||||
.addNode('create_workflow_name', createWorkflowName)
|
||||
.addNode('create_plan', createPlan)
|
||||
.addNode('review_plan', reviewPlan)
|
||||
.addNode('adjust_plan', adjustPlan)
|
||||
.addConditionalEdges('__start__', shouldModifyState)
|
||||
// .addEdge('create_plan', 'review_plan')
|
||||
.addConditionalEdges('create_plan', (state) => {
|
||||
// If a plan was created, move to review, otherwise back to planning
|
||||
return state.workflowPlan ? 'review_plan' : END;
|
||||
})
|
||||
.addConditionalEdges('review_plan', (state) => {
|
||||
// Route based on the plan status after review
|
||||
return state.planStatus === 'approved' ? 'agent' : 'adjust_plan';
|
||||
})
|
||||
.addEdge('adjust_plan', 'review_plan')
|
||||
.addEdge('tools', 'process_operations')
|
||||
.addEdge('process_operations', 'agent')
|
||||
.addEdge('auto_compact_messages', 'agent')
|
||||
.addEdge('create_workflow_name', 'create_plan')
|
||||
.addEdge('create_workflow_name', 'agent')
|
||||
.addEdge('delete_messages', END)
|
||||
.addEdge('compact_messages', END)
|
||||
.addConditionalEdges('agent', shouldContinue);
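For orientation, a sketch of the graph wiring once the three planner nodes and their edges are gone (reconstructed from this hunk, not a line of the diff itself):

// Simplified wiring: create_workflow_name now feeds the agent directly
.addNode('compact_messages', compactSession)
.addNode('auto_compact_messages', compactSession)
.addNode('create_workflow_name', createWorkflowName)
.addConditionalEdges('__start__', shouldModifyState)
.addEdge('tools', 'process_operations')
.addEdge('process_operations', 'agent')
.addEdge('auto_compact_messages', 'agent')
.addEdge('create_workflow_name', 'agent')
.addEdge('delete_messages', END)
.addEdge('compact_messages', END)
.addConditionalEdges('agent', shouldContinue);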
|
||||
@@ -505,7 +338,7 @@ export class WorkflowBuilderAgent {
|
||||
);
|
||||
|
||||
try {
|
||||
const stream = await this.createAgentStream(payload, streamConfig, agent, threadConfig);
|
||||
const stream = await this.createAgentStream(payload, streamConfig, agent);
|
||||
yield* this.processAgentStream(stream, agent, threadConfig);
|
||||
} catch (error: unknown) {
|
||||
this.handleStreamError(error);
|
||||
@@ -551,33 +384,7 @@ export class WorkflowBuilderAgent {
|
||||
payload: ChatPayload,
|
||||
streamConfig: RunnableConfig,
|
||||
agent: ReturnType<ReturnType<typeof this.createWorkflow>['compile']>,
|
||||
threadConfig: RunnableConfig,
|
||||
) {
|
||||
const currentState = await agent.getState(threadConfig);
|
||||
|
||||
// Check if there are pending interrupts (e.g., plan review)
|
||||
const interruptedTask = currentState.tasks.find(
|
||||
(task) => task.interrupts && task.interrupts.length > 0,
|
||||
);
|
||||
|
||||
if (interruptedTask) {
|
||||
// We have a pending interrupt - likely a plan review
|
||||
|
||||
// Check if the message is an approval message, right now we only check for exact match
|
||||
// in the future we might want to use a LLM classifier for more flexibility
|
||||
const action = payload.message.trim() === PLAN_APPROVAL_MESSAGE ? 'approve' : 'adjust';
|
||||
|
||||
// Resume with the appropriate command
|
||||
const resumeCommand = new Command({
|
||||
resume: {
|
||||
action,
|
||||
feedback: action === 'adjust' ? payload.message : undefined,
|
||||
},
|
||||
});
|
||||
|
||||
return await agent.stream(resumeCommand, streamConfig);
|
||||
}
|
||||
|
||||
return await agent.stream(
|
||||
{
|
||||
messages: [new HumanMessage({ content: payload.message })],
|
||||
|
||||
@@ -2,7 +2,6 @@ import type { BaseMessage } from '@langchain/core/messages';
import { HumanMessage } from '@langchain/core/messages';
import { Annotation, messagesStateReducer } from '@langchain/langgraph';

import type { WorkflowPlan } from './agents/workflow-planner-agent';
import type { SimpleWorkflow, WorkflowOperation } from './types/workflow';
import type { ChatPayload } from './workflow-builder-agent';

@@ -76,21 +75,6 @@ export const WorkflowState = Annotation.Root({
reducer: operationsReducer,
default: () => [],
}),
// The planned workflow nodes
workflowPlan: Annotation<WorkflowPlan | null>({
reducer: (x, y) => y ?? x,
default: () => null,
}),
// Status of the workflow plan
planStatus: Annotation<'pending' | 'approved' | 'rejected' | null>({
reducer: (x, y) => y ?? x,
default: () => null,
}),
// User feedback on the plan
planFeedback: Annotation<string | null>({
reducer: (x, y) => y ?? x,
default: () => null,
}),
// Latest workflow context
workflowContext: Annotation<ChatPayload['workflowContext'] | undefined>({
reducer: (x, y) => y ?? x,
@@ -200,10 +200,6 @@
"aiAssistant.builder.canvasPrompt.startManually.title": "Start manually",
"aiAssistant.builder.canvasPrompt.startManually.subTitle": "Add the first node",
"aiAssistant.builder.streamAbortedMessage": "[Task aborted]",
"aiAssistant.builder.plan.intro": "Do you want to proceed with this plan?",
"aiAssistant.builder.plan.approve": "Approve Plan",
"aiAssistant.builder.plan.reject": "Request Changes",
"aiAssistant.builder.plan.whatToChange": "What would you like to change?",
"aiAssistant.assistant": "AI Assistant",
"aiAssistant.newSessionModal.title.part1": "Start new",
"aiAssistant.newSessionModal.title.part2": "session",
@@ -26,7 +26,6 @@ import { mockedStore } from '@/__tests__/utils';
import { STORES } from '@n8n/stores';
import { useWorkflowsStore } from '@/stores/workflows.store';
import type { INodeUi } from '@/Interface';
import { PLAN_APPROVAL_MESSAGE } from '@/constants';

vi.mock('@/event-bus', () => ({
nodeViewEventBus: {
@@ -978,384 +977,6 @@ describe('AskAssistantBuild', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('NodesPlan message handling', () => {
|
||||
it('should render plan messages with appropriate controls', async () => {
|
||||
const nodesPlanMessage = {
|
||||
id: faker.string.uuid(),
|
||||
role: 'assistant' as const,
|
||||
type: 'custom' as const,
|
||||
customType: 'nodesPlan',
|
||||
message: 'Here is my plan:',
|
||||
data: [
|
||||
{
|
||||
nodeType: 'n8n-nodes-base.httpRequest',
|
||||
nodeName: 'HTTP Request',
|
||||
reasoning: 'To fetch data from an API',
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const regularMessage = {
|
||||
id: faker.string.uuid(),
|
||||
role: 'assistant' as const,
|
||||
type: 'text' as const,
|
||||
content: 'Regular message',
|
||||
};
|
||||
|
||||
// First test with regular message - should not show plan controls
|
||||
builderStore.$patch({
|
||||
chatMessages: [regularMessage],
|
||||
});
|
||||
|
||||
const { queryByText } = renderComponent();
|
||||
await flushPromises();
|
||||
|
||||
// Regular message should not have plan controls
|
||||
expect(queryByText('aiAssistant.builder.plan.approve')).not.toBeInTheDocument();
|
||||
|
||||
// Now test with plan message
|
||||
builderStore.$patch({
|
||||
chatMessages: [nodesPlanMessage],
|
||||
});
|
||||
await flushPromises();
|
||||
|
||||
// Plan message should have controls
|
||||
expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('should handle plan approval correctly', async () => {
|
||||
// Setup: empty workflow so initialGeneration is true
|
||||
workflowsStore.$patch({ workflow: { nodes: [], connections: {} } });
|
||||
|
||||
const nodesPlanMessage = {
|
||||
id: faker.string.uuid(),
|
||||
role: 'assistant' as const,
|
||||
type: 'custom' as const,
|
||||
customType: 'nodesPlan',
|
||||
message: 'Here is my plan:',
|
||||
data: [
|
||||
{
|
||||
nodeType: 'n8n-nodes-base.httpRequest',
|
||||
nodeName: 'HTTP Request',
|
||||
reasoning: 'To fetch data from an API',
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
builderStore.$patch({
|
||||
chatMessages: [nodesPlanMessage],
|
||||
});
|
||||
|
||||
const { getByText } = renderComponent();
|
||||
await flushPromises();
|
||||
|
||||
// Find and click the approve button
|
||||
const approveButton = getByText('aiAssistant.builder.plan.approve');
|
||||
await fireEvent.click(approveButton);
|
||||
await flushPromises();
|
||||
|
||||
// Verify that sendChatMessage was called with approval message
|
||||
expect(builderStore.sendChatMessage).toHaveBeenCalledWith({
|
||||
text: PLAN_APPROVAL_MESSAGE,
|
||||
initialGeneration: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle plan rejection correctly', async () => {
|
||||
const nodesPlanMessage = {
|
||||
id: faker.string.uuid(),
|
||||
role: 'assistant' as const,
|
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Here is my plan:',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'To fetch data from an API',
},
],
};

builderStore.$patch({
chatMessages: [nodesPlanMessage],
});

const { getByText, queryByText } = renderComponent();
await flushPromises();

// Find and click the reject button
const rejectButton = getByText('aiAssistant.builder.plan.reject');
await fireEvent.click(rejectButton);
await flushPromises();

// Verify plan controls are hidden after rejection
expect(queryByText('aiAssistant.builder.plan.approve')).not.toBeInTheDocument();
expect(queryByText('aiAssistant.builder.plan.reject')).not.toBeInTheDocument();
});

it('should show plan controls only for the last plan message', async () => {
const planMessage1 = {
id: 'plan1',
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'First plan:',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'First plan',
},
],
};

const textMessage = {
id: 'text1',
role: 'assistant' as const,
type: 'text' as const,
content: 'Some text',
};

const planMessage2 = {
id: 'plan2',
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Second plan:',
data: [
{
nodeType: 'n8n-nodes-base.emailSend',
nodeName: 'Send Email',
reasoning: 'Second plan',
},
],
};

// First set just the first plan message
builderStore.$patch({
chatMessages: [planMessage1],
});

const { queryByText } = renderComponent();
await flushPromises();

// First plan should show controls when it's alone
expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();

// Now add more messages
builderStore.$patch({
chatMessages: [planMessage1, textMessage, planMessage2],
});
await flushPromises();

// Now only the last plan message should have visible controls
// We check this by seeing that approve button is still there (from plan2)
expect(queryByText('aiAssistant.builder.plan.approve')).toBeInTheDocument();
});
});

describe('Plan status management in workflow messages watcher', () => {
it('should disable chat when plan is pending and enable after action', async () => {
// Add a nodes plan message as the last message
const nodesPlanMessage = {
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Plan message',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'To fetch data',
},
],
};

builderStore.$patch({
chatMessages: [nodesPlanMessage],
});

// Also need to mock workflowMessages getter
builderStore.workflowMessages = [];

const { getByText, queryByRole } = renderComponent();
await flushPromises();

// Trigger the watcher by updating workflowMessages
builderStore.workflowMessages = [
{
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'workflow-updated' as const,
codeSnippet: '{}',
},
];

await flushPromises();

// Chat should be disabled while plan is pending
const chatInput = queryByRole('textbox');
expect(chatInput).toHaveAttribute('disabled');

// Approve the plan
const approveButton = getByText('aiAssistant.builder.plan.approve');
await fireEvent.click(approveButton);
await flushPromises();

// Chat should be enabled after approval
const chatInputAfter = queryByRole('textbox');
expect(chatInputAfter).not.toHaveAttribute('disabled');
});

it('should not disable chat when last message is not a nodes plan', async () => {
// Add a regular text message as the last message
const textMessage = {
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'text' as const,
content: 'Regular message',
};

builderStore.$patch({
chatMessages: [textMessage],
});

// Mock workflowMessages getter
builderStore.workflowMessages = [];

const { queryByRole } = renderComponent();
await flushPromises();

// Trigger the watcher
builderStore.workflowMessages = [
{
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'workflow-updated' as const,
codeSnippet: '{}',
},
];

await flushPromises();

// Chat should remain enabled
const chatInput = queryByRole('textbox');
expect(chatInput).not.toHaveAttribute('disabled');
});
});

describe('Disabled state when plan is pending', () => {
it('should disable chat input when plan status is pending', async () => {
const nodesPlanMessage = {
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Plan message',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'To fetch data',
},
],
};

builderStore.$patch({
chatMessages: [nodesPlanMessage],
});

// Mock workflowMessages getter
builderStore.workflowMessages = [];

const { queryByRole } = renderComponent();
await flushPromises();

// Trigger workflow message update to set planStatus to pending
builderStore.workflowMessages = [
{
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'workflow-updated' as const,
codeSnippet: '{}',
},
];

await flushPromises();

const chatInput = queryByRole('textbox');
expect(chatInput).toHaveAttribute('disabled');
});

it('should enable chat input after plan approval', async () => {
const nodesPlanMessage = {
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Plan message',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'To fetch data',
},
],
};

builderStore.$patch({
chatMessages: [nodesPlanMessage],
});

const { queryByRole, getByText } = renderComponent();
await flushPromises();

// Click approve button
const approveButton = getByText('aiAssistant.builder.plan.approve');
await fireEvent.click(approveButton);
await flushPromises();

// Chat should be enabled after approval
const chatInput = queryByRole('textbox');
expect(chatInput).not.toHaveAttribute('disabled');
});

it('should enable chat input after plan rejection', async () => {
const nodesPlanMessage = {
id: faker.string.uuid(),
role: 'assistant' as const,
type: 'custom' as const,
customType: 'nodesPlan',
message: 'Plan message',
data: [
{
nodeType: 'n8n-nodes-base.httpRequest',
nodeName: 'HTTP Request',
reasoning: 'To fetch data',
},
],
};

builderStore.$patch({
chatMessages: [nodesPlanMessage],
});

const { queryByRole, getByText } = renderComponent();
await flushPromises();

// Click reject button
const rejectButton = getByText('aiAssistant.builder.plan.reject');
await fireEvent.click(rejectButton);
await flushPromises();

// Chat should be enabled after rejection
const chatInput = queryByRole('textbox');
expect(chatInput).not.toHaveAttribute('disabled');
});
});

it('should handle multiple canvas generations correctly', async () => {
const originalWorkflow = {
nodes: [],

@@ -1,19 +1,16 @@
<script lang="ts" setup>
import { useBuilderStore } from '@/stores/builder.store';
import { useUsersStore } from '@/stores/users.store';
import { computed, watch, ref, nextTick } from 'vue';
import { computed, watch, ref } from 'vue';
import AskAssistantChat from '@n8n/design-system/components/AskAssistantChat/AskAssistantChat.vue';
import { useTelemetry } from '@/composables/useTelemetry';
import { useI18n } from '@n8n/i18n';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useRoute, useRouter } from 'vue-router';
import { useWorkflowSaving } from '@/composables/useWorkflowSaving';
import type { ChatUI, RatingFeedback } from '@n8n/design-system/types/assistant';
import type { RatingFeedback } from '@n8n/design-system/types/assistant';
import { isWorkflowUpdatedMessage } from '@n8n/design-system/types/assistant';
import { nodeViewEventBus } from '@/event-bus';
import type { NodesPlanMessageType } from './NodesPlanMessage.vue';
import NodesPlanMessage from './NodesPlanMessage.vue';
import { PLAN_APPROVAL_MESSAGE } from '@/constants';

const emit = defineEmits<{
close: [];
@@ -31,7 +28,6 @@ const workflowSaver = useWorkflowSaving({ router });
// Track processed workflow updates
const processedWorkflowUpdates = ref(new Set<string>());
const trackedTools = ref(new Set<string>());
const planStatus = ref<'pending' | 'approved' | 'rejected'>();
const assistantChatRef = ref<InstanceType<typeof AskAssistantChat> | null>(null);
const workflowUpdated = ref<{ start: string; end: string } | undefined>();

@@ -81,40 +77,6 @@ function onFeedback(feedback: RatingFeedback) {
}
}

function isNodesPlanMessage(message: ChatUI.AssistantMessage): message is NodesPlanMessageType {
return (
message.type === 'custom' && message.customType === 'nodesPlan' && Array.isArray(message.data)
);
}

function onApprovePlan() {
planStatus.value = 'approved';

telemetry.track('User clicked Approve plan', {
session_id: builderStore.trackingSessionId,
});

void onUserMessage(PLAN_APPROVAL_MESSAGE);
}

function onRequestChanges() {
planStatus.value = 'rejected';

telemetry.track('User clicked Request changes', {
session_id: builderStore.trackingSessionId,
});

// Focus the input after rejecting the plan
void nextTick(() => {
assistantChatRef.value?.focusInput();
});
}

function shouldShowPlanControls(message: NodesPlanMessageType) {
const planMessageIndex = builderStore.chatMessages.findIndex((msg) => msg.id === message.id);
return planMessageIndex === builderStore.chatMessages.length - 1;
}

function dedupeToolNames(toolNames: string[]): string[] {
return [...new Set(toolNames)];
}
@@ -175,12 +137,6 @@ watch(
}
}
});

// Check if last message is a plan message and if so, whether to show controls
const lastMessage = builderStore.chatMessages[builderStore.chatMessages.length - 1];
if (lastMessage && isNodesPlanMessage(lastMessage)) {
planStatus.value = 'pending';
}
},
{ deep: true },
);
@@ -229,7 +185,6 @@ watch(currentRoute, () => {
:user="user"
:messages="builderStore.chatMessages"
:streaming="builderStore.streaming"
:disabled="planStatus === 'pending'"
:loading-message="loadingMessage"
:mode="i18n.baseText('aiAssistant.builder.mode')"
:title="'n8n AI'"
@@ -245,16 +200,6 @@ watch(currentRoute, () => {
<template #header>
<slot name="header" />
</template>
<template #custom-message="{ message, ...props }">
<NodesPlanMessage
v-if="message.customType === 'nodesPlan' && isNodesPlanMessage(message)"
:message="message"
:show-controls="shouldShowPlanControls(message)"
v-bind="props"
@approve-plan="onApprovePlan"
@request-changes="onRequestChanges"
/>
</template>
<template #placeholder>
<n8n-text :class="$style.topText">{{
i18n.baseText('aiAssistant.builder.placeholder')

@@ -1,134 +0,0 @@
<!-- eslint-disable import-x/extensions -->
<script setup lang="ts">
import NodeIcon from '@/components/NodeIcon.vue';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import type { ChatUI, RatingFeedback } from '@n8n/design-system';
import BaseMessage from '@n8n/design-system/components/AskAssistantChat/messages/BaseMessage.vue';
import { useI18n } from '@n8n/i18n';
import { ref } from 'vue';

interface NodesPlan {
nodeType: string;
nodeName: string;
reasoning: string;
}

export type NodesPlanMessageType = ChatUI.CustomMessage & {
data: NodesPlan[];
};

interface Props {
message: NodesPlanMessageType;
showControls?: boolean;
isFirstOfRole: boolean;
user?: {
firstName: string;
lastName: string;
};
streaming?: boolean;
isLastMessage?: boolean;
}

const props = defineProps<Props>();

const nodeTypesStore = useNodeTypesStore();
const i18n = useI18n();
const showControlsLocal = ref(props.showControls ?? true);
const changesRequested = ref(false);

const emit = defineEmits<{
approvePlan: [];
requestChanges: [];
feedback: [RatingFeedback];
}>();

function onApprovePlan() {
showControlsLocal.value = false;
emit('approvePlan');
}

function onRequestChanges() {
showControlsLocal.value = false;
changesRequested.value = true;
emit('requestChanges');
}
</script>

<template>
<BaseMessage
:class="$style.message"
:message="message"
:is-first-of-role="true"
:user="user"
@feedback="(feedback) => emit('feedback', feedback)"
>
{{ message.message }}
<ol :class="$style.nodes">
<li v-for="(node, index) in message.data" :key="index" :class="$style.node">
<n8n-tooltip placement="left" :show-after="300">
<template #content>
{{ node.reasoning }}
</template>
<div :class="$style.node">
<NodeIcon
:class="$style.nodeIcon"
:node-type="nodeTypesStore.getNodeType(node.nodeType)"
:node-name="node.nodeName"
:show-tooltip="false"
:size="12"
/>
<span>{{ node.nodeName }}</span>
</div>
</n8n-tooltip>
</li>
</ol>
<template v-if="showControls && showControlsLocal && !streaming">
{{ i18n.baseText('aiAssistant.builder.plan.intro') }}
<div :class="$style.controls">
<n8n-button type="primary" @click="onApprovePlan">{{
i18n.baseText('aiAssistant.builder.plan.approve')
}}</n8n-button>
<n8n-button type="secondary" @click="onRequestChanges">{{
i18n.baseText('aiAssistant.builder.plan.reject')
}}</n8n-button>
</div>
</template>
<template v-if="changesRequested">
<span class="mb-m">{{ i18n.baseText('aiAssistant.builder.plan.whatToChange') }}</span>
</template>
</BaseMessage>
</template>

<style lang="scss" module>
.message {
display: flex;
flex-direction: column;
}
.nodes {
list-style: none;
padding: var(--spacing-2xs);
display: flex;
flex-direction: column;
gap: var(--spacing-2xs);
}
.nodeIcon {
padding: var(--spacing-3xs);
border-radius: var(--border-radius-base);
border: 1px solid var(--color-foreground-base);
display: inline-flex;
}
.node {
display: flex;
align-items: center;
gap: var(--spacing-3xs);
font-size: var(--font-size-2xs);
}
.controls {
display: flex;
gap: var(--spacing-2xs);
margin-top: var(--spacing-xs);
}
.followUpMessage {
margin-top: var(--spacing-m);
}
</style>
@@ -1,12 +1,7 @@
import type { ChatUI } from '@n8n/design-system/types/assistant';
import type { ChatRequest } from '@/types/assistant.types';
import { useI18n } from '@n8n/i18n';
import {
isTextMessage,
isWorkflowUpdatedMessage,
isToolMessage,
isPlanMessage,
} from '@/types/assistant.types';
import { isTextMessage, isWorkflowUpdatedMessage, isToolMessage } from '@/types/assistant.types';

export interface MessageProcessingResult {
messages: ChatUI.AssistantMessage[];
@@ -124,18 +119,6 @@ export function useBuilderMessages() {
// Don't clear thinking for workflow updates - they're just state changes
} else if (isToolMessage(msg)) {
processToolMessage(messages, msg, messageId);
} else if (isPlanMessage(msg)) {
// Add new plan message
messages.push({
id: messageId,
role: 'assistant',
type: 'custom',
customType: 'nodesPlan',
message: msg.message,
data: msg.plan,
} satisfies ChatUI.CustomMessage);
// Plan messages are informational, clear thinking
shouldClearThinking = true;
} else if ('type' in msg && msg.type === 'error' && 'content' in msg) {
// Handle error messages from the API
// API sends error messages with type: 'error' and content field
@@ -390,17 +373,6 @@ export function useBuilderMessages() {
} satisfies ChatUI.AssistantMessage;
}

if (isPlanMessage(message)) {
return {
id,
role: 'assistant',
type: 'custom',
customType: 'nodesPlan',
data: message.plan,
message: message.message,
} satisfies ChatUI.CustomMessage;
}

// Handle event messages
if ('type' in message && message.type === 'event') {
return {

@@ -166,17 +166,6 @@ export namespace ChatRequest {
}

// API-only types
export interface PlanMessage {
role: 'assistant';
type: 'plan';
plan: Array<{
nodeType: string;
nodeName: string;
reasoning: string;
}>;
message?: string; // For plan-review messages
}

export type MessageResponse =
| ((
| TextMessage
@@ -187,7 +176,6 @@ export namespace ChatRequest {
| ChatUI.WorkflowUpdatedMessage
| ToolMessage
| ChatUI.ErrorMessage
| PlanMessage
) & {
quickReplies?: ChatUI.QuickReply[];
})
@@ -277,7 +265,3 @@ export function isEndSessionMessage(
): msg is ChatUI.EndSessionMessage {
return 'type' in msg && msg.type === 'event' && msg.eventName === 'end-session';
}

export function isPlanMessage(msg: ChatRequest.MessageResponse): msg is ChatRequest.PlanMessage {
return 'type' in msg && msg.type === 'plan' && 'plan' in msg && Array.isArray(msg.plan);
}