mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git

feat: Auto-compact workflow builder conversation history (no-changelog) (#18083)
@@ -1,9 +1,28 @@
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage } from '@langchain/core/messages';
 import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { PromptTemplate } from '@langchain/core/prompts';
 import z from 'zod';
 
-export async function conversationCompactChain(llm: BaseChatModel, messages: BaseMessage[]) {
+const compactPromptTemplate = PromptTemplate.fromTemplate(
+	`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
+
+<previous_summary>
+{previousSummary}
+</previous_summary>
+
+<conversation>
+{conversationText}
+</conversation>
+
+Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
+);
+
+export async function conversationCompactChain(
+	llm: BaseChatModel,
+	messages: BaseMessage[],
+	previousSummary: string = '',
+) {
 	// Use structured output for consistent summary format
 	const CompactedSession = z.object({
 		summary: z.string().describe('A concise summary of the conversation so far'),
@@ -21,25 +40,26 @@ export async function conversationCompactChain(llm: BaseChatModel, messages: Bas
 				// eslint-disable-next-line @typescript-eslint/no-base-to-string, @typescript-eslint/restrict-template-expressions
 				return `User: ${msg.content}`;
 			} else if (msg instanceof AIMessage) {
-				// eslint-disable-next-line @typescript-eslint/no-base-to-string, @typescript-eslint/restrict-template-expressions
-				return `Assistant: ${msg.content ?? 'Used tools'}`;
+				if (typeof msg.content === 'string') {
+					return `Assistant: ${msg.content}`;
+				} else {
+					return 'Assistant: Used tools';
+				}
 			}
 
 			return '';
 		})
 		.filter(Boolean)
 		.join('\n');
 
-	const compactPrompt = `Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
-
-${conversationText}
-
-Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`;
+	const compactPrompt = await compactPromptTemplate.invoke({
+		previousSummary,
+		conversationText,
+	});
 
 	const structuredOutput = await modelWithStructure.invoke(compactPrompt);
 
 	// Create a new compacted message
-	const compactedMessage = new AIMessage({
-		content: `## Previous Conversation Summary
+	const formattedSummary = `## Previous Conversation Summary
 
 **Summary:** ${structuredOutput.summary}
@@ -48,17 +68,11 @@ ${(structuredOutput.key_decisions as string[]).map((d: string) => `- ${d}`).join
 
 **Current State:** ${structuredOutput.current_state}
 
-**Next Steps:** ${structuredOutput.next_steps}`,
-	});
-
-	// Keep only the last message(request to compact from user) plus the summary
-	const lastUserMessage = messages.slice(-1);
-	const newMessages = [lastUserMessage[0], compactedMessage];
+**Next Steps:** ${structuredOutput.next_steps}`;
 
 	return {
 		success: true,
 		summary: structuredOutput,
-		newMessages,
-		messagesRemoved: messages.length - newMessages.length,
+		summaryPlain: formattedSummary,
 	};
 }
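
A minimal usage sketch of the chain as changed above. The driver function, the import path, and the model variable are illustrative assumptions; only the `conversationCompactChain` signature and its return fields come from the diff.

```typescript
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { AIMessage, HumanMessage } from '@langchain/core/messages';

import { conversationCompactChain } from './chains/conversation-compact';

// Hypothetical driver: `llm` is any chat model that supports withStructuredOutput().
async function demoCompaction(llm: BaseChatModel) {
	const messages = [
		new HumanMessage('Create a workflow'),
		new AIMessage('I will help you create a workflow'),
	];

	// First compaction: previousSummary defaults to '', so the template's
	// {previousSummary} slot is effectively empty.
	const first = await conversationCompactChain(llm, messages);
	console.log(first.summaryPlain); // '## Previous Conversation Summary\n\n**Summary:** ...'

	// Later compactions feed the prior summary back in, so the new summary
	// covers the whole session rather than only the most recent messages.
	const second = await conversationCompactChain(llm, messages, first.summaryPlain);
	return second.summary; // { summary, key_decisions, current_state, next_steps }
}
```
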
@@ -0,0 +1,226 @@
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
+import type { BaseMessage } from '@langchain/core/messages';
+import { FakeListChatModel } from '@langchain/core/utils/testing';
+
+import { conversationCompactChain } from '../conversation-compact';
+
+// Mock structured output for testing
+class MockStructuredLLM extends FakeListChatModel {
+	private readonly structuredResponse: Record<string, unknown>;
+
+	constructor(response: Record<string, unknown>) {
+		super({ responses: ['mock'] });
+		this.structuredResponse = response;
+	}
+
+	// eslint-disable-next-line @typescript-eslint/no-explicit-any
+	withStructuredOutput(): any {
+		return {
+			invoke: async () => this.structuredResponse,
+		};
+	}
+}
+
+describe('conversationCompactChain', () => {
+	let fakeLLM: BaseChatModel;
+
+	beforeEach(() => {
+		jest.clearAllMocks();
+	});
+
+	describe('Basic functionality', () => {
+		it('should summarize a conversation without previous summary', async () => {
+			fakeLLM = new MockStructuredLLM({
+				summary: 'Test summary of the conversation',
+				key_decisions: ['Decision 1', 'Decision 2'],
+				current_state: 'Current workflow state',
+				next_steps: 'Suggested next steps',
+			});
+
+			const messages: BaseMessage[] = [
+				new HumanMessage('Create a workflow'),
+				new AIMessage('I will help you create a workflow'),
+				new HumanMessage('Add an HTTP node'),
+				new AIMessage('Added HTTP node'),
+			];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+
+			expect(result.success).toBe(true);
+			expect(result.summary).toEqual({
+				summary: 'Test summary of the conversation',
+				key_decisions: ['Decision 1', 'Decision 2'],
+				current_state: 'Current workflow state',
+				next_steps: 'Suggested next steps',
+			});
+
+			expect(result.summaryPlain).toContain('## Previous Conversation Summary');
+			expect(result.summaryPlain).toContain('**Summary:** Test summary of the conversation');
+			expect(result.summaryPlain).toContain('- Decision 1');
+			expect(result.summaryPlain).toContain('- Decision 2');
+			expect(result.summaryPlain).toContain('**Current State:** Current workflow state');
+			expect(result.summaryPlain).toContain('**Next Steps:** Suggested next steps');
+		});
+
+		it('should include previous summary when provided', async () => {
+			fakeLLM = new MockStructuredLLM({
+				summary: 'Continued conversation summary',
+				key_decisions: ['Previous decision', 'New decision'],
+				current_state: 'Updated workflow state',
+				next_steps: 'Continue with next steps',
+			});
+
+			const previousSummary = 'This is a previous summary of earlier conversation';
+			const messages: BaseMessage[] = [
+				new HumanMessage('Continue with the workflow'),
+				new AIMessage('Continuing from where we left off'),
+			];
+
+			const result = await conversationCompactChain(fakeLLM, messages, previousSummary);
+
+			expect(result.success).toBe(true);
+			expect(result.summary.summary).toBe('Continued conversation summary');
+		});
+	});
+
+	describe('Message formatting', () => {
+		beforeEach(() => {
+			fakeLLM = new MockStructuredLLM({
+				summary: 'Message formatting test',
+				key_decisions: [],
+				current_state: 'Test state',
+				next_steps: 'Test steps',
+			});
+		});
+
+		it('should format HumanMessages correctly', async () => {
+			const messages: BaseMessage[] = [
+				new HumanMessage('User message 1'),
+				new HumanMessage('User message 2'),
+			];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+
+		it('should format AIMessages with string content correctly', async () => {
+			const messages: BaseMessage[] = [
+				new AIMessage('Assistant response 1'),
+				new AIMessage('Assistant response 2'),
+			];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+
+		it('should handle AIMessages with non-string content', async () => {
+			const messages: BaseMessage[] = [
+				new AIMessage({ content: 'structured', additional_kwargs: {} }),
+				new AIMessage('Plain message'),
+			];
+
+			// The function should handle both object and string content
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+
+		it('should filter out ToolMessages and other message types', async () => {
+			const messages: BaseMessage[] = [
+				new HumanMessage('User message'),
+				new ToolMessage({ content: 'Tool output', tool_call_id: 'tool-1' }),
+				new AIMessage('Assistant message'),
+			];
+
+			// ToolMessages should be filtered out during processing
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+
+		it('should handle empty messages array', async () => {
+			const messages: BaseMessage[] = [];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+
+		it('should handle messages with empty content', async () => {
+			const messages: BaseMessage[] = [
+				new HumanMessage(''),
+				new AIMessage(''),
+				new HumanMessage('Valid message'),
+			];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+			expect(result.success).toBe(true);
+		});
+	});
+
+	describe('Structured output', () => {
+		it('should format the structured output correctly', async () => {
+			fakeLLM = new MockStructuredLLM({
+				summary: 'Workflow creation initiated',
+				key_decisions: ['Use HTTP node', 'Add authentication', 'Set up error handling'],
+				current_state: 'Workflow has HTTP node configured',
+				next_steps: 'Add data transformation node',
+			});
+
+			const messages: BaseMessage[] = [new HumanMessage('Create workflow')];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+
+			expect(result.summaryPlain).toBe(
+				`## Previous Conversation Summary
+
+**Summary:** Workflow creation initiated
+
+**Key Decisions:**
+- Use HTTP node
+- Add authentication
+- Set up error handling
+
+**Current State:** Workflow has HTTP node configured
+
+**Next Steps:** Add data transformation node`,
+			);
+		});
+
+		it('should handle empty key_decisions array', async () => {
+			fakeLLM = new MockStructuredLLM({
+				summary: 'Test summary',
+				key_decisions: [],
+				current_state: 'Test state',
+				next_steps: 'Test steps',
+			});
+
+			const messages: BaseMessage[] = [new HumanMessage('Test')];
+
+			const result = await conversationCompactChain(fakeLLM, messages);
+
+			expect(result.summaryPlain).toContain('**Key Decisions:**\n');
+			expect(result.summary.key_decisions).toEqual([]);
+		});
+	});
+
+	describe('Error handling', () => {
+		it('should propagate LLM errors', async () => {
+			class ErrorLLM extends FakeListChatModel {
+				// eslint-disable-next-line @typescript-eslint/no-explicit-any
+				withStructuredOutput(): any {
+					return {
+						invoke: async () => {
+							throw new Error('LLM invocation failed');
+						},
+					};
+				}
+			}
+
+			const errorLLM = new ErrorLLM({ responses: [] });
+			const messages: BaseMessage[] = [new HumanMessage('Test message')];
+
+			await expect(conversationCompactChain(errorLLM, messages)).rejects.toThrow(
+				'LLM invocation failed',
+			);
+		});
+	});
+});

@@ -1,3 +1,3 @@
 export const MAX_AI_BUILDER_PROMPT_LENGTH = 1000; // characters
 
-export const MAX_USER_MESSAGES = 10; // Maximum number of user messages to keep in the state
+export const DEFAULT_AUTO_COMPACT_THRESHOLD_TOKENS = 20_000; // Token threshold for auto-compacting the conversation

@@ -359,6 +359,12 @@ const currentExecutionNodesSchemas = `
 <current_execution_nodes_schemas>
 {executionSchema}
 </current_execution_nodes_schemas>`;
 
+const previousConversationSummary = `
+<previous_summary>
+{previousSummary}
+</previous_summary>`;
+
 export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
 	[
 		'system',
@@ -385,6 +391,11 @@ export const mainAgentPrompt = ChatPromptTemplate.fromMessages([
 				text: responsePatterns,
 				cache_control: { type: 'ephemeral' },
 			},
+			{
+				type: 'text',
+				text: previousConversationSummary,
+				cache_control: { type: 'ephemeral' },
+			},
 		],
 	],
 	['placeholder', '{messages}'],
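
A sketch of how the new `{previousSummary}` slot gets filled at invocation time. The reduced prompt below is hypothetical (the real system prompt has many more blocks); only the tag structure and the `'EMPTY'` default come from this diff.

```typescript
import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';

// Hypothetical reduced prompt: one system block with the summary slot,
// plus the existing message placeholder.
const prompt = ChatPromptTemplate.fromMessages([
	['system', '<previous_summary>\n{previousSummary}\n</previous_summary>'],
	['placeholder', '{messages}'],
]);

async function demoPrompt() {
	// The state's previousSummary ('EMPTY' until a compaction runs) is injected
	// on every call, so a compacted session still "remembers" its history.
	return await prompt.invoke({
		previousSummary: 'EMPTY',
		messages: [new HumanMessage('Add an HTTP node')],
	});
}
```
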
@@ -386,6 +386,7 @@ describe('operations-processor', () => {
 			workflowOperations,
 			messages: [],
 			workflowContext: {},
+			previousSummary: 'EMPTY',
 		});
 
 	it('should process operations and clear them', () => {

@@ -48,6 +48,7 @@ describe('tool-executor', () => {
 			workflowOperations: null,
 			messages,
 			workflowContext: {},
+			previousSummary: 'EMPTY',
 		});
 
 	// Helper to create mock tool

@@ -0,0 +1,34 @@
+import { AIMessage } from '@langchain/core/messages';
+
+type AIMessageWithUsageMetadata = AIMessage & {
+	response_metadata: {
+		usage: {
+			input_tokens: number;
+			output_tokens: number;
+		};
+	};
+};
+
+export interface TokenUsage {
+	input_tokens: number;
+	output_tokens: number;
+}
+
+/**
+ * Extracts token usage information from the last AI assistant message
+ */
+export function extractLastTokenUsage(messages: unknown[]): TokenUsage | undefined {
+	const lastAiAssistantMessage = messages.findLast(
+		(m): m is AIMessageWithUsageMetadata =>
+			m instanceof AIMessage &&
+			m.response_metadata?.usage !== undefined &&
+			'input_tokens' in m.response_metadata.usage &&
+			'output_tokens' in m.response_metadata.usage,
+	);
+
+	if (!lastAiAssistantMessage) {
+		return undefined;
+	}
+
+	return lastAiAssistantMessage.response_metadata.usage;
+}
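
A quick illustration of the message shape this helper expects; the `response_metadata.usage` payload mirrors what usage-reporting models attach via LangChain, and the concrete numbers are made up. Note that `Array.prototype.findLast` is an es2023 method, which presumably motivates the tsconfig change in the last hunk of this diff.

```typescript
import { AIMessage, HumanMessage } from '@langchain/core/messages';

import { extractLastTokenUsage } from './utils/token-usage';

const messages = [
	new HumanMessage('Add an HTTP node'),
	// Models that report usage attach it to response_metadata;
	// messages without it are skipped by the findLast predicate.
	new AIMessage({
		content: 'Added the HTTP node',
		response_metadata: { usage: { input_tokens: 18_500, output_tokens: 2_100 } },
	}),
];

const usage = extractLastTokenUsage(messages); // { input_tokens: 18500, output_tokens: 2100 }
// Auto-compaction triggers when input + output exceeds the threshold:
const shouldCompact = usage !== undefined && usage.input_tokens + usage.output_tokens > 20_000; // true
```
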

@@ -12,7 +12,7 @@ import type {
 	NodeExecutionSchema,
 } from 'n8n-workflow';
 
-import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
+import { DEFAULT_AUTO_COMPACT_THRESHOLD_TOKENS, MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
 
 import { conversationCompactChain } from './chains/conversation-compact';
 import { LLMServiceError, ValidationError } from './errors';
@@ -26,6 +26,7 @@ import { createUpdateNodeParametersTool } from './tools/update-node-parameters.t
 import type { SimpleWorkflow } from './types/workflow';
 import { processOperations } from './utils/operations-processor';
 import { createStreamProcessor, formatMessages } from './utils/stream-processor';
+import { extractLastTokenUsage } from './utils/token-usage';
 import { executeToolsInParallel } from './utils/tool-executor';
 import { WorkflowState } from './workflow-state';
@@ -36,6 +37,7 @@ export interface WorkflowBuilderAgentConfig {
 	logger?: Logger;
 	checkpointer?: MemorySaver;
 	tracer?: LangChainTracer;
+	autoCompactThresholdTokens?: number;
 }
 
 export interface ChatPayload {
@@ -54,6 +56,7 @@ export class WorkflowBuilderAgent {
 	private llmComplexTask: BaseChatModel;
 	private logger?: Logger;
 	private tracer?: LangChainTracer;
+	private autoCompactThresholdTokens: number;
 
 	constructor(config: WorkflowBuilderAgentConfig) {
 		this.parsedNodeTypes = config.parsedNodeTypes;
@@ -62,6 +65,8 @@ export class WorkflowBuilderAgent {
 		this.logger = config.logger;
 		this.checkpointer = config.checkpointer ?? new MemorySaver();
 		this.tracer = config.tracer;
+		this.autoCompactThresholdTokens =
+			config.autoCompactThresholdTokens ?? DEFAULT_AUTO_COMPACT_THRESHOLD_TOKENS;
 	}
 
 	private createWorkflow() {
@@ -97,17 +102,41 @@
 			return { messages: [response] };
 		};
 
-		const shouldModifyState = ({ messages }: typeof WorkflowState.State) => {
-			const lastMessage = messages[messages.length - 1] as HumanMessage;
+		const shouldAutoCompact = ({ messages }: typeof WorkflowState.State) => {
+			const tokenUsage = extractLastTokenUsage(messages);
 
-			if (lastMessage.content === '/compact') {
+			if (!tokenUsage) {
+				this.logger?.debug('No token usage metadata found');
+				return false;
+			}
+
+			const tokensUsed = tokenUsage.input_tokens + tokenUsage.output_tokens;
+
+			this.logger?.debug('Token usage', {
+				inputTokens: tokenUsage.input_tokens,
+				outputTokens: tokenUsage.output_tokens,
+				totalTokens: tokensUsed,
+			});
+
+			return tokensUsed > this.autoCompactThresholdTokens;
+		};
+
+		const shouldModifyState = (state: typeof WorkflowState.State) => {
+			const { messages } = state;
+			const lastHumanMessage = messages.findLast((m) => m instanceof HumanMessage)!; // There should always be at least one human message in the array
+
+			if (lastHumanMessage.content === '/compact') {
 				return 'compact_messages';
 			}
 
-			if (lastMessage.content === '/clear') {
+			if (lastHumanMessage.content === '/clear') {
 				return 'delete_messages';
 			}
 
+			if (shouldAutoCompact(state)) {
+				return 'auto_compact_messages';
+			}
+
 			return 'agent';
 		};
@@ -139,17 +168,43 @@
 			return stateUpdate;
 		}
 
 		/**
 		 * Compacts the conversation history by summarizing it
 		 * and removing original messages.
+		 * Might be triggered manually by the user with a `/compact` message, or run automatically
+		 * when the conversation history exceeds a certain token limit.
 		 */
 		const compactSession = async (state: typeof WorkflowState.State) => {
 			if (!this.llmSimpleTask) {
 				throw new LLMServiceError('LLM not setup');
 			}
-			const messages = state.messages;
-			const compactedMessages = await conversationCompactChain(this.llmSimpleTask, messages);
+
+			const { messages, previousSummary } = state;
+			const lastHumanMessage = messages[messages.length - 1] as HumanMessage;
+			const isAutoCompact = lastHumanMessage.content !== '/compact';
+
+			this.logger?.debug('Compacting conversation history', {
+				isAutoCompact,
+			});
+
+			const compactedMessages = await conversationCompactChain(
+				this.llmSimpleTask,
+				messages,
+				previousSummary,
+			);
 
+			// The summarized conversation history becomes part of the system prompt
+			// and will be used in the next LLM call.
+			// We remove all messages and replace them with a mock HumanMessage and AIMessage
+			// to indicate that the conversation history has been compacted.
+			// If this is an auto-compact, we also keep the last human message, as the agent
+			// will continue executing the workflow.
 			return {
+				previousSummary: compactedMessages.summaryPlain,
 				messages: [
 					...messages.map((m) => new RemoveMessage({ id: m.id! })),
-					...compactedMessages.newMessages,
+					new HumanMessage('Please compress the conversation history'),
+					new AIMessage('Successfully compacted conversation history'),
+					...(isAutoCompact ? [new HumanMessage({ content: lastHumanMessage.content })] : []),
 				],
 			};
 		};
@@ -160,15 +215,18 @@
 			.addNode('process_operations', processOperations)
 			.addNode('delete_messages', deleteMessages)
 			.addNode('compact_messages', compactSession)
+			.addNode('auto_compact_messages', compactSession)
 			.addConditionalEdges('__start__', shouldModifyState)
 			.addEdge('tools', 'process_operations')
 			.addEdge('process_operations', 'agent')
+			.addEdge('auto_compact_messages', 'agent')
 			.addEdge('delete_messages', END)
 			.addEdge('compact_messages', END)
 			.addConditionalEdges('agent', shouldContinue);
 
 		return workflow;
 	}
 
 	async getState(workflowId: string, userId?: string) {
		const workflow = this.createWorkflow();
		const agent = workflow.compile({ checkpointer: this.checkpointer });
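
Both compaction nodes run the same `compactSession` function; only their outgoing edges differ. A toy re-statement of the routing above (node and route names come from the diff; the dispatcher itself is illustrative, not the actual implementation):

```typescript
type Route = 'compact_messages' | 'auto_compact_messages' | 'delete_messages' | 'agent';

function routeFor(lastHumanContent: string, overTokenBudget: boolean): Route {
	if (lastHumanContent === '/compact') return 'compact_messages'; // manual: summarize, then END
	if (lastHumanContent === '/clear') return 'delete_messages'; // wipe history, then END
	if (overTokenBudget) return 'auto_compact_messages'; // summarize, then continue to agent
	return 'agent';
}

console.log(routeFor('/compact', false)); // 'compact_messages' — turn ends after summarizing
console.log(routeFor('Add a Slack node', true)); // 'auto_compact_messages' — agent resumes afterwards
```
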
@@ -204,6 +262,7 @@
 				`Message exceeds maximum length of ${MAX_AI_BUILDER_PROMPT_LENGTH} characters`,
 			);
 		}
+
 		const agent = this.createWorkflow().compile({ checkpointer: this.checkpointer });
 		const workflowId = payload.workflowContext?.currentWorkflow?.id;
 		// Generate thread ID from workflowId and userId

@@ -1,9 +1,6 @@
 import type { BaseMessage } from '@langchain/core/messages';
 import { HumanMessage } from '@langchain/core/messages';
 import { Annotation, messagesStateReducer } from '@langchain/langgraph';
-import type { BinaryOperator } from '@langchain/langgraph/dist/channels/binop';
-
-import { MAX_USER_MESSAGES } from '@/constants';
 
 import type { SimpleWorkflow, WorkflowOperation } from './types/workflow';
 import type { ChatPayload } from './workflow-builder-agent';
@@ -61,19 +58,9 @@ export function createTrimMessagesReducer(maxUserMessages: number) {
 	};
 }
 
-// Utility function to combine multiple message reducers into one.
-function combineMessageReducers(...reducers: Array<BinaryOperator<BaseMessage[], BaseMessage[]>>) {
-	return (current: BaseMessage[], update: BaseMessage[]): BaseMessage[] => {
-		return reducers.reduce((acc, reducer) => reducer(acc, update), current);
-	};
-}
-
 export const WorkflowState = Annotation.Root({
 	messages: Annotation<BaseMessage[]>({
-		reducer: combineMessageReducers(
-			messagesStateReducer,
-			createTrimMessagesReducer(MAX_USER_MESSAGES),
-		),
+		reducer: messagesStateReducer,
 		default: () => [],
 	}),
 	// // The original prompt from the user.
@@ -93,4 +80,10 @@ export const WorkflowState = Annotation.Root({
 	workflowContext: Annotation<ChatPayload['workflowContext'] | undefined>({
 		reducer: (x, y) => y ?? x,
 	}),
+
+	// Previous conversation summary (used for compressing long conversations)
+	previousSummary: Annotation<string>({
+		reducer: (x, y) => y ?? x, // Overwrite with the latest summary
+		default: () => 'EMPTY',
+	}),
 });
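
A quick sketch of how the `previousSummary` channel behaves under its reducer. The reducer body and the `'EMPTY'` default come straight from the annotation above; the step-by-step driver is illustrative:

```typescript
// Keep the old value unless a node returns an update for this channel.
const reducer = (x: string, y?: string) => y ?? x;

let previousSummary = 'EMPTY'; // default: () => 'EMPTY'
previousSummary = reducer(previousSummary, undefined); // 'EMPTY' — node returned no update
previousSummary = reducer(previousSummary, '## Previous Conversation Summary ...'); // overwritten by compactSession
```
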

@@ -4,6 +4,8 @@
 		"@n8n/typescript-config/tsconfig.backend.json"
 	],
 	"compilerOptions": {
+		"target": "es2023",
+		"lib": ["es2023"],
 		"rootDir": ".",
 		"emitDecoratorMetadata": true,
 		"experimentalDecorators": true,