fix(AI Agent Node): Respect context window length in streaming mode (#19567)

This commit is contained in:
Benjamin Schroth
2025-09-16 16:39:44 +02:00
committed by GitHub
parent 0c0188fe40
commit 6b25c570ed
2 changed files with 72 additions and 1 deletion

View File

@@ -265,7 +265,13 @@ export async function toolsAgentExecute(
isStreamingAvailable &&
this.getNode().typeVersion >= 2.1
) {
const chatHistory = await memory?.chatHistory.getMessages();
// Get chat history respecting the context window length configured in memory
let chatHistory;
if (memory) {
// Load memory variables to respect context window length
const memoryVariables = await memory.loadMemoryVariables({});
chatHistory = memoryVariables['chat_history'];
}
const eventStream = executor.streamEvents(
{
...invokeParams,

View File

@@ -6,6 +6,7 @@ import type { ISupplyDataFunctions, IExecuteFunctions, INode } from 'n8n-workflo
import * as helpers from '../../../../../utils/helpers';
import * as outputParserModule from '../../../../../utils/output_parsers/N8nOutputParser';
import * as commonModule from '../../agents/ToolsAgent/common';
import { toolsAgentExecute } from '../../agents/ToolsAgent/V2/execute';
jest.mock('../../../../../utils/output_parsers/N8nOutputParser', () => ({
@@ -13,6 +14,11 @@ jest.mock('../../../../../utils/output_parsers/N8nOutputParser', () => ({
N8nStructuredOutputParser: jest.fn(),
}));
// Replace getOptionalMemory with a jest.fn() so each test can inject its own
// memory instance; jest.requireActual keeps every other export of the module
// at its real implementation. (jest hoists this call above the imports.)
jest.mock('../../agents/ToolsAgent/common', () => ({
...jest.requireActual('../../agents/ToolsAgent/common'),
getOptionalMemory: jest.fn(),
}));
// Module-level mocked execution context (jest-mock-extended) reused by the
// tests below as the `this` binding for toolsAgentExecute.call(...).
const mockHelpers = mock<IExecuteFunctions['helpers']>();
const mockContext = mock<IExecuteFunctions>({ helpers: mockHelpers });
@@ -620,6 +626,65 @@ describe('toolsAgentExecute', () => {
expect(result[0][0].json.output).toBe('Regular response');
});
it('should respect context window length from memory in streaming mode', async () => {
	// What the memory returns after applying its context-window limit:
	// only 2 of the 4 stored messages survive.
	const truncatedHistory = [
		{ role: 'human', content: 'Message 1' },
		{ role: 'ai', content: 'Response 1' },
	];
	const memoryMock = {
		loadMemoryVariables: jest.fn().mockResolvedValue({ chat_history: truncatedHistory }),
		chatHistory: {
			getMessages: jest.fn().mockResolvedValue([
				...truncatedHistory,
				{ role: 'human', content: 'Message 2' },
				{ role: 'ai', content: 'Response 2' },
			]),
		},
	};
	jest.spyOn(commonModule, 'getOptionalMemory').mockResolvedValue(memoryMock as any);
	jest.spyOn(helpers, 'getConnectedTools').mockResolvedValue([mock<Tool>()]);
	jest.spyOn(outputParserModule, 'getOptionalOutputParser').mockResolvedValue(undefined);
	mockContext.isStreaming.mockReturnValue(true);

	// Minimal event stream: a single chat-model chunk.
	async function* singleChunkStream() {
		yield {
			event: 'on_chat_model_stream',
			data: {
				chunk: {
					content: 'Response',
				},
			},
		};
	}
	const executorMock = {
		streamEvents: jest.fn().mockReturnValue(singleChunkStream()),
	};
	jest.spyOn(AgentExecutor, 'fromAgentAndTools').mockReturnValue(executorMock as any);

	await toolsAgentExecute.call(mockContext);

	// The agent must obtain history via loadMemoryVariables (which enforces
	// the configured context window), never by reading chatHistory directly.
	expect(memoryMock.loadMemoryVariables).toHaveBeenCalledWith({});
	expect(memoryMock.chatHistory.getMessages).not.toHaveBeenCalled();

	// And the window-limited history is what reaches the executor.
	expect(executorMock.streamEvents).toHaveBeenCalledWith(
		expect.objectContaining({
			chat_history: truncatedHistory,
		}),
		expect.any(Object),
	);
});
it('should handle mixed message content types in streaming', async () => {
jest.spyOn(helpers, 'getConnectedTools').mockResolvedValue([mock<Tool>()]);
jest.spyOn(outputParserModule, 'getOptionalOutputParser').mockResolvedValue(undefined);