Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git, synced 2025-12-18 10:31:15 +00:00
feat: Optimise langchain calls in batching mode (#15243)
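The diff below adds tests for the node's new batching behaviour. The implementation itself is not shown on this page, but the pattern the tests exercise is: split the input items into batches of `batching.batchSize`, run the LangChain calls for each batch, then pause for the configured `batching.delayBetweenBatches` before starting the next one. A minimal sketch of that pattern follows; the helper name `runInBatches` and the `processItem` callback are illustrative stand-ins, not the PR's actual code.

// Illustrative sketch only. `runInBatches` and `processItem` are hypothetical
// names; the real node reads `batching.batchSize` and `batching.delayBetweenBatches`
// via getNodeParameter, exactly as the tests below mock them.
async function runInBatches<T, R>(
	items: T[],
	processItem: (item: T, index: number) => Promise<R>,
	batchSize: number,
	delayBetweenBatches: number,
): Promise<R[]> {
	const results: R[] = [];
	for (let start = 0; start < items.length; start += batchSize) {
		const batch = items.slice(start, start + batchSize);
		// Items within a batch run concurrently; batches run one after another.
		const batchResults = await Promise.all(
			batch.map(async (item, offset) => await processItem(item, start + offset)),
		);
		results.push(...batchResults);
		// Throttle between batches, e.g. to stay under provider rate limits.
		if (delayBetweenBatches > 0 && start + batchSize < items.length) {
			await new Promise((resolve) => setTimeout(resolve, delayBetweenBatches));
		}
	}
	return results;
}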
@@ -3,7 +3,7 @@
 import { FakeChatModel } from '@langchain/core/utils/testing';
 import { mock } from 'jest-mock-extended';
 import type { IExecuteFunctions, INode } from 'n8n-workflow';
-import { NodeConnectionTypes } from 'n8n-workflow';
+import { NodeApiError, NodeConnectionTypes } from 'n8n-workflow';

 import * as helperModule from '@utils/helpers';
 import * as outputParserModule from '@utils/output_parsers/N8nOutputParser';
@@ -191,6 +191,148 @@ describe('ChainLlm Node', () => {
 		expect(result[0]).toHaveLength(2);
 	});

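+	// Batching was introduced with typeVersion 1.7, so every test in this block pins that version.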
+	describe('batching (version 1.7+)', () => {
+		beforeEach(() => {
+			mockExecuteFunction.getNode.mockReturnValue({
+				name: 'Chain LLM',
+				typeVersion: 1.7,
+				parameters: {},
+			} as INode);
+		});
+
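+		// No batching parameters are mocked here, so getNodeParameter falls back to its supplied defaults.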
+		it('should process items in batches with default settings', async () => {
+			mockExecuteFunction.getInputData.mockReturnValue([
+				{ json: { item: 1 } },
+				{ json: { item: 2 } },
+				{ json: { item: 3 } },
+			]);
+
+			mockExecuteFunction.getNodeParameter.mockImplementation(
+				(param, _itemIndex, defaultValue) => {
+					if (param === 'messages.messageValues') return [];
+					return defaultValue;
+				},
+			);
+
+			(helperModule.getPromptInputByType as jest.Mock)
+				.mockReturnValueOnce('Test prompt 1')
+				.mockReturnValueOnce('Test prompt 2')
+				.mockReturnValueOnce('Test prompt 3');
+
+			(executeChainModule.executeChain as jest.Mock)
+				.mockResolvedValueOnce(['Response 1'])
+				.mockResolvedValueOnce(['Response 2'])
+				.mockResolvedValueOnce(['Response 3']);
+
+			const result = await node.execute.call(mockExecuteFunction);
+
+			expect(executeChainModule.executeChain).toHaveBeenCalledTimes(3);
+			expect(result[0]).toHaveLength(3);
+		});
+
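+		// batchSize 2 over four items yields two batches; executeChain is still invoked once per item.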
+		it('should process items in smaller batches', async () => {
+			mockExecuteFunction.getInputData.mockReturnValue([
+				{ json: { item: 1 } },
+				{ json: { item: 2 } },
+				{ json: { item: 3 } },
+				{ json: { item: 4 } },
+			]);
+
+			mockExecuteFunction.getNodeParameter.mockImplementation(
+				(param, _itemIndex, defaultValue) => {
+					if (param === 'batching.batchSize') return 2;
+					if (param === 'batching.delayBetweenBatches') return 0;
+					if (param === 'messages.messageValues') return [];
+					return defaultValue;
+				},
+			);
+
+			(helperModule.getPromptInputByType as jest.Mock)
+				.mockReturnValueOnce('Test prompt 1')
+				.mockReturnValueOnce('Test prompt 2')
+				.mockReturnValueOnce('Test prompt 3')
+				.mockReturnValueOnce('Test prompt 4');
+
+			(executeChainModule.executeChain as jest.Mock)
+				.mockResolvedValueOnce(['Response 1'])
+				.mockResolvedValueOnce(['Response 2'])
+				.mockResolvedValueOnce(['Response 3'])
+				.mockResolvedValueOnce(['Response 4']);
+
+			const result = await node.execute.call(mockExecuteFunction);
+
+			expect(executeChainModule.executeChain).toHaveBeenCalledTimes(4);
+			expect(result[0]).toHaveLength(4);
+		});
+
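+		// With continueOnFail enabled, a rejected chain call becomes an { error } item instead of aborting the run.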
+		it('should handle errors in batches with continueOnFail', async () => {
+			mockExecuteFunction.getInputData.mockReturnValue([
+				{ json: { item: 1 } },
+				{ json: { item: 2 } },
+			]);
+
+			mockExecuteFunction.getNodeParameter.mockImplementation(
+				(param, _itemIndex, defaultValue) => {
+					if (param === 'batching.batchSize') return 2;
+					if (param === 'batching.delayBetweenBatches') return 0;
+					if (param === 'messages.messageValues') return [];
+					return defaultValue;
+				},
+			);
+
+			mockExecuteFunction.continueOnFail.mockReturnValue(true);
+
+			(helperModule.getPromptInputByType as jest.Mock)
+				.mockReturnValueOnce('Test prompt 1')
+				.mockReturnValueOnce('Test prompt 2');
+
+			(executeChainModule.executeChain as jest.Mock)
+				.mockResolvedValueOnce(['Response 1'])
+				.mockRejectedValueOnce(new Error('Test error'));
+
+			const result = await node.execute.call(mockExecuteFunction);
+
+			expect(result[0]).toHaveLength(2);
+			expect(result[0][1].json).toEqual({ error: 'Test error' });
+		});
+
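+		// The OpenAI-style rate-limit error is built with NodeApiError, which this commit newly imports.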
+		it('should handle OpenAI rate limit errors in batches', async () => {
+			mockExecuteFunction.getInputData.mockReturnValue([
+				{ json: { item: 1 } },
+				{ json: { item: 2 } },
+			]);
+
+			mockExecuteFunction.getNodeParameter.mockImplementation(
+				(param, _itemIndex, defaultValue) => {
+					if (param === 'batching.batchSize') return 2;
+					if (param === 'batching.delayBetweenBatches') return 0;
+					if (param === 'messages.messageValues') return [];
+					return defaultValue;
+				},
+			);
+
+			mockExecuteFunction.continueOnFail.mockReturnValue(true);
+
+			(helperModule.getPromptInputByType as jest.Mock)
+				.mockReturnValueOnce('Test prompt 1')
+				.mockReturnValueOnce('Test prompt 2');
+
+			const openAiError = new NodeApiError(mockExecuteFunction.getNode(), {
+				message: 'Rate limit exceeded',
+				cause: { error: { code: 'rate_limit_exceeded' } },
+			});
+
+			(executeChainModule.executeChain as jest.Mock)
+				.mockResolvedValueOnce(['Response 1'])
+				.mockRejectedValueOnce(openAiError);
+
+			const result = await node.execute.call(mockExecuteFunction);
+
+			expect(result[0]).toHaveLength(2);
+			expect(result[0][1].json).toEqual({ error: expect.stringContaining('Rate limit') });
+		});
+	});
+
+	it('should unwrap object responses when node version is 1.6 or higher', async () => {
+		mockExecuteFunction.getNode.mockReturnValue({
+			name: 'Chain LLM',