Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-18 02:21:13 +00:00).
Commit: feat: Optimise langchain calls in batching mode (#15243)
|
||||
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
|
||||
import { type IExecuteFunctions, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
|
||||
|
||||
import { getPromptInputByType } from '@utils/helpers';
|
||||
import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
|
||||
|
||||
import { executeChain } from './chainExecutor';
|
||||
import { type MessageTemplate } from './types';
|
||||
|
||||
export const processItem = async (ctx: IExecuteFunctions, itemIndex: number) => {
|
||||
const llm = (await ctx.getInputConnectionData(
|
||||
NodeConnectionTypes.AiLanguageModel,
|
||||
0,
|
||||
)) as BaseLanguageModel;
|
||||
|
||||
// Get output parser if configured
|
||||
const outputParser = await getOptionalOutputParser(ctx);
|
||||
|
||||
// Get user prompt based on node version
|
||||
let prompt: string;
|
||||
|
||||
if (ctx.getNode().typeVersion <= 1.3) {
|
||||
prompt = ctx.getNodeParameter('prompt', itemIndex) as string;
|
||||
} else {
|
||||
prompt = getPromptInputByType({
|
||||
ctx,
|
||||
i: itemIndex,
|
||||
inputKey: 'text',
|
||||
promptTypeKey: 'promptType',
|
||||
});
|
||||
}
|
||||
|
||||
// Validate prompt
|
||||
if (prompt === undefined) {
|
||||
throw new NodeOperationError(ctx.getNode(), "The 'prompt' parameter is empty.");
|
||||
}
|
||||
|
||||
// Get chat messages if configured
|
||||
const messages = ctx.getNodeParameter(
|
||||
'messages.messageValues',
|
||||
itemIndex,
|
||||
[],
|
||||
) as MessageTemplate[];
|
||||
|
||||
// Execute the chain
|
||||
return await executeChain({
|
||||
context: ctx,
|
||||
itemIndex,
|
||||
query: prompt,
|
||||
llm,
|
||||
outputParser,
|
||||
messages,
|
||||
});
|
||||
};