Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git, synced 2025-12-17 18:12:04 +00:00
feat: Optimize langchain calls in batching mode (#15011)
Co-authored-by: कारतोफ्फेलस्क्रिप्ट™ <aditya@netroy.in>
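In short, the change replaces the node's sequential per-item loop with batched parallel execution: input items are sliced into batches of batchSize, each batch runs concurrently, per-item failures are isolated via Promise.allSettled, and an optional pause of delayBetweenBatches milliseconds is inserted between batches. A minimal standalone sketch of the pattern; processItem is a placeholder for the per-item LLM call, not one of the node's actual helpers:

// Standalone sketch of the batching pattern introduced by this commit.
// processItem stands in for the per-item LLM call; it is not an n8n helper.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function processInBatches<T, R>(
	items: T[],
	processItem: (item: T, itemIndex: number) => Promise<R>,
	batchSize = 100,
	delayBetweenBatches = 0,
): Promise<Array<PromiseSettledResult<R>>> {
	const results: Array<PromiseSettledResult<R>> = [];
	for (let i = 0; i < items.length; i += batchSize) {
		const batch = items.slice(i, i + batchSize);
		// Run the whole batch concurrently; allSettled keeps one item's failure
		// from aborting the rest of the batch.
		const settled = await Promise.allSettled(
			batch.map(async (item, batchItemIndex) => await processItem(item, i + batchItemIndex)),
		);
		results.push(...settled);
		// Throttle between batches, e.g. to stay under provider rate limits.
		if (i + batchSize < items.length && delayBetweenBatches > 0) {
			await sleep(delayBetweenBatches);
		}
	}
	return results;
}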
@@ -5,7 +5,7 @@ import type {
 	INodeType,
 	INodeTypeDescription,
 } from 'n8n-workflow';
-import { NodeApiError, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
+import { NodeApiError, NodeConnectionTypes, NodeOperationError, sleep } from 'n8n-workflow';
 
 import { getPromptInputByType } from '@utils/helpers';
 import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
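The only functional change in this hunk is the added sleep import from n8n-workflow, used further down to pause between batches. It behaves as a promise-based delay; a minimal equivalent for illustration (an assumption, not n8n's actual source):

// Minimal promise-based sleep, equivalent in behaviour to the imported helper.
function sleep(ms: number): Promise<void> {
	return new Promise((resolve) => setTimeout(resolve, ms));
}

async function demo() {
	await sleep(250); // pause for 250 ms
}
void demo();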
@@ -67,19 +67,28 @@ export class ChainLlm implements INodeType {
 		this.logger.debug('Executing Basic LLM Chain');
 		const items = this.getInputData();
 		const returnData: INodeExecutionData[] = [];
+		const { batchSize, delayBetweenBatches } = this.getNodeParameter('batching', 0, {
+			batchSize: 100,
+			delayBetweenBatches: 0,
+		}) as {
+			batchSize: number;
+			delayBetweenBatches: number;
+		};
+		// Get output parser if configured
+		const outputParser = await getOptionalOutputParser(this);
+
+		// Process items in batches
+		for (let i = 0; i < items.length; i += batchSize) {
+			const batch = items.slice(i, i + batchSize);
+			const batchPromises = batch.map(async (_item, batchItemIndex) => {
+				const itemIndex = i + batchItemIndex;
 
-		// Process each input item
-		for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
-			try {
 				// Get the language model
 				const llm = (await this.getInputConnectionData(
 					NodeConnectionTypes.AiLanguageModel,
 					0,
 				)) as BaseLanguageModel;
 
-				// Get output parser if configured
-				const outputParser = await getOptionalOutputParser(this);
-
 				// Get user prompt based on node version
 				let prompt: string;
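Note how the global item index is recovered inside each batch: itemIndex = i + batchItemIndex, where i is the offset at which the batch was sliced. A tiny self-contained check of that arithmetic:

// Check: batch-local indices map back to the original item indices.
const items = ['a', 'b', 'c', 'd', 'e'];
const batchSize = 2;
for (let i = 0; i < items.length; i += batchSize) {
	const batch = items.slice(i, i + batchSize);
	batch.forEach((item, batchItemIndex) => {
		const itemIndex = i + batchItemIndex;
		console.log(itemIndex, item); // prints 0 a, 1 b, 2 c, 3 d, 4 e
	});
}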
@@ -106,44 +115,53 @@ export class ChainLlm implements INodeType {
 					[],
 				) as MessageTemplate[];
 
 				// Execute the chain
-				const responses = await executeChain({
+				return (await executeChain({
 					context: this,
 					itemIndex,
 					query: prompt,
 					llm,
 					outputParser,
 					messages,
-				});
+				})) as object[];
+			});
 
-				// If the node version is 1.6 or higher (and the LLM is using `response_format: json_object`), or an output parser is configured,
-				// we unwrap the response and return the object directly as JSON
-				const shouldUnwrapObjects = this.getNode().typeVersion >= 1.6 || !!outputParser;
-				// Process each response and add to return data
-				responses.forEach((response) => {
-					returnData.push({
-						json: formatResponse(response, shouldUnwrapObjects),
-					});
-				});
-			} catch (error) {
-				// Handle OpenAI specific rate limit errors
-				if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
-					const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
-					if (openAiErrorCode) {
-						const customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);
-						if (customMessage) {
-							error.message = customMessage;
-						}
-					}
-				}
+			const batchResults = await Promise.allSettled(batchPromises);
+
+			batchResults.forEach((promiseResult, batchItemIndex) => {
+				const itemIndex = i + batchItemIndex;
+				if (promiseResult.status === 'rejected') {
+					const error = promiseResult.reason as Error;
+					// Handle OpenAI specific rate limit errors
+					if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
+						const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
+						if (openAiErrorCode) {
+							const customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);
+							if (customMessage) {
+								error.message = customMessage;
+							}
+						}
+					}
 
-				// Continue on failure if configured
-				if (this.continueOnFail()) {
-					returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
-					continue;
-				}
+					if (this.continueOnFail()) {
+						returnData.push({
+							json: { error: error.message },
+							pairedItem: { item: itemIndex },
+						});
+						return;
+					}
+					throw new NodeOperationError(this.getNode(), error);
+				}
 
-				throw error;
+				const responses = promiseResult.value;
+				responses.forEach((response: object) => {
+					returnData.push({
+						json: formatResponse(response, this.getNode().typeVersion >= 1.6 || !!outputParser),
+					});
+				});
+			});
+
+			if (i + batchSize < items.length && delayBetweenBatches > 0) {
+				await sleep(delayBetweenBatches);
+			}
 		}
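The error-handling rewrite above hinges on Promise.allSettled: unlike Promise.all, it never short-circuits, so every item yields either { status: 'fulfilled', value } or { status: 'rejected', reason }, and the node can apply its continueOnFail logic per item. A self-contained sketch of that dispatch:

// Sketch of per-item dispatch over Promise.allSettled results.
async function run() {
	const tasks: Array<Promise<number>> = [
		Promise.resolve(42),
		Promise.reject(new Error('rate limited')),
	];
	const settled = await Promise.allSettled(tasks);
	settled.forEach((result, itemIndex) => {
		if (result.status === 'rejected') {
			// Counterpart of the node's continueOnFail branch.
			console.log(`item ${itemIndex} failed:`, (result.reason as Error).message);
			return;
		}
		console.log(`item ${itemIndex} succeeded:`, result.value);
	});
}
void run();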