feat: Optimize langchain calls in batching mode (#15011)

Co-authored-by: कारतोफ्फेलस्क्रिप्ट™ <aditya@netroy.in>
Benjamin Schroth authored 2025-05-02 17:09:31 +02:00, committed by GitHub
commit f3e29d25ed, parent a4290dcb78
12 changed files with 632 additions and 205 deletions


@@ -2,7 +2,7 @@ import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { SystemMessagePromptTemplate, ChatPromptTemplate } from '@langchain/core/prompts';
import { OutputFixingParser, StructuredOutputParser } from 'langchain/output_parsers';
import { NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
import { NodeConnectionTypes, NodeOperationError, sleep } from 'n8n-workflow';
import type {
IDataObject,
IExecuteFunctions,
@@ -131,6 +131,31 @@ export class SentimentAnalysis implements INodeType {
description:
'Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)',
},
{
displayName: 'Batch Processing',
name: 'batching',
type: 'collection',
description: 'Batch processing options for rate limiting',
default: {},
options: [
{
displayName: 'Batch Size',
name: 'batchSize',
default: 100,
type: 'number',
description:
'How many items to process in parallel. This is useful for rate limiting.',
},
{
displayName: 'Delay Between Batches',
name: 'delayBetweenBatches',
default: 0,
type: 'number',
description:
'Delay in milliseconds between batches. This is useful for rate limiting.',
},
],
},
],
},
],
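The two new fields above are only UI options; operationally they shape execution roughly as in the minimal sketch below (hypothetical `runInBatches` and `processItem` names, not the node's code; `sleep` is the helper added to the 'n8n-workflow' import): items are sliced into chunks of `batchSize`, each chunk is processed concurrently, and the loop pauses `delayBetweenBatches` milliseconds between chunks.

// Minimal sketch, not the node's implementation. `runInBatches` and `processItem`
// are hypothetical names used only for illustration.
import { sleep } from 'n8n-workflow';

async function runInBatches<T, R>(
	items: T[],
	batchSize: number,
	delayBetweenBatches: number,
	processItem: (item: T, index: number) => Promise<R>,
): Promise<R[]> {
	const results: R[] = [];
	for (let i = 0; i < items.length; i += batchSize) {
		const batch = items.slice(i, i + batchSize);
		// Everything inside one batch runs concurrently; batches themselves run sequentially.
		results.push(...(await Promise.all(batch.map(async (item, j) => processItem(item, i + j)))));
		// Pause only between batches, never after the last one.
		if (i + batchSize < items.length && delayBetweenBatches > 0) {
			await sleep(delayBetweenBatches);
		}
	}
	return results;
}

With the defaults (batch size 100, no delay) this degrades to plain chunked parallel processing.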
@@ -145,12 +170,21 @@ export class SentimentAnalysis implements INodeType {
)) as BaseLanguageModel;
const returnData: INodeExecutionData[][] = [];
const { batchSize, delayBetweenBatches } = this.getNodeParameter('options.batching', 0, {
batchSize: 100,
delayBetweenBatches: 0,
}) as {
batchSize: number;
delayBetweenBatches: number;
};
for (let i = 0; i < items.length; i++) {
try {
for (let i = 0; i < items.length; i += batchSize) {
const batch = items.slice(i, i + batchSize);
const batchPromises = batch.map(async (_item, batchItemIndex) => {
const itemIndex = i + batchItemIndex;
const sentimentCategories = this.getNodeParameter(
'options.categories',
i,
itemIndex,
DEFAULT_CATEGORIES,
) as string;
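The loop now advances in steps of `batchSize`, and each concurrently handled item recovers its original row via `itemIndex = i + batchItemIndex`, so per-item parameters such as 'options.categories' still resolve against the correct input item. An illustrative trace with assumed numbers (250 items, batch size 100):

// Illustration only, with assumed values; mirrors the index arithmetic above.
const demoItems = Array.from({ length: 250 }, (_, n) => ({ row: n }));
const demoBatchSize = 100;
const visited: number[] = [];
for (let i = 0; i < demoItems.length; i += demoBatchSize) {
	// i takes the values 0, 100, 200.
	const batch = demoItems.slice(i, i + demoBatchSize);
	batch.forEach((_item, batchItemIndex) => {
		visited.push(i + batchItemIndex); // 0..99, then 100..199, then 200..249
	});
}
// visited.length === 250 and visited[k] === k for every k.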
@@ -160,9 +194,13 @@ export class SentimentAnalysis implements INodeType {
.filter(Boolean);
if (categories.length === 0) {
throw new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
itemIndex: i,
});
return {
result: null,
itemIndex,
error: new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
itemIndex,
}),
};
}
// Initialize returnData with empty arrays for each category
@@ -170,7 +208,7 @@ export class SentimentAnalysis implements INodeType {
returnData.push(...Array.from({ length: categories.length }, () => []));
}
const options = this.getNodeParameter('options', i, {}) as {
const options = this.getNodeParameter('options', itemIndex, {}) as {
systemPromptTemplate?: string;
includeDetailedResults?: boolean;
enableAutoFixing?: boolean;
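Note how the map callback now returns errors instead of throwing them: a thrown error would reject the whole `Promise.all` for the batch, whereas returning it preserves every item's outcome so `continueOnFail()` can be applied per item afterwards. Each batch promise resolves to roughly the shape below (the type name `BatchItemResult` is invented here for illustration):

// Hypothetical type, approximating the object literals returned from the map callback.
import type { INodeExecutionData, NodeOperationError } from 'n8n-workflow';

type BatchItemResult = {
	itemIndex: number;
	// null on error, {} when no matching sentiment category was found,
	// or the classified item plus the index of the output it belongs to.
	result: { resultItem?: INodeExecutionData; sentimentIndex?: number } | null;
	error?: NodeOperationError;
};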
@@ -194,10 +232,10 @@ export class SentimentAnalysis implements INodeType {
const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
`${options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE}
{format_instructions}`,
{format_instructions}`,
);
const input = this.getNodeParameter('inputText', i) as string;
const input = this.getNodeParameter('inputText', itemIndex) as string;
const inputPrompt = new HumanMessage(input);
const messages = [
await systemPromptTemplate.format({
@@ -217,7 +255,7 @@ export class SentimentAnalysis implements INodeType {
);
if (sentimentIndex !== -1) {
const resultItem = { ...items[i] };
const resultItem = { ...items[itemIndex] };
const sentimentAnalysis: IDataObject = {
category: output.sentiment,
};
@@ -229,27 +267,59 @@ export class SentimentAnalysis implements INodeType {
...resultItem.json,
sentimentAnalysis,
};
returnData[sentimentIndex].push(resultItem);
return {
result: {
resultItem,
sentimentIndex,
},
itemIndex,
};
}
return {
result: {},
itemIndex,
};
} catch (error) {
throw new NodeOperationError(
this.getNode(),
'Error during parsing of LLM output, please check your LLM model and configuration',
{
itemIndex: i,
},
);
return {
result: null,
itemIndex,
error: new NodeOperationError(
this.getNode(),
'Error during parsing of LLM output, please check your LLM model and configuration',
{
itemIndex,
},
),
};
}
} catch (error) {
if (this.continueOnFail()) {
const executionErrorData = this.helpers.constructExecutionMetaData(
this.helpers.returnJsonArray({ error: error.message }),
{ itemData: { item: i } },
);
returnData[0].push(...executionErrorData);
continue;
});
const batchResults = await Promise.all(batchPromises);
batchResults.forEach(({ result, itemIndex, error }) => {
if (error) {
if (this.continueOnFail()) {
const executionErrorData = this.helpers.constructExecutionMetaData(
this.helpers.returnJsonArray({ error: error.message }),
{ itemData: { item: itemIndex } },
);
returnData[0].push(...executionErrorData);
return;
} else {
throw error;
}
} else if (result.resultItem && result.sentimentIndex) {
const sentimentIndex = result.sentimentIndex;
const resultItem = result.resultItem;
returnData[sentimentIndex].push(resultItem);
}
throw error;
});
// Add delay between batches if not the last batch
if (i + batchSize < items.length && delayBetweenBatches > 0) {
await sleep(delayBetweenBatches);
}
}
return returnData;
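For reference, the post-batch bookkeeping done in the `forEach` above can be read as the condensed sketch below (a hypothetical `collectBatchResults` helper reusing the `BatchItemResult` shape sketched earlier; the real node keeps this logic inline, and `IExecuteFunctions` comes from the existing 'n8n-workflow' import): failed items either become error rows on the first output when 'Continue on Fail' is enabled or abort the run, while successful items are pushed onto the output matching their sentiment category.

// Condensed sketch of the result handling above; not a drop-in replacement.
function collectBatchResults(
	this: IExecuteFunctions,
	batchResults: BatchItemResult[],
	returnData: INodeExecutionData[][],
): void {
	for (const { result, itemIndex, error } of batchResults) {
		if (error) {
			if (!this.continueOnFail()) throw error;
			// Record the failure on the first output and keep going.
			const executionErrorData = this.helpers.constructExecutionMetaData(
				this.helpers.returnJsonArray({ error: error.message }),
				{ itemData: { item: itemIndex } },
			);
			returnData[0].push(...executionErrorData);
			continue;
		}
		if (result?.resultItem !== undefined && result.sentimentIndex !== undefined) {
			returnData[result.sentimentIndex].push(result.resultItem);
		}
	}
}

The sketch checks `!== undefined` explicitly so that a sentiment index of 0 (the first category's output) is still handled.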