fix: Revert AI nodes batching (#15129)

oleg
2025-05-06 11:18:11 +02:00
committed by GitHub
parent ca0e7ffe3b
commit 939ff97ec4
12 changed files with 205 additions and 632 deletions


@@ -2,7 +2,7 @@ import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { SystemMessagePromptTemplate, ChatPromptTemplate } from '@langchain/core/prompts';
import { OutputFixingParser, StructuredOutputParser } from 'langchain/output_parsers';
- import { NodeConnectionTypes, NodeOperationError, sleep } from 'n8n-workflow';
+ import { NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
import type {
IDataObject,
IExecuteFunctions,
@@ -131,31 +131,6 @@ export class SentimentAnalysis implements INodeType {
description:
'Whether to enable auto-fixing (may trigger an additional LLM call if output is broken)',
},
- {
- displayName: 'Batch Processing',
- name: 'batching',
- type: 'collection',
- description: 'Batch processing options for rate limiting',
- default: {},
- options: [
- {
- displayName: 'Batch Size',
- name: 'batchSize',
- default: 100,
- type: 'number',
- description:
- 'How many items to process in parallel. This is useful for rate limiting.',
- },
- {
- displayName: 'Delay Between Batches',
- name: 'delayBetweenBatches',
- default: 0,
- type: 'number',
- description:
- 'Delay in milliseconds between batches. This is useful for rate limiting.',
- },
- ],
- },
],
},
],
@@ -170,21 +145,12 @@ export class SentimentAnalysis implements INodeType {
)) as BaseLanguageModel;
const returnData: INodeExecutionData[][] = [];
- const { batchSize, delayBetweenBatches } = this.getNodeParameter('options.batching', 0, {
- batchSize: 100,
- delayBetweenBatches: 0,
- }) as {
- batchSize: number;
- delayBetweenBatches: number;
- };
- for (let i = 0; i < items.length; i += batchSize) {
- const batch = items.slice(i, i + batchSize);
- const batchPromises = batch.map(async (_item, batchItemIndex) => {
- const itemIndex = i + batchItemIndex;
+ for (let i = 0; i < items.length; i++) {
+ try {
const sentimentCategories = this.getNodeParameter(
'options.categories',
- itemIndex,
+ i,
DEFAULT_CATEGORIES,
) as string;
@@ -194,13 +160,9 @@ export class SentimentAnalysis implements INodeType {
.filter(Boolean);
if (categories.length === 0) {
- return {
- result: null,
- itemIndex,
- error: new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
- itemIndex,
- }),
- };
+ throw new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
+ itemIndex: i,
+ });
}
// Initialize returnData with empty arrays for each category
@@ -208,7 +170,7 @@ export class SentimentAnalysis implements INodeType {
returnData.push(...Array.from({ length: categories.length }, () => []));
}
- const options = this.getNodeParameter('options', itemIndex, {}) as {
+ const options = this.getNodeParameter('options', i, {}) as {
systemPromptTemplate?: string;
includeDetailedResults?: boolean;
enableAutoFixing?: boolean;
@@ -232,10 +194,10 @@ export class SentimentAnalysis implements INodeType {
const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
`${options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE}
- {format_instructions}`,
+ {format_instructions}`,
);
- const input = this.getNodeParameter('inputText', itemIndex) as string;
+ const input = this.getNodeParameter('inputText', i) as string;
const inputPrompt = new HumanMessage(input);
const messages = [
await systemPromptTemplate.format({
@@ -255,7 +217,7 @@ export class SentimentAnalysis implements INodeType {
);
if (sentimentIndex !== -1) {
- const resultItem = { ...items[itemIndex] };
+ const resultItem = { ...items[i] };
const sentimentAnalysis: IDataObject = {
category: output.sentiment,
};
@@ -267,59 +229,27 @@ export class SentimentAnalysis implements INodeType {
...resultItem.json,
sentimentAnalysis,
};
- return {
- result: {
- resultItem,
- sentimentIndex,
- },
- itemIndex,
- };
+ returnData[sentimentIndex].push(resultItem);
}
- return {
- result: {},
- itemIndex,
- };
} catch (error) {
- return {
- result: null,
- itemIndex,
- error: new NodeOperationError(
- this.getNode(),
- 'Error during parsing of LLM output, please check your LLM model and configuration',
- {
- itemIndex,
- },
- ),
- };
+ throw new NodeOperationError(
+ this.getNode(),
+ 'Error during parsing of LLM output, please check your LLM model and configuration',
+ {
+ itemIndex: i,
+ },
+ );
}
- });
- const batchResults = await Promise.all(batchPromises);
- batchResults.forEach(({ result, itemIndex, error }) => {
- if (error) {
- if (this.continueOnFail()) {
- const executionErrorData = this.helpers.constructExecutionMetaData(
- this.helpers.returnJsonArray({ error: error.message }),
- { itemData: { item: itemIndex } },
- );
- returnData[0].push(...executionErrorData);
- return;
- } else {
- throw error;
- }
- } else if (result.resultItem && result.sentimentIndex) {
- const sentimentIndex = result.sentimentIndex;
- const resultItem = result.resultItem;
- returnData[sentimentIndex].push(resultItem);
+ } catch (error) {
+ if (this.continueOnFail()) {
+ const executionErrorData = this.helpers.constructExecutionMetaData(
+ this.helpers.returnJsonArray({ error: error.message }),
+ { itemData: { item: i } },
+ );
+ returnData[0].push(...executionErrorData);
+ continue;
+ }
- });
- // Add delay between batches if not the last batch
- if (i + batchSize < items.length && delayBetweenBatches > 0) {
- await sleep(delayBetweenBatches);
+ throw error;
}
}
return returnData;
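
For reference, the behaviour removed by this revert follows a common pattern: slice the input into fixed-size chunks, resolve each chunk in parallel with Promise.all, and pause between chunks for rate limiting, whereas the restored code walks the items strictly one at a time. The sketch below is a minimal, standalone illustration of both shapes; the names processInBatches, processSequentially, and processItem are placeholders for this example and do not exist in the n8n codebase, and sleep is defined locally rather than imported from n8n-workflow.

// Standalone sketch of the two execution shapes involved in this revert.
// processInBatches, processSequentially, and processItem are illustrative
// names only; they are not part of the n8n codebase.

async function sleep(ms: number): Promise<void> {
	await new Promise<void>((resolve) => setTimeout(resolve, ms));
}

// The removed behaviour: run items in parallel chunks of `batchSize`,
// waiting `delayBetweenBatches` milliseconds between chunks (rate limiting).
async function processInBatches<T, R>(
	items: T[],
	processItem: (item: T, index: number) => Promise<R>,
	batchSize = 100,
	delayBetweenBatches = 0,
): Promise<R[]> {
	const results: R[] = [];
	for (let i = 0; i < items.length; i += batchSize) {
		const batch = items.slice(i, i + batchSize);
		// Items within a batch run concurrently; batches run one after another.
		const batchResults = await Promise.all(
			batch.map(async (item, batchItemIndex) => await processItem(item, i + batchItemIndex)),
		);
		results.push(...batchResults);
		// Pause only between batches, not after the last one.
		if (i + batchSize < items.length && delayBetweenBatches > 0) {
			await sleep(delayBetweenBatches);
		}
	}
	return results;
}

// The restored behaviour: process items strictly one at a time, so a failure
// can be handled (or surfaced) per item before the next one starts.
async function processSequentially<T, R>(
	items: T[],
	processItem: (item: T, index: number) => Promise<R>,
): Promise<R[]> {
	const results: R[] = [];
	for (let i = 0; i < items.length; i++) {
		results.push(await processItem(items[i], i));
	}
	return results;
}

Going back to the sequential shape also restores the per-item try/catch with continueOnFail() and continue, which the batched Promise.all version had to emulate by returning { result, itemIndex, error } objects from the map callback and post-processing them after each batch.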