feat: Add fallback mechanism for agent and basic chain llm (#16617)

Author: Benjamin Schroth
Date: 2025-06-26 16:14:03 +02:00
Committed by: GitHub
Parent: 0b7bca29f8
Commit: 6408d5a1b0

20 changed files with 476 additions and 140 deletions
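In short: the LLM chain and agent nodes gain an optional second "Fallback Model" input plus a needsFallback toggle, so a second model can take over when the primary one errors. A minimal sketch of that runtime behavior, assuming LangChain's standard withFallbacks() helper; the model classes and model names below are illustrative placeholders, not code from this commit:

// Hypothetical sketch (not part of this diff): what a model-level
// fallback does at runtime, using LangChain's Runnable.withFallbacks().
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';

const primary = new ChatOpenAI({ model: 'gpt-4o-mini' });
const fallback = new ChatAnthropic({ model: 'claude-3-5-sonnet-latest' });

// If the primary model throws (rate limit, outage, ...), the same
// input is retried against the fallback model.
const modelWithFallback = primary.withFallbacks({ fallbacks: [fallback] });
const result = await modelWithFallback.invoke('Hello!');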


@@ -1,5 +1,6 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
 import { type IExecuteFunctions, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
+import assert from 'node:assert';

 import { getPromptInputByType } from '@utils/helpers';
 import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
@@ -7,11 +8,40 @@ import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import { executeChain } from './chainExecutor';
 import { type MessageTemplate } from './types';

+async function getChatModel(
+	ctx: IExecuteFunctions,
+	index: number = 0,
+): Promise<BaseLanguageModel | undefined> {
+	const connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);
+
+	let model;
+	if (Array.isArray(connectedModels) && index !== undefined) {
+		if (connectedModels.length <= index) {
+			return undefined;
+		}
+		// We get the models in reversed order from the workflow so we need to reverse them again to match the right index
+		const reversedModels = [...connectedModels].reverse();
+		model = reversedModels[index] as BaseLanguageModel;
+	} else {
+		model = connectedModels as BaseLanguageModel;
+	}
+
+	return model;
+}
+
 export const processItem = async (ctx: IExecuteFunctions, itemIndex: number) => {
-	const llm = (await ctx.getInputConnectionData(
-		NodeConnectionTypes.AiLanguageModel,
-		0,
-	)) as BaseLanguageModel;
+	const needsFallback = ctx.getNodeParameter('needsFallback', 0, false) as boolean;
+	const llm = await getChatModel(ctx, 0);
+	assert(llm, 'Please connect a model to the Chat Model input');
+
+	const fallbackLlm = needsFallback ? await getChatModel(ctx, 1) : null;
+	if (needsFallback && !fallbackLlm) {
+		throw new NodeOperationError(
+			ctx.getNode(),
+			'Please connect a model to the Fallback Model input or disable the fallback option',
+		);
+	}

 	// Get output parser if configured
 	const outputParser = await getOptionalOutputParser(ctx, itemIndex);
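For orientation (not part of the diff): because n8n hands back multi-model connections in reverse order, re-reversing the array makes index 0 the primary "Chat Model" input and index 1 the "Fallback Model" input. A toy illustration of that index convention:

// Toy illustration of the index convention in getChatModel() above.
// getInputConnectionData() returns e.g. [fallback, primary]:
const connected = ['fallback-model', 'primary-model'];
const ordered = [...connected].reverse();
console.log(ordered[0]); // 'primary-model'  -> getChatModel(ctx, 0)
console.log(ordered[1]); // 'fallback-model' -> getChatModel(ctx, 1)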
@@ -50,5 +80,6 @@ export const processItem = async (ctx: IExecuteFunctions, itemIndex: number) =>
 		llm,
 		outputParser,
 		messages,
+		fallbackLlm,
 	});
 };
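The executeChain side lives in one of the other changed files and is not shown on this page. A plausible way the executor could consume the new fallbackLlm argument (this is an assumption, not confirmed by this excerpt) is LangChain's withFallbacks():

// Hypothetical sketch of the consuming side in chainExecutor.ts; the
// actual implementation is in another of the 20 changed files.
import type { BaseLanguageModel } from '@langchain/core/language_models/base';

interface ChainArgs {
	llm: BaseLanguageModel;
	fallbackLlm?: BaseLanguageModel | null;
	// ...other fields (messages, outputParser, ...) omitted
}

function resolveModel({ llm, fallbackLlm }: ChainArgs) {
	// withFallbacks() returns a Runnable that retries the same input
	// against fallbackLlm whenever llm throws.
	return fallbackLlm ? llm.withFallbacks({ fallbacks: [fallbackLlm] }) : llm;
}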