fix(core): Improve model sub-nodes error handling (#11418)

Eugene
2024-11-08 10:17:11 +01:00
committed by GitHub
parent b496bf3147
commit 57467d0285
24 changed files with 310 additions and 56 deletions

View File

@@ -14,6 +14,7 @@ import {
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
const modelField: INodeProperties = {
displayName: 'Model',
@@ -214,6 +215,7 @@ export class LmChatAnthropic implements INodeType {
topK: options.topK,
topP: options.topP,
callbacks: [new N8nLlmTracing(this, { tokensUsageParser })],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -12,6 +12,7 @@ import { ChatOllama } from '@langchain/ollama';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { ollamaModel, ollamaOptions, ollamaDescription } from '../LMOllama/description';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatOllama implements INodeType {
description: INodeTypeDescription = {
@@ -64,6 +65,7 @@ export class LmChatOllama implements INodeType {
model: modelName,
format: options.format === 'default' ? undefined : options.format,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -1,19 +1,18 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import {
NodeConnectionType,
type INodeType,
type INodeTypeDescription,
type ISupplyDataFunctions,
type SupplyData,
} from 'n8n-workflow';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
import { N8nLlmTracing } from '../N8nLlmTracing';
export class LmChatOpenAi implements INodeType {
description: INodeTypeDescription = {
@@ -276,25 +275,7 @@ export class LmChatOpenAi implements INodeType {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});
return {

View File

@@ -10,6 +10,7 @@ import {
import { Cohere } from '@langchain/cohere';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmCohere implements INodeType {
description: INodeTypeDescription = {
@@ -99,6 +100,7 @@ export class LmCohere implements INodeType {
apiKey: credentials.apiKey as string,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -11,6 +11,7 @@ import { Ollama } from '@langchain/community/llms/ollama';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { ollamaDescription, ollamaModel, ollamaOptions } from './description';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmOllama implements INodeType {
description: INodeTypeDescription = {
@@ -62,6 +63,7 @@ export class LmOllama implements INodeType {
model: modelName,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -10,6 +10,7 @@ import type {
import { OpenAI, type ClientOptions } from '@langchain/openai';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
type LmOpenAiOptions = {
baseURL?: string;
@@ -260,6 +261,7 @@ export class LmOpenAi implements INodeType {
timeout: options.timeout ?? 60000,
maxRetries: options.maxRetries ?? 2,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -10,6 +10,7 @@ import {
import { HuggingFaceInference } from '@langchain/community/llms/hf';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmOpenHuggingFaceInference implements INodeType {
description: INodeTypeDescription = {
@@ -143,6 +144,7 @@ export class LmOpenHuggingFaceInference implements INodeType {
apiKey: credentials.apiKey as string,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -10,6 +10,7 @@ import {
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatAwsBedrock implements INodeType {
description: INodeTypeDescription = {
@@ -151,6 +152,7 @@ export class LmChatAwsBedrock implements INodeType {
sessionToken: credentials.sessionToken as string,
},
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -10,6 +10,7 @@ import {
import { ChatOpenAI } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatAzureOpenAi implements INodeType {
description: INodeTypeDescription = {
@@ -195,6 +196,7 @@ export class LmChatAzureOpenAi implements INodeType {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -11,6 +11,7 @@ import type { SafetySetting } from '@google/generative-ai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { additionalOptions } from '../gemini-common/additional-options';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGoogleGemini implements INodeType {
description: INodeTypeDescription = {
@@ -144,6 +145,7 @@ export class LmChatGoogleGemini implements INodeType {
maxOutputTokens: options.maxOutputTokens,
safetySettings,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -17,6 +17,7 @@ import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { additionalOptions } from '../gemini-common/additional-options';
import { makeErrorFromStatus } from './error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGoogleVertex implements INodeType {
description: INodeTypeDescription = {
@@ -170,7 +171,8 @@ export class LmChatGoogleVertex implements INodeType {
safetySettings,
callbacks: [new N8nLlmTracing(this)],
// Handle ChatVertexAI invocation errors to provide better error messages
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
const customError = makeErrorFromStatus(Number(error?.response?.status), {
modelName,
});
@@ -180,7 +182,7 @@ export class LmChatGoogleVertex implements INodeType {
}
throw error;
}),
});
return {

View File

@@ -10,6 +10,7 @@ import {
import { ChatGroq } from '@langchain/groq';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGroq implements INodeType {
description: INodeTypeDescription = {
@@ -144,6 +145,7 @@ export class LmChatGroq implements INodeType {
maxTokens: options.maxTokensToSample,
temperature: options.temperature,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -11,6 +11,7 @@ import type { ChatMistralAIInput } from '@langchain/mistralai';
import { ChatMistralAI } from '@langchain/mistralai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatMistralCloud implements INodeType {
description: INodeTypeDescription = {
@@ -190,6 +191,7 @@ export class LmChatMistralCloud implements INodeType {
modelName,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View File

@@ -1,17 +1,18 @@
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
import { getModelNameForTiktoken } from '@langchain/core/language_models/base';
import type {
Serialized,
SerializedNotImplemented,
SerializedSecret,
} from '@langchain/core/load/serializable';
import type { BaseMessage } from '@langchain/core/messages';
import type { SerializedFields } from '@langchain/core/dist/load/map_keys';
import type { LLMResult } from '@langchain/core/outputs';
import { encodingForModel } from '@langchain/core/utils/tiktoken';
import type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';
import { pick } from 'lodash';
import { NodeConnectionType, NodeError, NodeOperationError } from 'n8n-workflow';
import { logAiEvent } from '../../utils/helpers';
type TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {
@@ -30,6 +31,10 @@ const TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';
export class N8nLlmTracing extends BaseCallbackHandler {
name = 'N8nLlmTracing';
// This flag makes sure that LangChain will wait for the handlers to finish before continuing
// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)
awaitHandlers = true;
connectionType = NodeConnectionType.AiLanguageModel;
promptTokensEstimate = 0;
@@ -135,6 +140,7 @@ export class N8nLlmTracing extends BaseCallbackHandler {
this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [
[{ json: { ...response } }],
]);
logAiEvent(this.executionFunctions, 'ai-llm-generated-output', {
messages: parsedMessages,
options: runDetails.options,
@@ -172,6 +178,8 @@ export class N8nLlmTracing extends BaseCallbackHandler {
runId: string,
parentRunId?: string | undefined,
) {
const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };
// Filter out non-x- headers to avoid leaking sensitive information in logs
if (typeof error === 'object' && error?.hasOwnProperty('headers')) {
const errorWithHeaders = error as { headers: Record<string, unknown> };
@@ -183,6 +191,19 @@ export class N8nLlmTracing extends BaseCallbackHandler {
});
}
if (error instanceof NodeError) {
this.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);
} else {
// If the error is not a NodeError, we wrap it in a NodeOperationError
this.executionFunctions.addOutputData(
this.connectionType,
runDetails.index,
new NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {
functionality: 'configuration-node',
}),
);
}
logAiEvent(this.executionFunctions, 'ai-llm-errored', {
error: Object.keys(error).length === 0 ? error.toString() : error,
runId,

View File

@@ -0,0 +1,66 @@
import { n8nDefaultFailedAttemptHandler } from './n8nDefaultFailedAttemptHandler';
class MockHttpError extends Error {
response: { status: number };
constructor(message: string, code: number) {
super(message);
this.response = { status: code };
}
}
describe('n8nDefaultFailedAttemptHandler', () => {
it('should throw error if message starts with "Cancel"', () => {
const error = new Error('Cancel operation');
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if message starts with "AbortError"', () => {
const error = new Error('AbortError occurred');
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if name is "AbortError"', () => {
class MockAbortError extends Error {
constructor() {
super('Some error');
this.name = 'AbortError';
}
}
const error = new MockAbortError();
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if code is "ECONNABORTED"', () => {
class MockAbortError extends Error {
code: string;
constructor() {
super('Some error');
this.code = 'ECONNABORTED';
}
}
const error = new MockAbortError();
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if status is in STATUS_NO_RETRY', () => {
const error = new MockHttpError('Some error', 400);
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should not throw error if status is not in STATUS_NO_RETRY', () => {
const error = new MockHttpError('Some error', 500);
error.response = { status: 500 };
expect(() => n8nDefaultFailedAttemptHandler(error)).not.toThrow();
});
it('should not throw error if no conditions are met', () => {
const error = new Error('Some random error');
expect(() => n8nDefaultFailedAttemptHandler(error)).not.toThrow();
});
});

View File

@@ -0,0 +1,41 @@
const STATUS_NO_RETRY = [
400, // Bad Request
401, // Unauthorized
402, // Payment Required
403, // Forbidden
404, // Not Found
405, // Method Not Allowed
406, // Not Acceptable
407, // Proxy Authentication Required
409, // Conflict
];
/**
* This function is used as the default handler for failed attempts in all LLM nodes.
* It is based on the default failed-attempt handler from the LangChain core package.
* It rethrows known errors that should not be retried, stopping the retry loop; otherwise it returns and the attempt is retried.
* @param error The error from the failed attempt
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export const n8nDefaultFailedAttemptHandler = (error: any) => {
if (
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access,@typescript-eslint/no-unsafe-call
error?.message?.startsWith?.('Cancel') ||
error?.message?.startsWith?.('AbortError') ||
error?.name === 'AbortError'
) {
throw error;
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any,@typescript-eslint/no-unsafe-member-access
if (error?.code === 'ECONNABORTED') {
throw error;
}
const status =
// eslint-disable-next-line @typescript-eslint/no-explicit-any,@typescript-eslint/no-unsafe-member-access
error?.response?.status ?? error?.status;
if (status && STATUS_NO_RETRY.includes(+status)) {
throw error;
}
};
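
For illustration, a minimal sketch of the retry semantics this gives LangChain's retry loop (both error objects below are hypothetical, not part of this commit): an unlisted status makes the handler return, so the attempt is retried; a status in STATUS_NO_RETRY makes it rethrow, so retrying stops.

import { n8nDefaultFailedAttemptHandler } from './n8nDefaultFailedAttemptHandler';

// 429 is not in STATUS_NO_RETRY: the handler returns and LangChain retries the attempt
const transient = Object.assign(new Error('Rate limited'), { response: { status: 429 } });
n8nDefaultFailedAttemptHandler(transient);

// 401 is in STATUS_NO_RETRY: the handler rethrows and retrying stops immediately
const fatal = Object.assign(new Error('Unauthorized'), { response: { status: 401 } });
n8nDefaultFailedAttemptHandler(fatal); // throws 'Unauthorized'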

View File

@@ -0,0 +1,65 @@
import { mock } from 'jest-mock-extended';
import type { ISupplyDataFunctions } from 'n8n-workflow';
import { ApplicationError, NodeApiError } from 'n8n-workflow';
import { makeN8nLlmFailedAttemptHandler } from './n8nLlmFailedAttemptHandler';
describe('makeN8nLlmFailedAttemptHandler', () => {
const ctx = mock<ISupplyDataFunctions>({
getNode: jest.fn(),
});
it('should throw a wrapped error when NO custom handler is provided', () => {
const handler = makeN8nLlmFailedAttemptHandler(ctx);
expect(() => handler(new Error('Test error'))).toThrow(NodeApiError);
});
it('should throw a wrapped error when a custom handler is provided', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
expect(() => handler(new Error('Test error'))).toThrow(NodeApiError);
expect(customHandler).toHaveBeenCalled();
});
it('should throw the custom handler exception, wrapped', () => {
const customHandler = jest.fn(() => {
throw new ApplicationError('Custom handler error');
});
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
expect(() => handler(new Error('Test error'))).toThrow('Custom handler error');
expect(customHandler).toHaveBeenCalled();
});
it('should not throw if retries are left', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
const error = new Error('Test error');
(error as any).retriesLeft = 1;
expect(() => handler(error)).not.toThrow();
});
it('should throw NodeApiError if no retries are left', () => {
const handler = makeN8nLlmFailedAttemptHandler(ctx);
const error = new Error('Test error');
(error as any).retriesLeft = 0;
expect(() => handler(error)).toThrow(NodeApiError);
});
it('should throw NodeApiError if no retries are left with custom handler', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
const error = new Error('Test error');
(error as any).retriesLeft = 0;
expect(() => handler(error)).toThrow(NodeApiError);
expect(customHandler).toHaveBeenCalled();
});
});

View File

@@ -0,0 +1,46 @@
import type { FailedAttemptHandler } from '@langchain/core/dist/utils/async_caller';
import type { ISupplyDataFunctions, JsonObject } from 'n8n-workflow';
import { NodeApiError } from 'n8n-workflow';
import { n8nDefaultFailedAttemptHandler } from './n8nDefaultFailedAttemptHandler';
/**
* This function returns a failed-attempt handler for use with LangChain models.
* It first invokes the custom handler passed as an argument (if any); if that doesn't throw, it runs the default handler.
* Any error thrown by either handler is wrapped in a NodeApiError.
* If neither handler throws, the error is rethrown (also wrapped) ONLY when there are no retries left.
*/
export const makeN8nLlmFailedAttemptHandler = (
ctx: ISupplyDataFunctions,
handler?: FailedAttemptHandler,
): FailedAttemptHandler => {
return (error: any) => {
try {
// Try custom error handler first
handler?.(error);
// If it didn't throw an error, use the default handler
n8nDefaultFailedAttemptHandler(error);
} catch (e) {
// Wrap the error in a NodeApiError
const apiError = new NodeApiError(ctx.getNode(), e as unknown as JsonObject, {
functionality: 'configuration-node',
});
throw apiError;
}
// If no error was thrown, check if it is the last retry
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
if (error?.retriesLeft > 0) {
return;
}
// If there are no retries left, throw the error wrapped in a NodeApiError
const apiError = new NodeApiError(ctx.getNode(), error as unknown as JsonObject, {
functionality: 'configuration-node',
});
throw apiError;
};
};
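
Taken together with the node diffs above, every model node now wires this up the same way; a condensed sketch of the pattern (constructor options abbreviated, credential and model values hypothetical):

// Inside a model node's supplyData(); `this` is ISupplyDataFunctions
const model = new ChatOpenAI({
  apiKey: credentials.apiKey as string,
  model: modelName,
  callbacks: [new N8nLlmTracing(this)],
  // The optional vendor-specific handler (here OpenAI's) runs before the default one
  onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});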