fix(editor): Show correct error messages on Ask AI (#16913)

This commit is contained in:
Jaakko Husso
2025-07-02 14:00:23 +03:00
committed by GitHub
parent 7ebde66eed
commit 3a733b9127
8 changed files with 67 additions and 18 deletions

View File

@@ -180,7 +180,11 @@ return []
{ code: 400, message: 'Code generation failed due to an unknown reason' },
{ code: 413, message: 'Your workflow data is too large for AI to process' },
{ code: 429, message: "We've hit our rate limit with our AI partner" },
{ code: 500, message: 'Code generation failed due to an unknown reason' },
{
code: 500,
message:
'Code generation failed with error: Request failed with status code 500. Try again in a few minutes',
},
];
handledCodes.forEach(({ code, message }) => {

View File

@@ -8,7 +8,7 @@ import {
} from '@n8n/api-types';
import { AuthenticatedRequest } from '@n8n/db';
import { Body, Post, RestController } from '@n8n/decorators';
import type { AiAssistantSDK } from '@n8n_io/ai-assistant-sdk';
import { type AiAssistantSDK, APIResponseError } from '@n8n_io/ai-assistant-sdk';
import { Response } from 'express';
import { OPEN_AI_API_CREDENTIAL_TYPE } from 'n8n-workflow';
import { strict as assert } from 'node:assert';
@@ -16,7 +16,10 @@ import { WritableStream } from 'node:stream/web';
import { FREE_AI_CREDITS_CREDENTIAL_NAME } from '@/constants';
import { CredentialsService } from '@/credentials/credentials.service';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ContentTooLargeError } from '@/errors/response-errors/content-too-large.error';
import { InternalServerError } from '@/errors/response-errors/internal-server.error';
import { TooManyRequestsError } from '@/errors/response-errors/too-many-requests.error';
import { WorkflowBuilderService } from '@/services/ai-workflow-builder.service';
import { AiService } from '@/services/ai.service';
import { UserService } from '@/services/user.service';
@@ -106,6 +109,19 @@ export class AiController {
try {
return await this.aiService.askAi(payload, req.user);
} catch (e) {
if (e instanceof APIResponseError) {
switch (e.statusCode) {
case 413:
throw new ContentTooLargeError(e.message);
case 429:
throw new TooManyRequestsError(e.message);
case 400:
throw new BadRequestError(e.message);
default:
throw new InternalServerError(e.message, e);
}
}
assert(e instanceof Error);
throw new InternalServerError(e.message, e);
}

View File

@@ -0,0 +1,7 @@
import { ResponseError } from './abstract/response.error';
/**
 * HTTP 413 "Content Too Large" response error.
 *
 * Thrown by the AI controller when the upstream AI assistant SDK reports
 * that the submitted payload (e.g. workflow data) is too large to process,
 * so the frontend can map it to a specific user-facing message.
 *
 * NOTE(review): 413 is passed twice — presumably (httpStatusCode, errorCode)
 * on the ResponseError base; confirm against ResponseError's constructor.
 */
export class ContentTooLargeError extends ResponseError {
// hint: optional extra detail shown alongside the main message
constructor(message: string, hint: string | undefined = undefined) {
super(message, 413, 413, hint);
}
}

View File

@@ -0,0 +1,7 @@
import { ResponseError } from './abstract/response.error';
/**
 * HTTP 429 "Too Many Requests" response error.
 *
 * Thrown by the AI controller when the upstream AI assistant SDK reports a
 * rate-limit rejection, so the frontend can surface the rate-limit message
 * rather than a generic failure.
 *
 * NOTE(review): 429 is passed twice — presumably (httpStatusCode, errorCode)
 * on the ResponseError base; confirm against ResponseError's constructor.
 */
export class TooManyRequestsError extends ResponseError {
// hint: optional extra detail shown alongside the main message
constructor(message: string, hint: string | undefined = undefined) {
super(message, 429, 429, hint);
}
}

View File

@@ -534,7 +534,8 @@
"codeNodeEditor.askAi.loadingPhrase7": "Stand by, AI magic at work…",
"codeNodeEditor.askAi.generationCompleted": "✨ Code generation completed",
"codeNodeEditor.askAi.generationFailed": "Code generation failed",
"codeNodeEditor.askAi.generationFailedUnknown": "Code generation failed due to an unknown reason. Try again in a few minutes.",
"codeNodeEditor.askAi.generationFailedUnknown": "Code generation failed due to an unknown reason. Try again in a few minutes",
"codeNodeEditor.askAi.generationFailedWithReason": "Code generation failed with error: {error}. Try again in a few minutes",
"codeNodeEditor.askAi.generationFailedDown": "We're sorry, our AI service is currently unavailable. Please try again later. If the problem persists, contact support.",
"codeNodeEditor.askAi.generationFailedRate": "We've hit our rate limit with our AI partner (too many requests). Please wait a minute before trying again.",
"codeNodeEditor.askAi.generationFailedTooLarge": "Your workflow data is too large for AI to process. Simplify the data being sent into the Code node and retry.",

View File

@@ -66,12 +66,18 @@ const isEachItemMode = computed(() => {
return mode === 'runOnceForEachItem';
});
function getErrorMessageByStatusCode(statusCode: number) {
function getErrorMessageByStatusCode(statusCode: number, message: string | undefined): string {
const errorMessages: Record<number, string> = {
400: i18n.baseText('codeNodeEditor.askAi.generationFailedUnknown'),
413: i18n.baseText('codeNodeEditor.askAi.generationFailedTooLarge'),
429: i18n.baseText('codeNodeEditor.askAi.generationFailedRate'),
500: i18n.baseText('codeNodeEditor.askAi.generationFailedUnknown'),
[413]: i18n.baseText('codeNodeEditor.askAi.generationFailedTooLarge'),
[400]: i18n.baseText('codeNodeEditor.askAi.generationFailedUnknown'),
[429]: i18n.baseText('codeNodeEditor.askAi.generationFailedRate'),
[500]: message
? i18n.baseText('codeNodeEditor.askAi.generationFailedWithReason', {
interpolate: {
error: message,
},
})
: i18n.baseText('codeNodeEditor.askAi.generationFailedUnknown'),
};
return errorMessages[statusCode] || i18n.baseText('codeNodeEditor.askAi.generationFailedUnknown');
@@ -189,7 +195,10 @@ async function onSubmit() {
showMessage({
type: 'error',
title: i18n.baseText('codeNodeEditor.askAi.generationFailed'),
message: getErrorMessageByStatusCode(error.httpStatusCode || error?.response.status),
message: getErrorMessageByStatusCode(
error.httpStatusCode || error?.response.status,
error?.message,
),
});
stopLoading();
useTelemetry().trackAskAI('askAi.generationFinished', {

21
pnpm-lock.yaml generated
View File

@@ -22,8 +22,8 @@ catalogs:
specifier: 0.3.20-12
version: 0.3.20-12
'@n8n_io/ai-assistant-sdk':
specifier: 1.14.0
version: 1.14.0
specifier: 1.14.1
version: 1.14.1
'@sentry/node':
specifier: 8.52.1
version: 8.52.1
@@ -390,7 +390,7 @@ importers:
version: link:../di
'@n8n_io/ai-assistant-sdk':
specifier: 'catalog:'
version: 1.14.0
version: 1.14.1
n8n-workflow:
specifier: workspace:*
version: link:../../workflow
@@ -1270,7 +1270,7 @@ importers:
version: 0.3.20-12(@sentry/node@8.52.1)(ioredis@5.3.2)(mssql@10.0.2)(mysql2@3.11.0)(pg@8.12.0)(redis@4.6.14)(sqlite3@5.1.7)(ts-node@10.9.2(@types/node@20.19.1)(typescript@5.8.3))
'@n8n_io/ai-assistant-sdk':
specifier: 'catalog:'
version: 1.14.0
version: 1.14.1
'@n8n_io/license-sdk':
specifier: 2.22.0
version: 2.22.0
@@ -5472,8 +5472,8 @@ packages:
engines: {node: '>=18.10', pnpm: '>=9.6'}
hasBin: true
'@n8n_io/ai-assistant-sdk@1.14.0':
resolution: {integrity: sha512-apo1VXGmyUpwsBZ2dp9EyqZYR+FA3DzdD79MVzSMuknLIukhyZjvabBYRrjK5BJOU3vo5z9inaCm4UvenQ5Mgg==}
'@n8n_io/ai-assistant-sdk@1.14.1':
resolution: {integrity: sha512-I2WXfNnDltrSqaMTXFJUZKq/uff6wuHBhFv0oiCyi0NK+CNwFkU1FCcmPWLQrQlj9llda4urwv5MuXygH0zUVw==}
engines: {node: '>=20.15', pnpm: '>=8.14'}
'@n8n_io/license-sdk@2.22.0':
@@ -15146,6 +15146,9 @@ packages:
vue-component-type-helpers@2.2.10:
resolution: {integrity: sha512-iDUO7uQK+Sab2tYuiP9D1oLujCWlhHELHMgV/cB13cuGbG4qwkLHvtfWb6FzvxrIOPDnU0oHsz2MlQjhYDeaHA==}
vue-component-type-helpers@3.0.0:
resolution: {integrity: sha512-J1HtqhZIqmYoNg4SLcYVFdCdsVUkMo4Z6/Wx4sQMfY8TFIIqDmd3mS2whfBIKzAA7dHMexarwYbvtB/fOUuEsw==}
vue-demi@0.14.10:
resolution: {integrity: sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==}
engines: {node: '>=12'}
@@ -19044,7 +19047,7 @@ snapshots:
acorn: 8.12.1
acorn-walk: 8.3.4
'@n8n_io/ai-assistant-sdk@1.14.0': {}
'@n8n_io/ai-assistant-sdk@1.14.1': {}
'@n8n_io/license-sdk@2.22.0':
dependencies:
@@ -20563,7 +20566,7 @@ snapshots:
ts-dedent: 2.2.0
type-fest: 2.19.0
vue: 3.5.13(typescript@5.8.3)
vue-component-type-helpers: 2.2.10
vue-component-type-helpers: 3.0.0
'@stylistic/eslint-plugin@5.0.0(eslint@9.29.0(jiti@1.21.7))':
dependencies:
@@ -30993,6 +30996,8 @@ snapshots:
vue-component-type-helpers@2.2.10: {}
vue-component-type-helpers@3.0.0: {}
vue-demi@0.14.10(vue@3.5.13(typescript@5.8.3)):
dependencies:
vue: 3.5.13(typescript@5.8.3)

View File

@@ -8,7 +8,7 @@ packages:
catalog:
'@n8n/typeorm': 0.3.20-12
'@n8n_io/ai-assistant-sdk': 1.14.0
'@n8n_io/ai-assistant-sdk': 1.14.1
'@langchain/core': 0.3.59
'@langchain/openai': 0.5.13
'@langchain/anthropic': 0.3.22