fix(OpenAI Node, Basic LLM Chain Node, Tool Agent Node): Better OpenAI API rate limit errors (#10797)

Author: Eugene
Date: 2024-09-18 10:52:10 +02:00
Committer: GitHub
Parent: df8b2c0694
Commit: ab83c4b416
4 changed files with 69 additions and 1 deletion
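The diff below wires two helpers from the OpenAi vendor node, isOpenAiError and getCustomErrorMessage (imported under the alias getCustomOpenAiErrorMessage), into the Basic LLM Chain node's catch block. The helper module itself is not part of the excerpt, so here is a minimal sketch of its likely shape, assuming the openai v4 SDK; the error codes and message strings are illustrative placeholders, not the strings shipped in #10797:

// error-handling.ts (sketch): map known OpenAI error codes to friendlier messages.
import { OpenAIError } from 'openai/error';

// Narrow an unknown thrown value to an OpenAI SDK error.
export function isOpenAiError(error: unknown): error is OpenAIError {
	return error instanceof OpenAIError;
}

// Illustrative code-to-message table; the real commit defines its own strings.
const customErrorMessages: Record<string, string> = {
	insufficient_quota: 'OpenAI: Insufficient quota. Check your OpenAI account billing details.',
	rate_limit_exceeded: 'OpenAI: Rate limit reached. Slow down requests or upgrade your plan.',
};

// Return a custom message for a known code, or undefined to keep the original error text.
export function getCustomErrorMessage(errorCode: string): string | undefined {
	return customErrorMessages[errorCode];
}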

@@ -1,4 +1,9 @@
-import { ApplicationError, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
+import {
+	ApplicationError,
+	NodeApiError,
+	NodeConnectionType,
+	NodeOperationError,
+} from 'n8n-workflow';
 import type {
 	IBinaryData,
 	IDataObject,
@@ -30,6 +35,10 @@ import {
 	isChatInstance,
 } from '../../../utils/helpers';
 import { getTracingConfig } from '../../../utils/tracing';
+import {
+	getCustomErrorMessage as getCustomOpenAiErrorMessage,
+	isOpenAiError,
+} from '../../vendors/OpenAi/helpers/error-handling';
 
 interface MessagesTemplate {
 	type: string;
@@ -580,6 +589,18 @@ export class ChainLlm implements INodeType {
 				});
 			});
 		} catch (error) {
+			// If the error is an OpenAI rate limit error, we want to handle it differently
+			// because OpenAI has multiple distinct rate limit errors
+			if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
+				const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
+				if (openAiErrorCode) {
+					const customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);
+					if (customMessage) {
+						error.message = customMessage;
+					}
+				}
+			}
+
 			if (this.continueOnFail()) {
 				returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
 				continue;
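
Read together, the hunks rewrite the user-facing message before n8n's usual continueOnFail handling runs. A self-contained sketch of that control flow, using stand-ins for NodeApiError and the vendor helper; MockApiError, mockIsOpenAiError, and the message table are hypothetical names for illustration, not n8n-workflow APIs:

// Stand-in for n8n-workflow's NodeApiError, which keeps the original error in `cause`.
class MockApiError extends Error {
	constructor(
		message: string,
		public cause?: unknown,
	) {
		super(message);
	}
}

// Illustrative message table; see the helper sketch near the top of this page.
const customMessages: Record<string, string> = {
	rate_limit_exceeded: 'OpenAI: Rate limit reached. Retry in a few moments.',
};

// Stand-in for isOpenAiError: here we only check for the nested `error` payload.
function mockIsOpenAiError(e: unknown): e is { error?: { code?: string } } {
	return typeof e === 'object' && e !== null && 'error' in e;
}

// Mirrors the catch-block logic from the hunk above: look up a custom message
// by the OpenAI error code and swap it in, otherwise leave the error untouched.
function rewriteOpenAiMessage(error: Error): Error {
	if (error instanceof MockApiError && mockIsOpenAiError(error.cause)) {
		const code = error.cause.error?.code;
		const custom = code ? customMessages[code] : undefined;
		if (custom) error.message = custom;
	}
	return error;
}

// The OpenAI SDK nests the API payload under `error`, hence `cause.error?.code` in the diff.
const thrown = new MockApiError('429 Too Many Requests', {
	error: { code: 'rate_limit_exceeded', type: 'tokens' },
});
console.log(rewriteOpenAiMessage(thrown).message);
// -> "OpenAI: Rate limit reached. Retry in a few moments."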