Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-17 01:56:46 +00:00)
fix(OpenAI Node, Basic LLM Chain Node, Tool Agent Node): Better OpenAI API rate limit errors (#10797)
This commit is contained in:
@@ -5,11 +5,15 @@ import {
|
||||
type INodeType,
|
||||
type INodeTypeDescription,
|
||||
type SupplyData,
|
||||
type JsonObject,
|
||||
NodeApiError,
|
||||
} from 'n8n-workflow';
|
||||
|
||||
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
|
||||
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
|
||||
import { N8nLlmTracing } from '../N8nLlmTracing';
|
||||
import { RateLimitError } from 'openai';
|
||||
import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
|
||||
|
||||
export class LmChatOpenAi implements INodeType {
|
||||
description: INodeTypeDescription = {
|
||||
@@ -272,6 +276,25 @@ export class LmChatOpenAi implements INodeType {
|
||||
response_format: { type: options.responseFormat },
|
||||
}
|
||||
: undefined,
|
||||
// Retry-failure hook: surfaces OpenAI rate-limit errors with a
// code-specific, user-friendly message instead of the raw API error.
// Kept as an arrow function so `this` stays lexically bound to the
// surrounding node context (required for this.getNode()).
// `error` is typed `unknown` (not `any`): the body narrows it with
// `instanceof` before touching any property, which is type-safe.
onFailedAttempt: (error: unknown) => {
	// If the error is a rate limit error, we want to handle it differently
	// because OpenAI has multiple different rate limit errors
	if (error instanceof RateLimitError) {
		// Narrowed to RateLimitError here, so `.code` is known to exist
		// (optional chaining is unnecessary after the instanceof guard).
		const errorCode = error.code;
		if (errorCode) {
			const customErrorMessage = getCustomErrorMessage(errorCode);

			const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
			if (customErrorMessage) {
				apiError.message = customErrorMessage;
			}

			throw apiError;
		}
	}

	// Anything that is not a recognized rate-limit error is rethrown
	// untouched so the caller's normal retry/error handling applies.
	throw error;
},
|
||||
});
|
||||
|
||||
return {
|
||||
|
||||
Reference in New Issue
Block a user