fix(OpenAI Node, Basic LLM Chain Node, Tool Agent Node): Better OpenAI API rate limit errors (#10797)

Author: Eugene
Date: 2024-09-18 10:52:10 +02:00 (committed by GitHub)
Parent: df8b2c0694
Commit: ab83c4b416

4 changed files with 69 additions and 1 deletion

LmChatOpenAi.node.ts

@@ -5,11 +5,15 @@ import {
   type INodeType,
   type INodeTypeDescription,
   type SupplyData,
+  type JsonObject,
+  NodeApiError,
 } from 'n8n-workflow';
 
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
 import { N8nLlmTracing } from '../N8nLlmTracing';
+import { RateLimitError } from 'openai';
+import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
 
 export class LmChatOpenAi implements INodeType {
   description: INodeTypeDescription = {
@@ -272,6 +276,25 @@ export class LmChatOpenAi implements INodeType {
             response_format: { type: options.responseFormat },
           }
         : undefined,
+      onFailedAttempt: (error: any) => {
+        // If the error is a rate limit error, we want to handle it differently
+        // because OpenAI has multiple different rate limit errors
+        if (error instanceof RateLimitError) {
+          const errorCode = error?.code;
+          if (errorCode) {
+            const customErrorMessage = getCustomErrorMessage(errorCode);
+
+            const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
+            if (customErrorMessage) {
+              apiError.message = customErrorMessage;
+            }
+
+            throw apiError;
+          }
+        }
+
+        throw error;
+      },
     });
 
     return {
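
Why hook `onFailedAttempt`? LangChain's ChatOpenAI client retries failed requests with exponential backoff (p-retry under the hood), and `onFailedAttempt` runs on each failed try; throwing from it aborts the remaining retries. Rethrowing a `NodeApiError` there lets a quota or rate-limit failure surface immediately with a clearer message instead of being retried to no effect.

The `getCustomErrorMessage` helper is defined in one of the other changed files and is not shown in this diff. Below is a minimal sketch of what such a code-to-message lookup could look like; the specific error codes and message strings are illustrative assumptions, not the commit's actual contents.

// Sketch of an error-handling helper that maps OpenAI error codes to clearer
// messages. Codes and wording here are assumptions for illustration; the real
// table lives in ../../vendors/OpenAi/helpers/error-handling in the n8n repo.
const CUSTOM_ERROR_MESSAGES: Record<string, string> = {
  // Code OpenAI returns when the account has exhausted its credits
  insufficient_quota: 'OpenAI: Insufficient quota. Check your billing details.',
  // Code returned when requests-per-minute or tokens-per-minute limits are hit
  rate_limit_exceeded: 'OpenAI: Rate limit reached. Please retry shortly.',
};

// Return a custom message for a known code, or undefined so the caller can
// keep the original error text (as the onFailedAttempt handler above does).
export function getCustomErrorMessage(errorCode: string): string | undefined {
  return CUSTOM_ERROR_MESSAGES[errorCode];
}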