feat(Vercel AI Gateway Node): Add Vercel AI Gateway model Node (#17524)
VercelAiGatewayApi.credentials.ts (new file)
@@ -0,0 +1,63 @@
import type {
	IAuthenticateGeneric,
	ICredentialTestRequest,
	ICredentialType,
	INodeProperties,
} from 'n8n-workflow';

export class VercelAiGatewayApi implements ICredentialType {
	name = 'vercelAiGatewayApi';

	displayName = 'Vercel AI Gateway';

	documentationUrl = 'vercelaigateway';

	properties: INodeProperties[] = [
		{
			displayName: 'API Key or OIDC Token',
			name: 'apiKey',
			type: 'string',
			typeOptions: { password: true },
			required: true,
			default: '',
			description: 'Your credentials for the Vercel AI Gateway',
		},
		{
			displayName: 'Base URL',
			name: 'url',
			type: 'string',
			required: true,
			default: 'https://ai-gateway.vercel.sh/v1',
			description: 'The base URL for your Vercel AI Gateway instance',
			placeholder: 'https://ai-gateway.vercel.sh/v1',
		},
	];

	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {
			headers: {
				Authorization: '=Bearer {{$credentials.apiKey}}',
				'http-referer': 'https://n8n.io/',
				'x-title': 'n8n',
			},
		},
	};

	test: ICredentialTestRequest = {
		request: {
			baseURL: '={{ $credentials.url }}',
			url: '/chat/completions',
			method: 'POST',
			headers: {
				'http-referer': 'https://n8n.io/',
				'x-title': 'n8n',
			},
			body: {
				model: 'openai/gpt-4.1-nano',
				messages: [{ role: 'user', content: 'test' }],
				max_tokens: 1,
			},
		},
	};
}
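For context (not part of the commit): the credential test above amounts to a one-token chat completion against the gateway. A minimal standalone sketch of the same request, assuming Node 18+ for the global fetch; the API key is a placeholder:

async function testVercelAiGatewayCredential(
	apiKey: string,
	baseUrl = 'https://ai-gateway.vercel.sh/v1',
): Promise<void> {
	// Mirrors the ICredentialTestRequest above: POST /chat/completions with max_tokens: 1
	const res = await fetch(`${baseUrl}/chat/completions`, {
		method: 'POST',
		headers: {
			Authorization: `Bearer ${apiKey}`,
			'Content-Type': 'application/json',
			'http-referer': 'https://n8n.io/',
			'x-title': 'n8n',
		},
		body: JSON.stringify({
			model: 'openai/gpt-4.1-nano',
			messages: [{ role: 'user', content: 'test' }],
			max_tokens: 1,
		}),
	});
	if (!res.ok) {
		throw new Error(`Credential test failed: ${res.status} ${await res.text()}`);
	}
}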
@@ -99,6 +99,7 @@ function getInputs(
				'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
				'@n8n/n8n-nodes-langchain.lmChatDeepSeek',
				'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
				'@n8n/n8n-nodes-langchain.lmChatVercelAiGateway',
				'@n8n/n8n-nodes-langchain.lmChatXAiGrok',
				'@n8n/n8n-nodes-langchain.modelSelector',
			],
@@ -131,6 +132,7 @@ function getInputs(
				'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
				'@n8n/n8n-nodes-langchain.lmChatDeepSeek',
				'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
				'@n8n/n8n-nodes-langchain.lmChatVercelAiGateway',
				'@n8n/n8n-nodes-langchain.lmChatXAiGrok',
			],
		},
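The two hunks above register the new node in the allow-lists of chat-model sub-nodes accepted at those inputs. As a hedged illustration only (not n8n's actual implementation), an allow-list like this reduces to a membership check on the connecting node's type name:

// Hypothetical sketch of how such an allow-list is consulted
const allowedChatModels: string[] = [
	'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
	'@n8n/n8n-nodes-langchain.lmChatVercelAiGateway', // newly added entry
	'@n8n/n8n-nodes-langchain.lmChatXAiGrok',
];

function isAllowedModelNode(nodeTypeName: string): boolean {
	return allowedChatModels.includes(nodeTypeName);
}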
LmChatVercelAiGateway.node.ts (new file)
@@ -0,0 +1,254 @@
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import {
	NodeConnectionTypes,
	type INodeType,
	type INodeTypeDescription,
	type ISupplyDataFunctions,
	type SupplyData,
} from 'n8n-workflow';

import { getProxyAgent } from '@utils/httpProxyAgent';
import { getConnectionHintNoticeField } from '@utils/sharedFields';

import type { OpenAICompatibleCredential } from '../../../types/types';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
import { N8nLlmTracing } from '../N8nLlmTracing';

export class LmChatVercelAiGateway implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Vercel AI Gateway Chat Model',
		name: 'lmChatVercelAiGateway',
		icon: { light: 'file:vercel.dark.svg', dark: 'file:vercel.svg' },
		group: ['transform'],
		version: [1],
		description: 'For advanced usage with an AI chain via Vercel AI Gateway',
		defaults: {
			name: 'Vercel AI Gateway Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models', 'Root Nodes'],
				'Language Models': ['Chat Models (Recommended)'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatvercel/',
					},
				],
			},
		},

		// Sub-node: no main inputs; it supplies a language-model connection instead
		inputs: [],

		outputs: [NodeConnectionTypes.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'vercelAiGatewayApi',
				required: true,
			},
		],
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			baseURL: '={{ $credentials?.url }}',
		},
		properties: [
			getConnectionHintNoticeField([NodeConnectionTypes.AiChain, NodeConnectionTypes.AiAgent]),
			{
				displayName:
					'If using JSON response format, you must include the word "json" in the prompt in your chain or agent. Also, make sure to select a model released after November 2023.',
				name: 'notice',
				type: 'notice',
				default: '',
				displayOptions: {
					show: {
						'/options.responseFormat': ['json_object'],
					},
				},
			},
			{
				displayName: 'Model',
				name: 'model',
				type: 'options',
				description: 'The model which will generate the completion',
				// Model choices are loaded from the gateway's OpenAI-compatible GET /models endpoint
				typeOptions: {
					loadOptions: {
						routing: {
							request: {
								method: 'GET',
								url: '/models',
							},
							output: {
								postReceive: [
									{
										type: 'rootProperty',
										properties: {
											property: 'data',
										},
									},
									{
										type: 'setKeyValue',
										properties: {
											name: '={{$responseItem.id}}',
											value: '={{$responseItem.id}}',
										},
									},
									{
										type: 'sort',
										properties: {
											key: 'name',
										},
									},
								],
							},
						},
					},
				},
				routing: {
					send: {
						type: 'body',
						property: 'model',
					},
				},
				default: 'openai/gpt-4o',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Response Format',
						name: 'responseFormat',
						default: 'text',
						type: 'options',
						options: [
							{
								name: 'Text',
								value: 'text',
								description: 'Regular text response',
							},
							{
								name: 'JSON',
								value: 'json_object',
								description:
									'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
							},
						],
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 360000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};

	async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials<OpenAICompatibleCredential>('vercelAiGatewayApi');

		const modelName = this.getNodeParameter('model', itemIndex) as string;

		const options = this.getNodeParameter('options', itemIndex, {}) as {
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
			responseFormat?: 'text' | 'json_object';
		};

		// Send requests to the configured gateway URL, via an HTTP(S) proxy when one is set
		const configuration: ClientOptions = {
			baseURL: credentials.url,
			fetchOptions: {
				dispatcher: getProxyAgent(credentials.url),
			},
		};

		const model = new ChatOpenAI({
			openAIApiKey: credentials.apiKey,
			model: modelName,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			configuration,
			callbacks: [new N8nLlmTracing(this)],
			modelKwargs: options.responseFormat
				? {
						response_format: { type: options.responseFormat },
					}
				: undefined,
			onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
		});

		return {
			response: model,
		};
	}
}
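Stripped of the n8n plumbing (tracing callbacks, proxy agent, retry handler), supplyData() configures a standard ChatOpenAI client against the gateway's OpenAI-compatible endpoint. A minimal standalone sketch, assuming an ESM context with top-level await; the env var and prompt are placeholders:

import { ChatOpenAI } from '@langchain/openai';

// Equivalent client outside n8n: baseURL points at the gateway instead of OpenAI
const model = new ChatOpenAI({
	openAIApiKey: process.env.AI_GATEWAY_API_KEY, // placeholder env var
	model: 'openai/gpt-4o', // the node's default model
	temperature: 0.7,
	maxRetries: 2,
	configuration: { baseURL: 'https://ai-gateway.vercel.sh/v1' },
});

const reply = await model.invoke('Reply with a single word.');
console.log(reply.content);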
vercel.dark.svg (new file, 76x65 icon)
@@ -0,0 +1 @@
<svg width="76" height="65" viewBox="0 0 76 65" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M37.5274 0L75.0548 65H0L37.5274 0Z" fill="#000000"/></svg>
vercel.svg (new file, 76x65 icon)
@@ -0,0 +1,6 @@
<svg
width="76"
height="65"
viewBox="0 0 76 65"
fill="none" xmlns="http://www.w3.org/2000/svg"><path
d="M37.5274 0L75.0548 65H0L37.5274 0Z" fill="#ffffff"/></svg>
package.json
@@ -41,6 +41,7 @@
		"dist/credentials/QdrantApi.credentials.js",
		"dist/credentials/SearXngApi.credentials.js",
		"dist/credentials/SerpApi.credentials.js",
		"dist/credentials/VercelAiGatewayApi.credentials.js",
		"dist/credentials/WeaviateApi.credentials.js",
		"dist/credentials/WolframAlphaApi.credentials.js",
		"dist/credentials/XAiApi.credentials.js",
@@ -85,6 +86,7 @@
		"dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
		"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",
		"dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js",
		"dist/nodes/llms/LmChatVercelAiGateway/LmChatVercelAiGateway.node.js",
		"dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js",
		"dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js",
		"dist/nodes/llms/LMOpenAi/LmOpenAi.node.js",