feat: Add HTTP proxy for supported LLM nodes (#15449)
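
The change follows one pattern across every touched node: import the shared getHttpProxyAgent() helper from @utils/httpProxyAgent and hand the agent it returns to each vendor SDK's client configuration, so outbound traffic from the Anthropic, OpenAI, Azure OpenAI, AWS Bedrock, DeepSeek, Groq, OpenRouter, and xAI Grok LLM nodes can be routed through an HTTP proxy.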
@@ -12,6 +12,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { searchModels } from './methods/searchModels';

@@ -329,6 +330,9 @@ export class LmChatAnthropic implements INodeType {
 			callbacks: [new N8nLlmTracing(this, { tokensUsageParser })],
 			onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
 			invocationKwargs,
+			clientOptions: {
+				httpAgent: getHttpProxyAgent(),
+			},
 		});
 
 		return {

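Every hunk in this commit calls the same helper, but its implementation is not part of the diff. What follows is only a minimal sketch of what such a helper could look like, assuming the https-proxy-agent package and the conventional HTTP_PROXY / HTTPS_PROXY environment variables; the real @utils/httpProxyAgent may source its configuration differently.

// NOTE: hypothetical sketch; the actual @utils/httpProxyAgent implementation
// is not shown in this commit and may differ.
import type { Agent } from 'node:http';

import { HttpsProxyAgent } from 'https-proxy-agent';

export function getHttpProxyAgent(): Agent | undefined {
	// Assumption: the proxy is configured through the conventional env variables.
	const proxyUrl =
		process.env.HTTPS_PROXY ??
		process.env.https_proxy ??
		process.env.HTTP_PROXY ??
		process.env.http_proxy;

	// No proxy configured: return undefined so each SDK keeps making
	// direct connections and existing behavior is unchanged.
	return proxyUrl ? new HttpsProxyAgent(proxyUrl) : undefined;
}
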
@@ -9,6 +9,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { searchModels } from './methods/loadModels';

@@ -346,7 +347,9 @@ export class LmChatOpenAi implements INodeType {
 			reasoningEffort?: 'low' | 'medium' | 'high';
 		};
 
-		const configuration: ClientOptions = {};
+		const configuration: ClientOptions = {
+			httpAgent: getHttpProxyAgent(),
+		};
 		if (options.baseURL) {
 			configuration.baseURL = options.baseURL;
 		} else if (credentials.url) {

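Here httpAgent is the openai-node ClientOptions field that accepts a Node http.Agent; LangChain's ChatOpenAI passes the configuration object through to that SDK, which is why the identical one-liner also works for the DeepSeek, OpenRouter, and xAI Grok hunks further down.
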
@@ -1,6 +1,8 @@
 import type { ILoadOptionsFunctions, INodeListSearchResult } from 'n8n-workflow';
 import OpenAI from 'openai';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
+
 export async function searchModels(
 	this: ILoadOptionsFunctions,
 	filter?: string,

@@ -11,7 +13,11 @@ export async function searchModels(
 		(credentials.url as string) ||
 		'https://api.openai.com/v1';
 
-	const openai = new OpenAI({ baseURL, apiKey: credentials.apiKey as string });
+	const openai = new OpenAI({
+		baseURL,
+		apiKey: credentials.apiKey as string,
+		httpAgent: getHttpProxyAgent(),
+	});
 	const { data: models = [] } = await openai.models.list();
 
 	const filteredModels = models.filter((model: { id: string }) => {

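This hunk matters separately from the node itself: searchModels constructs its own OpenAI client to populate the model picker, so without the agent here the dropdown lookup would bypass the proxy even though completions go through it.
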
@@ -9,6 +9,8 @@ import type {
 	ILoadOptionsFunctions,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
+
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
 import { N8nLlmTracing } from '../N8nLlmTracing';
 

@@ -248,7 +250,9 @@ export class LmOpenAi implements INodeType {
 			topP?: number;
 		};
 
-		const configuration: ClientOptions = {};
+		const configuration: ClientOptions = {
+			httpAgent: getHttpProxyAgent(),
+		};
 		if (options.baseURL) {
 			configuration.baseURL = options.baseURL;
 		}

@@ -8,6 +8,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';

@@ -147,6 +148,9 @@ export class LmChatAwsBedrock implements INodeType {
 			model: modelName,
 			temperature: options.temperature,
 			maxTokens: options.maxTokensToSample,
+			clientConfig: {
+				httpAgent: getHttpProxyAgent(),
+			},
 			credentials: {
 				secretAccessKey: credentials.secretAccessKey as string,
 				accessKeyId: credentials.accessKeyId as string,

@@ -10,6 +10,8 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
+
 import { setupApiKeyAuthentication } from './credentials/api-key';
 import { setupOAuth2Authentication } from './credentials/oauth2';
 import { properties } from './properties';

@@ -111,6 +113,9 @@ export class LmChatAzureOpenAi implements INodeType {
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,
 			callbacks: [new N8nLlmTracing(this)],
+			configuration: {
+				httpAgent: getHttpProxyAgent(),
+			},
 			modelKwargs: options.responseFormat
 				? {
 						response_format: { type: options.responseFormat },

@@ -9,6 +9,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';

@@ -228,6 +229,7 @@ export class LmChatDeepSeek implements INodeType {
 
 		const configuration: ClientOptions = {
 			baseURL: credentials.url,
+			httpAgent: getHttpProxyAgent(),
 		};
 
 		const model = new ChatOpenAI({

@@ -8,6 +8,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';

@@ -146,6 +147,7 @@ export class LmChatGroq implements INodeType {
 			maxTokens: options.maxTokensToSample,
 			temperature: options.temperature,
 			callbacks: [new N8nLlmTracing(this)],
+			httpAgent: getHttpProxyAgent(),
 			onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
 		});
 

@@ -9,6 +9,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';

@@ -227,6 +228,7 @@ export class LmChatOpenRouter implements INodeType {
 
 		const configuration: ClientOptions = {
 			baseURL: credentials.url,
+			httpAgent: getHttpProxyAgent(),
 		};
 
 		const model = new ChatOpenAI({

@@ -9,6 +9,7 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
+import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';

@@ -228,6 +229,7 @@ export class LmChatXAiGrok implements INodeType {
 
 		const configuration: ClientOptions = {
 			baseURL: credentials.url,
+			httpAgent: getHttpProxyAgent(),
 		};
 
 		const model = new ChatOpenAI({

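Taken together, the agent is identical everywhere but the wiring point tracks each SDK: Anthropic takes it under clientOptions, the OpenAI-compatible nodes (LmChatOpenAi, LmOpenAi, DeepSeek, OpenRouter, xAI Grok) under the OpenAI ClientOptions, AWS Bedrock under clientConfig, Azure OpenAI under configuration, and Groq as a top-level constructor option. Assuming the helper returns undefined when no proxy is configured, every one of these additions is a no-op in a default deployment.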