chore: Update LangChain dependencies (no-changelog) (#16523)

Author: Eugene
Date: 2025-06-20 11:50:14 +02:00
Committed by: GitHub
Parent: 57911225e7
Commit: 67852b826f
21 changed files with 644 additions and 551 deletions

View File

@@ -94,7 +94,8 @@
"typescript": "^5.8.2", "typescript": "^5.8.2",
"vue-tsc": "^2.2.8", "vue-tsc": "^2.2.8",
"google-gax": "^4.3.7", "google-gax": "^4.3.7",
"ws": ">=8.17.1" "ws": ">=8.17.1",
"zod": "3.25.67"
}, },
"patchedDependencies": { "patchedDependencies": {
"bull@4.16.4": "patches/bull@4.16.4.patch", "bull@4.16.4": "patches/bull@4.16.4.patch",

View File

@@ -7,7 +7,7 @@ type LLMConfig = {
 export const o4mini = async (config: LLMConfig) => {
 	const { ChatOpenAI } = await import('@langchain/openai');
 	return new ChatOpenAI({
-		modelName: 'o4-mini-2025-04-16',
+		model: 'o4-mini-2025-04-16',
 		apiKey: config.apiKey,
 		configuration: {
 			baseURL: config.baseUrl,
@@ -19,7 +19,7 @@ export const o4mini = async (config: LLMConfig) => {
 export const gpt41mini = async (config: LLMConfig) => {
 	const { ChatOpenAI } = await import('@langchain/openai');
 	return new ChatOpenAI({
-		modelName: 'gpt-4.1-mini-2025-04-14',
+		model: 'gpt-4.1-mini-2025-04-14',
 		apiKey: config.apiKey,
 		temperature: 0,
 		configuration: {
@@ -32,7 +32,7 @@ export const gpt41mini = async (config: LLMConfig) => {
 export const anthropicClaude37Sonnet = async (config: LLMConfig) => {
 	const { ChatAnthropic } = await import('@langchain/anthropic');
 	return new ChatAnthropic({
-		modelName: 'claude-3-7-sonnet-20250219',
+		model: 'claude-3-7-sonnet-20250219',
 		apiKey: config.apiKey,
 		temperature: 0,
 		maxTokens: 16000,
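
Note: most of the source changes in this commit follow the same pattern as this file, renaming the constructor option `modelName` to `model` across LangChain chat-model wrappers. A minimal sketch of the renamed option, assuming @langchain/openai >= 0.3 where `model` is the current name and `modelName` survives only as a legacy alias:

// Sketch: the renamed constructor option, not n8n code.
import { ChatOpenAI } from '@langchain/openai';

const llm = new ChatOpenAI({
	model: 'gpt-4.1-mini-2025-04-14', // was: modelName
	apiKey: process.env.OPENAI_API_KEY,
	temperature: 0,
});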

View File

@@ -1,9 +1,8 @@
 import type { BaseOutputParser } from '@langchain/core/output_parsers';
 import type { DynamicStructuredTool, Tool } from 'langchain/tools';
 import { NodeOperationError, type IExecuteFunctions, type INode } from 'n8n-workflow';
-import type { z } from 'zod';

-type ZodObjectAny = z.ZodObject<any, any, any, any>;
+import type { ZodObjectAny } from '../../../../types/types';

 export async function extractParsedOutput(
 	ctx: IExecuteFunctions,

View File

@@ -4,6 +4,7 @@ import { NodeOperationError } from 'n8n-workflow';
 import type { INode } from 'n8n-workflow';
 import { z } from 'zod';

+import type { ZodObjectAny } from '../../../../types/types';
 import { checkForStructuredTools } from '../agents/utils';

 describe('checkForStructuredTools', () => {
@@ -41,7 +42,7 @@ describe('checkForStructuredTools', () => {
 		func: async () => 'result',
 	});

-	const tools: Array<Tool | DynamicStructuredTool> = [dynamicTool];
+	const tools: Array<Tool | DynamicStructuredTool<ZodObjectAny>> = [dynamicTool];

 	await expect(checkForStructuredTools(tools, mockNode, 'Conversation Agent')).rejects.toThrow(
 		NodeOperationError,

View File

@@ -189,7 +189,11 @@ describe('imageUtils', () => {
 	it('should handle image data differently for GoogleGenerativeAI models', async () => {
 		// Mock a Google model - using our mocked class
-		mockContext.getInputConnectionData.mockResolvedValue(new ChatGoogleGenerativeAI());
+		mockContext.getInputConnectionData.mockResolvedValue(
+			new ChatGoogleGenerativeAI({
+				model: 'gemini-1.0-pro',
+			}),
+		);

 		const message: MessageTemplate = {
 			type: 'HumanMessagePromptTemplate',
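
Note: the test update above reflects a behavioural change, not just a rename. The upgraded @langchain/google-genai appears to require an explicit model at construction time, so the zero-argument mock no longer type-checks. A minimal sketch under that assumption:

// Sketch, assuming @langchain/google-genai 0.2.x requires `model` (no default).
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';

const gemini = new ChatGoogleGenerativeAI({
	model: 'gemini-1.0-pro',
	apiKey: process.env.GOOGLE_API_KEY,
});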

View File

@@ -126,7 +126,7 @@ export class EmbeddingsGoogleGemini implements INodeType {
 		const credentials = await this.getCredentials('googlePalmApi');
 		const embeddings = new GoogleGenerativeAIEmbeddings({
 			apiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 		});

 		return {

View File

@@ -146,7 +146,7 @@ export class EmbeddingsMistralCloud implements INodeType {
 		const embeddings = new MistralAIEmbeddings({
 			apiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 			...options,
 		});

View File

@@ -321,7 +321,7 @@ export class LmChatAnthropic implements INodeType {
 		const model = new ChatAnthropic({
 			anthropicApiKey: credentials.apiKey,
-			modelName,
+			model: modelName,
 			anthropicApiUrl: baseURL,
 			maxTokens: options.maxTokensToSample,
 			temperature: options.temperature,

View File

@@ -367,7 +367,7 @@ export class LmChatOpenAi implements INodeType {
 		const model = new ChatOpenAI({
 			openAIApiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 			...options,
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,

View File

@@ -259,7 +259,7 @@ export class LmOpenAi implements INodeType {
 		const model = new OpenAI({
 			openAIApiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 			...options,
 			configuration,
 			timeout: options.timeout ?? 60000,

View File

@@ -12,6 +12,7 @@ import {
 import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

+import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
 import { N8nLlmTracing } from '../N8nLlmTracing';
@@ -234,7 +235,7 @@ export class LmChatDeepSeek implements INodeType {
 		const model = new ChatOpenAI({
 			openAIApiKey: credentials.apiKey,
-			modelName,
+			model: modelName,
 			...options,
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,

View File

@@ -149,7 +149,7 @@ export class LmChatGoogleGemini implements INodeType {
 		const model = new ChatGoogleGenerativeAI({
 			apiKey: credentials.apiKey as string,
 			baseUrl: credentials.host as string,
-			modelName,
+			model: modelName,
 			topK: options.topK,
 			topP: options.topP,
 			temperature: options.temperature,

View File

@@ -143,7 +143,7 @@ export class LmChatGroq implements INodeType {
 		const model = new ChatGroq({
 			apiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 			maxTokens: options.maxTokensToSample,
 			temperature: options.temperature,
 			callbacks: [new N8nLlmTracing(this)],

View File

@@ -190,7 +190,7 @@ export class LmChatMistralCloud implements INodeType {
 		const model = new ChatMistralAI({
 			apiKey: credentials.apiKey as string,
-			modelName,
+			model: modelName,
 			...options,
 			callbacks: [new N8nLlmTracing(this)],
 			onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),

View File

@@ -12,6 +12,7 @@ import {
 import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

+import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
 import { N8nLlmTracing } from '../N8nLlmTracing';
@@ -233,7 +234,7 @@ export class LmChatOpenRouter implements INodeType {
 		const model = new ChatOpenAI({
 			openAIApiKey: credentials.apiKey,
-			modelName,
+			model: modelName,
 			...options,
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,

View File

@@ -12,6 +12,7 @@ import {
 import { getHttpProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

+import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
 import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
 import { N8nLlmTracing } from '../N8nLlmTracing';
@@ -234,7 +235,7 @@ export class LmChatXAiGrok implements INodeType {
 		const model = new ChatOpenAI({
 			openAIApiKey: credentials.apiKey,
-			modelName,
+			model: modelName,
 			...options,
 			timeout: options.timeout ?? 60000,
 			maxRetries: options.maxRetries ?? 2,

View File

@@ -160,20 +160,20 @@
"@google/generative-ai": "0.21.0", "@google/generative-ai": "0.21.0",
"@huggingface/inference": "2.8.0", "@huggingface/inference": "2.8.0",
"@langchain/anthropic": "catalog:", "@langchain/anthropic": "catalog:",
"@langchain/aws": "0.1.10", "@langchain/aws": "0.1.11",
"@langchain/cohere": "0.3.2", "@langchain/cohere": "0.3.4",
"@langchain/community": "catalog:", "@langchain/community": "catalog:",
"@langchain/core": "catalog:", "@langchain/core": "catalog:",
"@langchain/google-genai": "0.1.6", "@langchain/google-genai": "0.2.13",
"@langchain/google-vertexai": "0.1.8", "@langchain/google-vertexai": "0.2.13",
"@langchain/groq": "0.1.3", "@langchain/groq": "0.2.3",
"@langchain/mistralai": "0.2.0", "@langchain/mistralai": "0.2.1",
"@langchain/mongodb": "^0.1.0", "@langchain/mongodb": "^0.1.0",
"@langchain/ollama": "0.1.4", "@langchain/ollama": "0.2.2",
"@langchain/openai": "catalog:", "@langchain/openai": "catalog:",
"@langchain/pinecone": "0.1.3", "@langchain/pinecone": "0.2.0",
"@langchain/qdrant": "0.1.2", "@langchain/qdrant": "0.1.2",
"@langchain/redis": "0.1.0", "@langchain/redis": "0.1.1",
"@langchain/textsplitters": "0.1.0", "@langchain/textsplitters": "0.1.0",
"@modelcontextprotocol/sdk": "1.12.0", "@modelcontextprotocol/sdk": "1.12.0",
"@mozilla/readability": "0.6.0", "@mozilla/readability": "0.6.0",
@@ -182,7 +182,7 @@
"@n8n/typeorm": "0.3.20-12", "@n8n/typeorm": "0.3.20-12",
"@n8n/typescript-config": "workspace:*", "@n8n/typescript-config": "workspace:*",
"@n8n/vm2": "3.9.25", "@n8n/vm2": "3.9.25",
"@pinecone-database/pinecone": "4.0.0", "@pinecone-database/pinecone": "^5.0.2",
"@qdrant/js-client-rest": "1.14.1", "@qdrant/js-client-rest": "1.14.1",
"@supabase/supabase-js": "2.49.9", "@supabase/supabase-js": "2.49.9",
"@xata.io/client": "0.28.4", "@xata.io/client": "0.28.4",
@@ -197,14 +197,14 @@
"html-to-text": "9.0.5", "html-to-text": "9.0.5",
"https-proxy-agent": "catalog:", "https-proxy-agent": "catalog:",
"jsdom": "23.0.1", "jsdom": "23.0.1",
"langchain": "0.3.11", "langchain": "0.3.28",
"lodash": "catalog:", "lodash": "catalog:",
"mammoth": "1.7.2", "mammoth": "1.7.2",
"mime-types": "2.1.35", "mime-types": "2.1.35",
"mongodb": "6.11.0", "mongodb": "6.11.0",
"n8n-nodes-base": "workspace:*", "n8n-nodes-base": "workspace:*",
"n8n-workflow": "workspace:*", "n8n-workflow": "workspace:*",
"openai": "4.78.1", "openai": "4.103.0",
"pdf-parse": "1.1.1", "pdf-parse": "1.1.1",
"pg": "8.12.0", "pg": "8.12.0",
"redis": "4.6.12", "redis": "4.6.12",

View File

@@ -1 +1,5 @@
-type OpenAICompatibleCredential = { apiKey: string; url: string };
+import type { z } from 'zod';
+
+export type OpenAICompatibleCredential = { apiKey: string; url: string };
+
+export type ZodObjectAny = z.ZodObject<any, any, any, any>;

View File

@@ -6,6 +6,8 @@ import { NodeConnectionTypes, jsonParse, NodeOperationError } from 'n8n-workflow
 import type { ZodTypeAny } from 'zod';
 import { ZodBoolean, ZodNullable, ZodNumber, ZodObject, ZodOptional } from 'zod';

+import type { ZodObjectAny } from '../types/types';
+
 const getSimplifiedType = (schema: ZodTypeAny) => {
 	if (schema instanceof ZodObject) {
 		return 'object';
@@ -44,10 +46,10 @@ ALL parameters marked as required must be provided`;
 	return description;
 };

-export class N8nTool extends DynamicStructuredTool {
+export class N8nTool extends DynamicStructuredTool<ZodObjectAny> {
 	constructor(
 		private context: ISupplyDataFunctions,
-		fields: DynamicStructuredToolInput,
+		fields: DynamicStructuredToolInput<ZodObjectAny>,
 	) {
 		super(fields);
 	}
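
Note: the explicit `ZodObjectAny` type argument pins down the schema generic that `DynamicStructuredTool` previously left implicit, which is why the same alias now lives in the shared types/types.ts. A sketch of the generic in isolation, assuming langchain >= 0.3 (`echoTool` is a hypothetical tool, not one from this commit):

// Sketch: DynamicStructuredTool parameterized by an "any object schema" alias.
import { DynamicStructuredTool } from 'langchain/tools';
import { z } from 'zod';

// Same alias this commit moves into types/types.ts.
type ZodObjectAny = z.ZodObject<any, any, any, any>;

const echoTool: DynamicStructuredTool<ZodObjectAny> = new DynamicStructuredTool({
	name: 'echo',
	description: 'Returns its input unchanged',
	schema: z.object({ text: z.string() }),
	func: async ({ text }) => text,
});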

pnpm-lock.yaml (generated): 1103 changes. File diff suppressed because it is too large.

View File

@@ -37,12 +37,12 @@ catalog:
   uuid: 10.0.0
   xml2js: 0.6.2
   xss: 1.0.15
-  zod: 3.24.1
+  zod: 3.25.67
   zod-to-json-schema: 3.23.3
-  '@langchain/core': 0.3.48
-  '@langchain/openai': 0.5.0
-  '@langchain/anthropic': 0.3.21
-  '@langchain/community': 0.3.24
+  '@langchain/core': 0.3.59
+  '@langchain/openai': 0.5.13
+  '@langchain/anthropic': 0.3.22
+  '@langchain/community': 0.3.46
   '@n8n_io/ai-assistant-sdk': 1.14.0

 catalogs: