diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
index a8ab5e514e..d1b367dd45 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
@@ -45,6 +45,7 @@ export class Agent extends VersionedNodeType {
2: new AgentV2(baseDescription),
2.1: new AgentV2(baseDescription),
2.2: new AgentV2(baseDescription),
+ // IMPORTANT: Remember to update AgentTool when adding a new Agent version
};
super(nodeVersions, baseDescription);
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/AgentTool.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/AgentTool.node.ts
new file mode 100644
index 0000000000..42e88f02d9
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/AgentTool.node.ts
@@ -0,0 +1,34 @@
+import type { INodeTypeBaseDescription, IVersionedNodeType } from 'n8n-workflow';
+import { VersionedNodeType } from 'n8n-workflow';
+
+import { AgentToolV2 } from './V2/AgentToolV2.node';
+
+export class AgentTool extends VersionedNodeType {
+ constructor() {
+ const baseDescription: INodeTypeBaseDescription = {
+ displayName: 'AI Agent Tool',
+ name: 'agentTool',
+ icon: 'fa:robot',
+ iconColor: 'black',
+ group: ['transform'],
+ description: 'Generates an action plan and executes it. Can use external tools.',
+ codex: {
+ alias: ['LangChain', 'Chat', 'Conversational', 'Plan and Execute', 'ReAct', 'Tools'],
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Tools'],
+ Tools: ['Other Tools'],
+ },
+ },
+ defaultVersion: 2.2,
+ };
+
+ const nodeVersions: IVersionedNodeType['nodeVersions'] = {
+ // Should have the same versioning as the Agent node,
+ // because internal agent logic often checks the node version
+ 2.2: new AgentToolV2(baseDescription),
+ };
+
+ super(nodeVersions, baseDescription);
+ }
+}
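
The version map must stay in lockstep with the Agent node: internal agent logic branches on `typeVersion`, and `n8n-workflow` picks the implementation to instantiate from `nodeVersions`, falling back to `defaultVersion`. A minimal sketch of that resolution, assuming it works roughly as follows (`resolveNodeVersion` is a hypothetical helper, not the actual `n8n-workflow` code):

```ts
// Hypothetical sketch of version resolution — illustrative only; the real
// logic lives in n8n-workflow's VersionedNodeType and may differ in details.
function resolveNodeVersion<T>(
	nodeVersions: Record<number, T>,
	defaultVersion: number,
	requestedVersion?: number,
): T {
	// Workflows pin a typeVersion; newly added nodes get the default.
	const version = requestedVersion ?? defaultVersion;
	const nodeType = nodeVersions[version];
	if (nodeType === undefined) {
		throw new Error(`Unknown node version: ${version}`);
	}
	return nodeType;
}
```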
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentToolV2.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentToolV2.node.ts
new file mode 100644
index 0000000000..07884a267b
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentToolV2.node.ts
@@ -0,0 +1,90 @@
+import { NodeConnectionTypes } from 'n8n-workflow';
+import type {
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeType,
+ INodeTypeDescription,
+ INodeTypeBaseDescription,
+ ISupplyDataFunctions,
+} from 'n8n-workflow';
+
+import { textInput, toolDescription } from '@utils/descriptions';
+
+import { getInputs } from './utils';
+import { getToolsAgentProperties } from '../agents/ToolsAgent/V2/description';
+import { toolsAgentExecute } from '../agents/ToolsAgent/V2/execute';
+
+export class AgentToolV2 implements INodeType {
+ description: INodeTypeDescription;
+ constructor(baseDescription: INodeTypeBaseDescription) {
+ this.description = {
+ ...baseDescription,
+ version: [2.2],
+ defaults: {
+ name: 'AI Agent Tool',
+ color: '#404040',
+ },
+ inputs: `={{
+ ((hasOutputParser, needsFallback) => {
+ ${getInputs.toString()};
+ return getInputs(false, hasOutputParser, needsFallback)
+ })($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)
+ }}`,
+ outputs: [NodeConnectionTypes.AiTool],
+ properties: [
+ toolDescription,
+ {
+ ...textInput,
+ },
+ {
+ displayName: 'Require Specific Output Format',
+ name: 'hasOutputParser',
+ type: 'boolean',
+ default: false,
+ noDataExpression: true,
+ },
+ {
+ displayName: 'Connect an output parser on the canvas to specify the output format you require',
+ name: 'notice',
+ type: 'notice',
+ default: '',
+ displayOptions: {
+ show: {
+ hasOutputParser: [true],
+ },
+ },
+ },
+ {
+ displayName: 'Enable Fallback Model',
+ name: 'needsFallback',
+ type: 'boolean',
+ default: false,
+ noDataExpression: true,
+ displayOptions: {
+ show: {
+ '@version': [{ _cnd: { gte: 2.1 } }],
+ },
+ },
+ },
+ {
+ displayName:
+ 'Connect an additional language model on the canvas to use it as a fallback if the main model fails',
+ name: 'fallbackNotice',
+ type: 'notice',
+ default: '',
+ displayOptions: {
+ show: {
+ needsFallback: [true],
+ },
+ },
+ },
+ ...getToolsAgentProperties({ withStreaming: false }),
+ ],
+ };
+ }
+
+ // Automatically wrapped as a tool
+ async execute(this: IExecuteFunctions | ISupplyDataFunctions): Promise<INodeExecutionData[][]> {
+ return await toolsAgentExecute.call(this);
+ }
+}
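
The `inputs` expression above works by inlining the source of `getInputs` with `getInputs.toString()`, so the frontend's expression evaluator can re-declare and call the function without importing backend code. A self-contained sketch of the trick, with illustrative names and `eval` standing in for n8n's evaluator:

```ts
// Illustration of the stringified-function trick used in the `inputs` expression.
// `eval` stands in for n8n's sandboxed expression evaluator — illustration only.
function double(n: number): number {
	return n * 2;
}

// Inline the function's source, then re-declare and invoke it in an IIFE,
// mirroring the `={{ ((...) => { ${getInputs.toString()}; ... })(...) }}` shape above.
const expression = `
	((n) => {
		${double.toString()};
		return double(n);
	})(21)
`;

// eslint-disable-next-line no-eval
console.log(eval(expression)); // 42
```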
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentV2.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentV2.node.ts
index 4a4097d2f1..ea956a778f 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentV2.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/AgentV2.node.ts
@@ -1,104 +1,18 @@
import { NodeConnectionTypes } from 'n8n-workflow';
import type {
- INodeInputConfiguration,
- INodeInputFilter,
IExecuteFunctions,
INodeExecutionData,
INodeType,
INodeTypeDescription,
- NodeConnectionType,
INodeTypeBaseDescription,
} from 'n8n-workflow';
import { promptTypeOptions, textFromPreviousNode, textInput } from '@utils/descriptions';
-import { toolsAgentProperties } from '../agents/ToolsAgent/V2/description';
+import { getInputs } from './utils';
+import { getToolsAgentProperties } from '../agents/ToolsAgent/V2/description';
import { toolsAgentExecute } from '../agents/ToolsAgent/V2/execute';
-// Function used in the inputs expression to figure out which inputs to
-// display based on the agent type
-function getInputs(
- hasOutputParser?: boolean,
- needsFallback?: boolean,
-): Array<NodeConnectionType | INodeInputConfiguration> {
- interface SpecialInput {
- type: NodeConnectionType;
- filter?: INodeInputFilter;
- displayName: string;
- required?: boolean;
- }
-
- const getInputData = (
- inputs: SpecialInput[],
- ): Array<NodeConnectionType | INodeInputConfiguration> => {
- return inputs.map(({ type, filter, displayName, required }) => {
- const input: INodeInputConfiguration = {
- type,
- displayName,
- required,
- maxConnections: ['ai_languageModel', 'ai_memory', 'ai_outputParser'].includes(type)
- ? 1
- : undefined,
- };
-
- if (filter) {
- input.filter = filter;
- }
-
- return input;
- });
- };
-
- let specialInputs: SpecialInput[] = [
- {
- type: 'ai_languageModel',
- displayName: 'Chat Model',
- required: true,
- filter: {
- excludedNodes: [
- '@n8n/n8n-nodes-langchain.lmCohere',
- '@n8n/n8n-nodes-langchain.lmOllama',
- 'n8n/n8n-nodes-langchain.lmOpenAi',
- '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
- ],
- },
- },
- {
- type: 'ai_languageModel',
- displayName: 'Fallback Model',
- required: true,
- filter: {
- excludedNodes: [
- '@n8n/n8n-nodes-langchain.lmCohere',
- '@n8n/n8n-nodes-langchain.lmOllama',
- 'n8n/n8n-nodes-langchain.lmOpenAi',
- '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
- ],
- },
- },
- {
- displayName: 'Memory',
- type: 'ai_memory',
- },
- {
- displayName: 'Tool',
- type: 'ai_tool',
- },
- {
- displayName: 'Output Parser',
- type: 'ai_outputParser',
- },
- ];
-
- if (hasOutputParser === false) {
- specialInputs = specialInputs.filter((input) => input.type !== 'ai_outputParser');
- }
- if (needsFallback === false) {
- specialInputs = specialInputs.filter((input) => input.displayName !== 'Fallback Model');
- }
- return ['main', ...getInputData(specialInputs)];
-}
-
export class AgentV2 implements INodeType {
description: INodeTypeDescription;
@@ -113,7 +27,7 @@ export class AgentV2 implements INodeType {
inputs: `={{
((hasOutputParser, needsFallback) => {
${getInputs.toString()};
- return getInputs(hasOutputParser, needsFallback)
+ return getInputs(true, hasOutputParser, needsFallback);
})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)
}}`,
outputs: [NodeConnectionTypes.Main],
@@ -184,7 +98,7 @@ export class AgentV2 implements INodeType {
},
},
},
- ...toolsAgentProperties,
+ ...getToolsAgentProperties({ withStreaming: true }),
],
hints: [
{
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/utils.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/utils.ts
new file mode 100644
index 0000000000..39690f703c
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/utils.ts
@@ -0,0 +1,94 @@
+import {
+ type INodeInputConfiguration,
+ type INodeInputFilter,
+ type NodeConnectionType,
+} from 'n8n-workflow';
+
+// Function used in the inputs expression to figure out which inputs
+// to display based on the agent type
+export function getInputs(
+ hasMainInput?: boolean,
+ hasOutputParser?: boolean,
+ needsFallback?: boolean,
+): Array<NodeConnectionType | INodeInputConfiguration> {
+ interface SpecialInput {
+ type: NodeConnectionType;
+ filter?: INodeInputFilter;
+ displayName: string;
+ required?: boolean;
+ }
+
+ const getInputData = (
+ inputs: SpecialInput[],
+ ): Array<NodeConnectionType | INodeInputConfiguration> => {
+ return inputs.map(({ type, filter, displayName, required }) => {
+ const input: INodeInputConfiguration = {
+ type,
+ displayName,
+ required,
+ maxConnections: ['ai_languageModel', 'ai_memory', 'ai_outputParser'].includes(type)
+ ? 1
+ : undefined,
+ };
+
+ if (filter) {
+ input.filter = filter;
+ }
+
+ return input;
+ });
+ };
+
+ let specialInputs: SpecialInput[] = [
+ {
+ type: 'ai_languageModel',
+ displayName: 'Chat Model',
+ required: true,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_languageModel',
+ displayName: 'Fallback Model',
+ required: true,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ displayName: 'Memory',
+ type: 'ai_memory',
+ },
+ {
+ displayName: 'Tool',
+ type: 'ai_tool',
+ },
+ {
+ displayName: 'Output Parser',
+ type: 'ai_outputParser',
+ },
+ ];
+
+ if (hasOutputParser === false) {
+ specialInputs = specialInputs.filter((input) => input.type !== 'ai_outputParser');
+ }
+ if (needsFallback === false) {
+ specialInputs = specialInputs.filter((input) => input.displayName !== 'Fallback Model');
+ }
+
+ // Note: we cannot use NodeConnectionType.Main here,
+ // otherwise the expression won't evaluate correctly on the frontend
+ const mainInputs = hasMainInput ? ['main' as NodeConnectionType] : [];
+ return [...mainInputs, ...getInputData(specialInputs)];
+}
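
Quick usage reference, mirroring the behaviour pinned down by the tests added later in this diff:

```ts
import { getInputs } from './utils';

// Agent node: main input plus all special AI connections
getInputs(true, true, true);
// -> ['main', Chat Model, Fallback Model, Memory, Tool, Output Parser]

// AgentTool node with all toggles off: no main input, no fallback, no parser
getInputs(false, false, false);
// -> [Chat Model, Memory, Tool]
```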
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/description.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/description.ts
index 05612cf138..96faa94437 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/description.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/description.ts
@@ -4,7 +4,17 @@ import { getBatchingOptionFields } from '@utils/sharedFields';
import { commonOptions } from '../options';
-export const toolsAgentProperties: INodeProperties[] = [
+const enableStreamingOption: INodeProperties = {
+ displayName: 'Enable Streaming',
+ name: 'enableStreaming',
+ type: 'boolean',
+ default: true,
+ description: 'Whether this agent will stream the response in real-time as it generates text',
+};
+
+export const getToolsAgentProperties = ({
+ withStreaming,
+}: { withStreaming: boolean }): INodeProperties[] => [
{
displayName: 'Options',
name: 'options',
@@ -14,14 +24,7 @@ export const toolsAgentProperties: INodeProperties[] = [
options: [
...commonOptions,
getBatchingOptionFields(undefined, 1),
- {
- displayName: 'Enable Streaming',
- name: 'enableStreaming',
- type: 'boolean',
- default: true,
- description:
- 'Whether this agent will stream the response in real-time as it generates text',
- },
+ ...(withStreaming ? [enableStreamingOption] : []),
],
displayOptions: {
hide: {
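
Turning the property list into a factory lets both nodes share one definition while the tool variant drops the streaming toggle; usage, for reference:

```ts
import { getToolsAgentProperties } from './description';

const agentProperties = getToolsAgentProperties({ withStreaming: true }); // includes 'Enable Streaming'
const toolProperties = getToolsAgentProperties({ withStreaming: false }); // omits it
```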
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts
index 8899f818d7..55dbf859c8 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/V2/execute.ts
@@ -13,7 +13,7 @@ import type { BaseChatMemory } from 'langchain/memory';
import type { DynamicStructuredTool, Tool } from 'langchain/tools';
import omit from 'lodash/omit';
import { jsonParse, NodeOperationError, sleep } from 'n8n-workflow';
-import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
+import type { IExecuteFunctions, INodeExecutionData, ISupplyDataFunctions } from 'n8n-workflow';
import assert from 'node:assert';
import { getPromptInputByType } from '@utils/helpers';
@@ -167,9 +167,13 @@ async function processEventStream(
* creates the agent, and processes each input item. The error handling for each item is also
* managed here based on the node's continueOnFail setting.
*
+ * @param this Execute context. SupplyDataContext is passed when the agent is used as a tool
+ *
* @returns The array of execution data for all processed items
*/
-export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
+export async function toolsAgentExecute(
+ this: IExecuteFunctions | ISupplyDataFunctions,
+): Promise<INodeExecutionData[][]> {
this.logger.debug('Executing Tools Agent V2');
const returnData: INodeExecutionData[] = [];
@@ -247,9 +251,14 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise= 2.1) {
+ if (
+ 'isStreaming' in this &&
+ enableStreaming &&
+ isStreamingAvailable &&
+ this.getNode().typeVersion >= 2.1
+ ) {
const chatHistory = await memory?.chatHistory.getMessages();
const eventStream = executor.streamEvents(
{
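
The new `'isStreaming' in this` guard relies on TypeScript's `in`-operator narrowing: only the execute context exposes the streaming helper, so a supply-data context (the agent-as-tool case) can never enter the streaming branch. A self-contained sketch with simplified stand-in interfaces (the real `IExecuteFunctions`/`ISupplyDataFunctions` are far larger):

```ts
// Simplified stand-ins for the two context types — assumptions for illustration.
interface ExecuteLike {
	isStreaming: () => boolean;
}
interface SupplyDataLike {
	getNodeParameter: (name: string, itemIndex: number) => unknown;
}

function shouldStream(ctx: ExecuteLike | SupplyDataLike): boolean {
	// 'in' narrows the union: inside the branch ctx is ExecuteLike, so the
	// call type-checks; a supply-data context always falls through to false.
	if ('isStreaming' in ctx) {
		return ctx.isStreaming();
	}
	return false;
}
```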
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/common.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/common.ts
index 63c22ed017..0f8038c53c 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/common.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/agents/ToolsAgent/common.ts
@@ -7,7 +7,7 @@ import type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output
import type { BaseChatMemory } from 'langchain/memory';
import { DynamicStructuredTool, type Tool } from 'langchain/tools';
import { BINARY_ENCODING, jsonParse, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
-import type { IExecuteFunctions } from 'n8n-workflow';
+import type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';
import type { ZodObject } from 'zod';
import { z } from 'zod';
@@ -42,7 +42,7 @@ export function getOutputParserSchema(
* @returns A HumanMessage containing the binary image messages.
*/
export async function extractBinaryMessages(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
itemIndex: number,
): Promise<HumanMessage> {
const binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};
@@ -263,7 +263,7 @@ export const getAgentStepsParser =
* @returns The validated chat model
*/
export async function getChatModel(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
index: number = 0,
): Promise<BaseChatModel | undefined> {
const connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);
@@ -297,7 +297,7 @@ export async function getChatModel(
* @returns The connected memory (if any)
*/
export async function getOptionalMemory(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
): Promise<BaseChatMemory | undefined> {
return (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as
| BaseChatMemory
@@ -313,7 +313,7 @@ export async function getOptionalMemory(
* @returns The array of connected tools
*/
export async function getTools(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
outputParser?: N8nOutputParser,
): Promise<Array<DynamicStructuredTool | Tool>> {
const tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;
@@ -343,7 +343,7 @@ export async function getTools(
* @returns The array of prompt messages
*/
export async function prepareMessages(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
itemIndex: number,
options: {
systemMessage?: string;
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/ToolsAgent/ToolsAgentV2.test.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/ToolsAgent/ToolsAgentV2.test.ts
index efffac9584..9e15b9d426 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/ToolsAgent/ToolsAgentV2.test.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/ToolsAgent/ToolsAgentV2.test.ts
@@ -2,7 +2,7 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { mock } from 'jest-mock-extended';
import { AgentExecutor } from 'langchain/agents';
import type { Tool } from 'langchain/tools';
-import type { IExecuteFunctions, INode } from 'n8n-workflow';
+import type { ISupplyDataFunctions, IExecuteFunctions, INode } from 'n8n-workflow';
import * as helpers from '../../../../../utils/helpers';
import * as outputParserModule from '../../../../../utils/output_parsers/N8nOutputParser';
@@ -620,4 +620,60 @@ describe('toolsAgentExecute', () => {
expect(result[0][0].json.output).toBe('Regular response');
});
});
+
+ it('should process items if SupplyDataContext is passed and isStreaming is not set', async () => {
+ const mockSupplyDataContext = mock();
+
+ // @ts-expect-error isStreaming is not part of ISupplyDataFunctions, but the mock would still resolve it
+ mockSupplyDataContext.isStreaming = undefined;
+
+ mockSupplyDataContext.logger = {
+ debug: jest.fn(),
+ info: jest.fn(),
+ warn: jest.fn(),
+ error: jest.fn(),
+ };
+
+ const mockNode = mock<INode>();
+ mockNode.typeVersion = 2.2; // version where streaming is supported
+ mockSupplyDataContext.getNode.mockReturnValue(mockNode);
+ mockSupplyDataContext.getInputData.mockReturnValue([{ json: { text: 'test input 1' } }]);
+
+ const mockModel = mock<BaseChatModel>();
+ mockModel.bindTools = jest.fn();
+ mockModel.lc_namespace = ['chat_models'];
+ mockSupplyDataContext.getInputConnectionData.mockResolvedValue(mockModel);
+
+ const mockTools = [mock<Tool>()];
+ jest.spyOn(helpers, 'getConnectedTools').mockResolvedValue(mockTools);
+
+ // Mock getNodeParameter to return default values
+ mockSupplyDataContext.getNodeParameter.mockImplementation((param, _i, defaultValue) => {
+ if (param === 'enableStreaming') return true;
+ if (param === 'text') return 'test input';
+ if (param === 'needsFallback') return false;
+ if (param === 'options.batching.batchSize') return defaultValue;
+ if (param === 'options.batching.delayBetweenBatches') return defaultValue;
+ if (param === 'options')
+ return {
+ systemMessage: 'You are a helpful assistant',
+ maxIterations: 10,
+ returnIntermediateSteps: false,
+ passthroughBinaryImages: true,
+ };
+ return defaultValue;
+ });
+
+ const mockExecutor = {
+ invoke: jest.fn().mockResolvedValueOnce({ output: { text: 'success 1' } }),
+ };
+
+ jest.spyOn(AgentExecutor, 'fromAgentAndTools').mockReturnValue(mockExecutor as any);
+
+ const result = await toolsAgentExecute.call(mockSupplyDataContext);
+
+ expect(mockExecutor.invoke).toHaveBeenCalledTimes(1);
+ expect(result[0]).toHaveLength(1);
+ expect(result[0][0].json).toEqual({ output: { text: 'success 1' } });
+ });
});
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/utils.test.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/utils.test.ts
index e685d7a1e0..8222db1bcc 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/utils.test.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/test/utils.test.ts
@@ -6,6 +6,7 @@ import { z } from 'zod';
import type { ZodObjectAny } from '../../../../types/types';
import { checkForStructuredTools } from '../agents/utils';
+import { getInputs } from '../V2/utils';
describe('checkForStructuredTools', () => {
let mockNode: INode;
@@ -105,3 +106,172 @@ describe('checkForStructuredTools', () => {
});
});
});
+
+describe('getInputs', () => {
+ it('should include all inputs when no flags are set to false', () => {
+ const inputs = getInputs(true, true, true);
+ expect(inputs).toEqual([
+ 'main',
+ {
+ type: 'ai_languageModel',
+ displayName: 'Chat Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_languageModel',
+ displayName: 'Fallback Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_memory',
+ displayName: 'Memory',
+ maxConnections: 1,
+ },
+ {
+ type: 'ai_tool',
+ displayName: 'Tool',
+ },
+ {
+ type: 'ai_outputParser',
+ displayName: 'Output Parser',
+ maxConnections: 1,
+ },
+ ]);
+ });
+
+ it('should exclude Output Parser when hasOutputParser is false', () => {
+ const inputs = getInputs(true, false, true);
+ expect(inputs).toEqual([
+ 'main',
+ {
+ type: 'ai_languageModel',
+ displayName: 'Chat Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_languageModel',
+ displayName: 'Fallback Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_memory',
+ displayName: 'Memory',
+ maxConnections: 1,
+ },
+ {
+ type: 'ai_tool',
+ displayName: 'Tool',
+ },
+ ]);
+ });
+
+ it('should exclude Fallback Model when needsFallback is false', () => {
+ const inputs = getInputs(true, true, false);
+ expect(inputs).toEqual([
+ 'main',
+ {
+ type: 'ai_languageModel',
+ displayName: 'Chat Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_memory',
+ displayName: 'Memory',
+ maxConnections: 1,
+ },
+ {
+ type: 'ai_tool',
+ displayName: 'Tool',
+ },
+ {
+ type: 'ai_outputParser',
+ displayName: 'Output Parser',
+ maxConnections: 1,
+ },
+ ]);
+ });
+
+ it('should include main input when hasMainInput is true', () => {
+ const inputs = getInputs(true, true, true);
+ expect(inputs[0]).toBe('main');
+ });
+
+ it('should exclude main input when hasMainInput is false', () => {
+ const inputs = getInputs(false, true, true);
+ expect(inputs).not.toContain('main');
+ });
+
+ it('should handle all flags set to false', () => {
+ const inputs = getInputs(false, false, false);
+ expect(inputs).toEqual([
+ {
+ type: 'ai_languageModel',
+ displayName: 'Chat Model',
+ required: true,
+ maxConnections: 1,
+ filter: {
+ excludedNodes: [
+ '@n8n/n8n-nodes-langchain.lmCohere',
+ '@n8n/n8n-nodes-langchain.lmOllama',
+ 'n8n/n8n-nodes-langchain.lmOpenAi',
+ '@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+ ],
+ },
+ },
+ {
+ type: 'ai_memory',
+ displayName: 'Memory',
+ maxConnections: 1,
+ },
+ {
+ type: 'ai_tool',
+ displayName: 'Tool',
+ },
+ ]);
+ });
+});
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index a84aa2bb07..ede0aa2499 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -50,6 +50,7 @@
"nodes": [
"dist/nodes/vendors/OpenAi/OpenAi.node.js",
"dist/nodes/agents/Agent/Agent.node.js",
+ "dist/nodes/agents/Agent/AgentTool.node.js",
"dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js",
"dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
"dist/nodes/chains/ChainLLM/ChainLlm.node.js",
diff --git a/packages/@n8n/nodes-langchain/utils/descriptions.ts b/packages/@n8n/nodes-langchain/utils/descriptions.ts
index 1c429e9533..9d235ff6b9 100644
--- a/packages/@n8n/nodes-langchain/utils/descriptions.ts
+++ b/packages/@n8n/nodes-langchain/utils/descriptions.ts
@@ -143,3 +143,14 @@ export const textFromPreviousNode: INodeProperties = {
},
disabledOptions: { show: { promptType: ['auto'] } },
};
+
+export const toolDescription: INodeProperties = {
+ displayName: 'Description',
+ name: 'toolDescription',
+ type: 'string',
+ default: 'AI Agent that can call other tools',
+ required: true,
+ typeOptions: { rows: 2 },
+ description:
+ 'Explain to the LLM what this tool does. A good, specific description helps the LLM produce the expected result much more often',
+};
diff --git a/packages/@n8n/nodes-langchain/utils/helpers.ts b/packages/@n8n/nodes-langchain/utils/helpers.ts
index e182c8c73d..ecc531d070 100644
--- a/packages/@n8n/nodes-langchain/utils/helpers.ts
+++ b/packages/@n8n/nodes-langchain/utils/helpers.ts
@@ -75,16 +75,16 @@ export function isToolsInstance(model: unknown): model is Tool {
}
export function getPromptInputByType(options: {
- ctx: IExecuteFunctions;
+ ctx: IExecuteFunctions | ISupplyDataFunctions;
i: number;
promptTypeKey: string;
inputKey: string;
}) {
const { ctx, i, promptTypeKey, inputKey } = options;
- const prompt = ctx.getNodeParameter(promptTypeKey, i) as string;
+ const promptType = ctx.getNodeParameter(promptTypeKey, i, 'define') as string;
let input;
- if (prompt === 'auto') {
+ if (promptType === 'auto') {
input = ctx.evaluateExpression('{{ $json["chatInput"] }}', i) as string;
} else {
input = ctx.getNodeParameter(inputKey, i) as string;
@@ -186,7 +186,7 @@ export function escapeSingleCurlyBrackets(text?: string): string | undefined {
}
export const getConnectedTools = async (
- ctx: IExecuteFunctions | IWebhookFunctions,
+ ctx: IExecuteFunctions | IWebhookFunctions | ISupplyDataFunctions,
enforceUniqueNames: boolean,
convertStructuredTool: boolean = true,
escapeCurlyBrackets: boolean = false,
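
The `getPromptInputByType` change leans on `getNodeParameter`'s default-value argument: the AgentTool node defines no `promptType` parameter, so the helper now falls back to `'define'` and reads the text input directly instead of throwing. A sketch of that behaviour under an assumed minimal parameter store:

```ts
// Assumed minimal model of parameter lookup — the real IExecuteFunctions
// implementation differs, but the default-value contract is the point here.
type Params = Record<string, unknown>;

function getNodeParameter(params: Params, name: string, defaultValue?: unknown): unknown {
	if (!(name in params)) {
		if (defaultValue !== undefined) return defaultValue; // tolerant lookup
		throw new Error(`Parameter "${name}" is not defined`);
	}
	return params[name];
}

// AgentTool has no promptType, so the fallback keeps the helper working:
getNodeParameter({ text: 'hi' }, 'promptType', 'define'); // -> 'define'
```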
diff --git a/packages/@n8n/nodes-langchain/utils/output_parsers/N8nOutputParser.ts b/packages/@n8n/nodes-langchain/utils/output_parsers/N8nOutputParser.ts
index 633ce19386..733406919e 100644
--- a/packages/@n8n/nodes-langchain/utils/output_parsers/N8nOutputParser.ts
+++ b/packages/@n8n/nodes-langchain/utils/output_parsers/N8nOutputParser.ts
@@ -1,4 +1,4 @@
-import type { IExecuteFunctions } from 'n8n-workflow';
+import type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';
import { N8nItemListOutputParser } from './N8nItemListOutputParser';
@@ -13,7 +13,7 @@ export type N8nOutputParser =
export { N8nOutputFixingParser, N8nItemListOutputParser, N8nStructuredOutputParser };
export async function getOptionalOutputParser(
- ctx: IExecuteFunctions,
+ ctx: IExecuteFunctions | ISupplyDataFunctions,
index: number = 0,
): Promise {
let outputParser: N8nOutputParser | undefined;