feat(AI Agent Tool Node): Add Agent Tool (#17108)
@@ -45,6 +45,7 @@ export class Agent extends VersionedNodeType {
			2: new AgentV2(baseDescription),
			2.1: new AgentV2(baseDescription),
			2.2: new AgentV2(baseDescription),
+			// IMPORTANT Reminder to update AgentTool
		};

		super(nodeVersions, baseDescription);

@@ -0,0 +1,34 @@
import type { INodeTypeBaseDescription, IVersionedNodeType } from 'n8n-workflow';
import { VersionedNodeType } from 'n8n-workflow';

import { AgentToolV2 } from './V2/AgentToolV2.node';

export class AgentTool extends VersionedNodeType {
	constructor() {
		const baseDescription: INodeTypeBaseDescription = {
			displayName: 'AI Agent Tool',
			name: 'agentTool',
			icon: 'fa:robot',
			iconColor: 'black',
			group: ['transform'],
			description: 'Generates an action plan and executes it. Can use external tools.',
			codex: {
				alias: ['LangChain', 'Chat', 'Conversational', 'Plan and Execute', 'ReAct', 'Tools'],
				categories: ['AI'],
				subcategories: {
					AI: ['Tools'],
					Tools: ['Other Tools'],
				},
			},
			defaultVersion: 2.2,
		};

		const nodeVersions: IVersionedNodeType['nodeVersions'] = {
			// Should have the same versioning as Agent node
			// because internal agent logic often checks for node version
			2.2: new AgentToolV2(baseDescription),
		};

		super(nodeVersions, baseDescription);
	}
}

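For readers skimming the diff: AgentTool registers a single concrete implementation (2.2) but deliberately keeps the same version numbering as the Agent node, because internal agent logic checks the node version. A minimal sketch of how a versioned node type resolves to a concrete implementation follows; it is illustrative and simplified, not the actual n8n-workflow code.

// Illustrative sketch only: the real resolution logic lives in
// n8n-workflow's VersionedNodeType; names here are simplified stand-ins.
interface MinimalNodeType {
	description: { version: number[] };
}

class MinimalVersionedNodeType {
	constructor(
		private readonly nodeVersions: Record<number, MinimalNodeType>,
		private readonly defaultVersion: number,
	) {}

	// A workflow that pins typeVersion 2.2 gets that implementation;
	// newly created nodes fall back to the defaultVersion from the base description.
	getNodeType(version?: number): MinimalNodeType {
		return this.nodeVersions[version ?? this.defaultVersion];
	}
}
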
@@ -0,0 +1,90 @@
import { NodeConnectionTypes } from 'n8n-workflow';
import type {
	IExecuteFunctions,
	INodeExecutionData,
	INodeType,
	INodeTypeDescription,
	INodeTypeBaseDescription,
	ISupplyDataFunctions,
} from 'n8n-workflow';

import { textInput, toolDescription } from '@utils/descriptions';

import { getInputs } from './utils';
import { getToolsAgentProperties } from '../agents/ToolsAgent/V2/description';
import { toolsAgentExecute } from '../agents/ToolsAgent/V2/execute';

export class AgentToolV2 implements INodeType {
	description: INodeTypeDescription;
	constructor(baseDescription: INodeTypeBaseDescription) {
		this.description = {
			...baseDescription,
			version: [2.2],
			defaults: {
				name: 'AI Agent Tool',
				color: '#404040',
			},
			inputs: `={{
				((hasOutputParser, needsFallback) => {
					${getInputs.toString()};
					return getInputs(false, hasOutputParser, needsFallback)
				})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)
			}}`,
			outputs: [NodeConnectionTypes.AiTool],
			properties: [
				toolDescription,
				{
					...textInput,
				},
				{
					displayName: 'Require Specific Output Format',
					name: 'hasOutputParser',
					type: 'boolean',
					default: false,
					noDataExpression: true,
				},
				{
					displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionTypes.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
					name: 'notice',
					type: 'notice',
					default: '',
					displayOptions: {
						show: {
							hasOutputParser: [true],
						},
					},
				},
				{
					displayName: 'Enable Fallback Model',
					name: 'needsFallback',
					type: 'boolean',
					default: false,
					noDataExpression: true,
					displayOptions: {
						show: {
							'@version': [{ _cnd: { gte: 2.1 } }],
						},
					},
				},
				{
					displayName:
						'Connect an additional language model on the canvas to use it as a fallback if the main model fails',
					name: 'fallbackNotice',
					type: 'notice',
					default: '',
					displayOptions: {
						show: {
							needsFallback: [true],
						},
					},
				},
				...getToolsAgentProperties({ withStreaming: false }),
			],
		};
	}

	// Automatically wrapped as a tool
	async execute(this: IExecuteFunctions | ISupplyDataFunctions): Promise<INodeExecutionData[][]> {
		return await toolsAgentExecute.call(this);
	}
}

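A note on the `inputs` expression above: `${getInputs.toString()}` inlines the source of `getInputs` into the expression string, so the frontend can re-evaluate the function without importing backend code. This is also why `getInputs` (see utils.ts below) returns the string literal 'main' rather than the NodeConnectionType enum: the serialized function body must be self-contained when it is evaluated. A small standalone sketch of the pattern, with illustrative names that are not n8n APIs:

// The helper is stringified into the expression, then re-hydrated and
// executed in an isolated scope, mimicking the frontend expression engine.
function pickInputs(hasMain: boolean): string[] {
	// String literals only: an enum reference would not survive serialization.
	return hasMain ? ['main', 'ai_languageModel'] : ['ai_languageModel'];
}

const expressionBody = `((hasMain) => {
	${pickInputs.toString()};
	return pickInputs(hasMain);
})(false)`;

// Evaluate the serialized function with no access to this module's scope.
const resolved = new Function(`return ${expressionBody}`)() as string[];
console.log(resolved); // ['ai_languageModel']
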
@@ -1,104 +1,18 @@
import { NodeConnectionTypes } from 'n8n-workflow';
import type {
-	INodeInputConfiguration,
-	INodeInputFilter,
	IExecuteFunctions,
	INodeExecutionData,
	INodeType,
	INodeTypeDescription,
-	NodeConnectionType,
	INodeTypeBaseDescription,
} from 'n8n-workflow';

import { promptTypeOptions, textFromPreviousNode, textInput } from '@utils/descriptions';

-import { toolsAgentProperties } from '../agents/ToolsAgent/V2/description';
+import { getInputs } from './utils';
+import { getToolsAgentProperties } from '../agents/ToolsAgent/V2/description';
import { toolsAgentExecute } from '../agents/ToolsAgent/V2/execute';

-// Function used in the inputs expression to figure out which inputs to
-// display based on the agent type
-function getInputs(
-	hasOutputParser?: boolean,
-	needsFallback?: boolean,
-): Array<NodeConnectionType | INodeInputConfiguration> {
-	interface SpecialInput {
-		type: NodeConnectionType;
-		filter?: INodeInputFilter;
-		displayName: string;
-		required?: boolean;
-	}
-
-	const getInputData = (
-		inputs: SpecialInput[],
-	): Array<NodeConnectionType | INodeInputConfiguration> => {
-		return inputs.map(({ type, filter, displayName, required }) => {
-			const input: INodeInputConfiguration = {
-				type,
-				displayName,
-				required,
-				maxConnections: ['ai_languageModel', 'ai_memory', 'ai_outputParser'].includes(type)
-					? 1
-					: undefined,
-			};
-
-			if (filter) {
-				input.filter = filter;
-			}
-
-			return input;
-		});
-	};
-
-	let specialInputs: SpecialInput[] = [
-		{
-			type: 'ai_languageModel',
-			displayName: 'Chat Model',
-			required: true,
-			filter: {
-				excludedNodes: [
-					'@n8n/n8n-nodes-langchain.lmCohere',
-					'@n8n/n8n-nodes-langchain.lmOllama',
-					'n8n/n8n-nodes-langchain.lmOpenAi',
-					'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
-				],
-			},
-		},
-		{
-			type: 'ai_languageModel',
-			displayName: 'Fallback Model',
-			required: true,
-			filter: {
-				excludedNodes: [
-					'@n8n/n8n-nodes-langchain.lmCohere',
-					'@n8n/n8n-nodes-langchain.lmOllama',
-					'n8n/n8n-nodes-langchain.lmOpenAi',
-					'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
-				],
-			},
-		},
-		{
-			displayName: 'Memory',
-			type: 'ai_memory',
-		},
-		{
-			displayName: 'Tool',
-			type: 'ai_tool',
-		},
-		{
-			displayName: 'Output Parser',
-			type: 'ai_outputParser',
-		},
-	];
-
-	if (hasOutputParser === false) {
-		specialInputs = specialInputs.filter((input) => input.type !== 'ai_outputParser');
-	}
-	if (needsFallback === false) {
-		specialInputs = specialInputs.filter((input) => input.displayName !== 'Fallback Model');
-	}
-	return ['main', ...getInputData(specialInputs)];
-}

export class AgentV2 implements INodeType {
	description: INodeTypeDescription;

@@ -113,7 +27,7 @@ export class AgentV2 implements INodeType {
			inputs: `={{
				((hasOutputParser, needsFallback) => {
					${getInputs.toString()};
-					return getInputs(hasOutputParser, needsFallback)
+					return getInputs(true, hasOutputParser, needsFallback);
				})($parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true, $parameter.needsFallback !== undefined && $parameter.needsFallback === true)
			}}`,
			outputs: [NodeConnectionTypes.Main],
@@ -184,7 +98,7 @@ export class AgentV2 implements INodeType {
						},
					},
				},
-				...toolsAgentProperties,
+				...getToolsAgentProperties({ withStreaming: true }),
			],
			hints: [
				{

packages/@n8n/nodes-langchain/nodes/agents/Agent/V2/utils.ts (new file, 95 lines)
@@ -0,0 +1,95 @@
import {
	type INodeInputConfiguration,
	type INodeInputFilter,
	type NodeConnectionType,
} from 'n8n-workflow';

// Function used in the inputs expression to figure out which inputs to
// display based on the agent type
export function getInputs(
	hasMainInput?: boolean,
	hasOutputParser?: boolean,
	needsFallback?: boolean,
): Array<NodeConnectionType | INodeInputConfiguration> {
	interface SpecialInput {
		type: NodeConnectionType;
		filter?: INodeInputFilter;
		displayName: string;
		required?: boolean;
	}

	const getInputData = (
		inputs: SpecialInput[],
	): Array<NodeConnectionType | INodeInputConfiguration> => {
		return inputs.map(({ type, filter, displayName, required }) => {
			const input: INodeInputConfiguration = {
				type,
				displayName,
				required,
				maxConnections: ['ai_languageModel', 'ai_memory', 'ai_outputParser'].includes(type)
					? 1
					: undefined,
			};

			if (filter) {
				input.filter = filter;
			}

			return input;
		});
	};

	let specialInputs: SpecialInput[] = [
		{
			type: 'ai_languageModel',
			displayName: 'Chat Model',
			required: true,
			filter: {
				excludedNodes: [
					'@n8n/n8n-nodes-langchain.lmCohere',
					'@n8n/n8n-nodes-langchain.lmOllama',
					'n8n/n8n-nodes-langchain.lmOpenAi',
					'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
				],
			},
		},
		{
			type: 'ai_languageModel',
			displayName: 'Fallback Model',
			required: true,
			filter: {
				excludedNodes: [
					'@n8n/n8n-nodes-langchain.lmCohere',
					'@n8n/n8n-nodes-langchain.lmOllama',
					'n8n/n8n-nodes-langchain.lmOpenAi',
					'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
				],
			},
		},
		{
			displayName: 'Memory',
			type: 'ai_memory',
		},
		{
			displayName: 'Tool',
			type: 'ai_tool',
		},
		{
			displayName: 'Output Parser',
			type: 'ai_outputParser',
		},
	];

	if (hasOutputParser === false) {
		specialInputs = specialInputs.filter((input) => input.type !== 'ai_outputParser');
	}
	if (needsFallback === false) {
		specialInputs = specialInputs.filter((input) => input.displayName !== 'Fallback Model');
	}

	// Note cannot use NodeConnectionType.Main
	// otherwise expression won't evaluate correctly on the FE
	const mainInputs = hasMainInput ? ['main' as NodeConnectionType] : [];
	return [...mainInputs, ...getInputData(specialInputs)];
}

@@ -4,7 +4,17 @@ import { getBatchingOptionFields } from '@utils/sharedFields';

import { commonOptions } from '../options';

-export const toolsAgentProperties: INodeProperties[] = [
+const enableStreamingOption: INodeProperties = {
+	displayName: 'Enable Streaming',
+	name: 'enableStreaming',
+	type: 'boolean',
+	default: true,
+	description: 'Whether this agent will stream the response in real-time as it generates text',
+};
+
+export const getToolsAgentProperties = ({
+	withStreaming,
+}: { withStreaming: boolean }): INodeProperties[] => [
	{
		displayName: 'Options',
		name: 'options',
@@ -14,14 +24,7 @@ export const toolsAgentProperties: INodeProperties[] = [
		options: [
			...commonOptions,
			getBatchingOptionFields(undefined, 1),
-			{
-				displayName: 'Enable Streaming',
-				name: 'enableStreaming',
-				type: 'boolean',
-				default: true,
-				description:
-					'Whether this agent will stream the response in real-time as it generates text',
-			},
+			...(withStreaming ? [enableStreamingOption] : []),
		],
		displayOptions: {
			hide: {

@@ -13,7 +13,7 @@ import type { BaseChatMemory } from 'langchain/memory';
import type { DynamicStructuredTool, Tool } from 'langchain/tools';
import omit from 'lodash/omit';
import { jsonParse, NodeOperationError, sleep } from 'n8n-workflow';
-import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
+import type { IExecuteFunctions, INodeExecutionData, ISupplyDataFunctions } from 'n8n-workflow';
import assert from 'node:assert';

import { getPromptInputByType } from '@utils/helpers';
@@ -167,9 +167,13 @@ async function processEventStream(
 * creates the agent, and processes each input item. The error handling for each item is also
 * managed here based on the node's continueOnFail setting.
 *
+ * @param this Execute context. A SupplyDataContext is passed when the agent runs as a tool.
+ *
 * @returns The array of execution data for all processed items
 */
-export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
+export async function toolsAgentExecute(
+	this: IExecuteFunctions | ISupplyDataFunctions,
+): Promise<INodeExecutionData[][]> {
	this.logger.debug('Executing Tools Agent V2');

	const returnData: INodeExecutionData[] = [];
@@ -247,9 +251,14 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
	const executeOptions = { signal: this.getExecutionCancelSignal() };

	// Check if streaming is actually available
-	const isStreamingAvailable = this.isStreaming();
+	const isStreamingAvailable = 'isStreaming' in this ? this.isStreaming?.() : undefined;

-	if (enableStreaming && isStreamingAvailable && this.getNode().typeVersion >= 2.1) {
+	if (
+		'isStreaming' in this &&
+		enableStreaming &&
+		isStreamingAvailable &&
+		this.getNode().typeVersion >= 2.1
+	) {
		const chatHistory = await memory?.chatHistory.getMessages();
		const eventStream = executor.streamEvents(
			{

@@ -7,7 +7,7 @@ import type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output
import type { BaseChatMemory } from 'langchain/memory';
import { DynamicStructuredTool, type Tool } from 'langchain/tools';
import { BINARY_ENCODING, jsonParse, NodeConnectionTypes, NodeOperationError } from 'n8n-workflow';
-import type { IExecuteFunctions } from 'n8n-workflow';
+import type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';
import type { ZodObject } from 'zod';
import { z } from 'zod';

@@ -42,7 +42,7 @@ export function getOutputParserSchema(
 * @returns A HumanMessage containing the binary image messages.
 */
export async function extractBinaryMessages(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
	itemIndex: number,
): Promise<HumanMessage> {
	const binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};
@@ -263,7 +263,7 @@ export const getAgentStepsParser =
 * @returns The validated chat model
 */
export async function getChatModel(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
	index: number = 0,
): Promise<BaseChatModel | undefined> {
	const connectedModels = await ctx.getInputConnectionData(NodeConnectionTypes.AiLanguageModel, 0);
@@ -297,7 +297,7 @@ export async function getChatModel(
 * @returns The connected memory (if any)
 */
export async function getOptionalMemory(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
): Promise<BaseChatMemory | undefined> {
	return (await ctx.getInputConnectionData(NodeConnectionTypes.AiMemory, 0)) as
		| BaseChatMemory
@@ -313,7 +313,7 @@ export async function getOptionalMemory(
 * @returns The array of connected tools
 */
export async function getTools(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
	outputParser?: N8nOutputParser,
): Promise<Array<DynamicStructuredTool | Tool>> {
	const tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;
@@ -343,7 +343,7 @@ export async function getTools(
 * @returns The array of prompt messages
 */
export async function prepareMessages(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
	itemIndex: number,
	options: {
		systemMessage?: string;

@@ -2,7 +2,7 @@ import type { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { mock } from 'jest-mock-extended';
import { AgentExecutor } from 'langchain/agents';
import type { Tool } from 'langchain/tools';
-import type { IExecuteFunctions, INode } from 'n8n-workflow';
+import type { ISupplyDataFunctions, IExecuteFunctions, INode } from 'n8n-workflow';

import * as helpers from '../../../../../utils/helpers';
import * as outputParserModule from '../../../../../utils/output_parsers/N8nOutputParser';
@@ -620,4 +620,60 @@ describe('toolsAgentExecute', () => {
		expect(result[0][0].json.output).toBe('Regular response');
	});
});
+
+	it('should process items if SupplyDataContext is passed and isStreaming is not set', async () => {
+		const mockSupplyDataContext = mock<ISupplyDataFunctions>();
+
+		// @ts-expect-error isStreaming is not supported by SupplyDataFunctions, but mock object still resolves it
+		mockSupplyDataContext.isStreaming = undefined;
+
+		mockSupplyDataContext.logger = {
+			debug: jest.fn(),
+			info: jest.fn(),
+			warn: jest.fn(),
+			error: jest.fn(),
+		};
+
+		const mockNode = mock<INode>();
+		mockNode.typeVersion = 2.2; // version where streaming is supported
+		mockSupplyDataContext.getNode.mockReturnValue(mockNode);
+		mockSupplyDataContext.getInputData.mockReturnValue([{ json: { text: 'test input 1' } }]);
+
+		const mockModel = mock<BaseChatModel>();
+		mockModel.bindTools = jest.fn();
+		mockModel.lc_namespace = ['chat_models'];
+		mockSupplyDataContext.getInputConnectionData.mockResolvedValue(mockModel);
+
+		const mockTools = [mock<Tool>()];
+		jest.spyOn(helpers, 'getConnectedTools').mockResolvedValue(mockTools);
+
+		// Mock getNodeParameter to return default values
+		mockSupplyDataContext.getNodeParameter.mockImplementation((param, _i, defaultValue) => {
+			if (param === 'enableStreaming') return true;
+			if (param === 'text') return 'test input';
+			if (param === 'needsFallback') return false;
+			if (param === 'options.batching.batchSize') return defaultValue;
+			if (param === 'options.batching.delayBetweenBatches') return defaultValue;
+			if (param === 'options')
+				return {
+					systemMessage: 'You are a helpful assistant',
+					maxIterations: 10,
+					returnIntermediateSteps: false,
+					passthroughBinaryImages: true,
+				};
+			return defaultValue;
+		});
+
+		const mockExecutor = {
+			invoke: jest.fn().mockResolvedValueOnce({ output: { text: 'success 1' } }),
+		};
+
+		jest.spyOn(AgentExecutor, 'fromAgentAndTools').mockReturnValue(mockExecutor as any);
+
+		const result = await toolsAgentExecute.call(mockSupplyDataContext);
+
+		expect(mockExecutor.invoke).toHaveBeenCalledTimes(1);
+		expect(result[0]).toHaveLength(1);
+		expect(result[0][0].json).toEqual({ output: { text: 'success 1' } });
+	});
});

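The streaming guard exercised by the test above relies on TypeScript's `in` operator narrowing: `toolsAgentExecute` now receives a union of contexts, and only the regular execute context exposes `isStreaming`, so streaming is skipped when the agent runs as a tool. A reduced sketch of the pattern, with illustrative types standing in for IExecuteFunctions and ISupplyDataFunctions:

// Only the regular execute context can report streaming support.
interface ExecuteContextLike {
	isStreaming(): boolean;
}

// A supply-data context (agent used as a tool) has no isStreaming member.
interface SupplyDataContextLike {
	getNodeName(): string;
}

function shouldStream(ctx: ExecuteContextLike | SupplyDataContextLike): boolean {
	// 'isStreaming' in ctx narrows ctx to ExecuteContextLike in this branch.
	return 'isStreaming' in ctx ? ctx.isStreaming() : false;
}

const asTool: SupplyDataContextLike = { getNodeName: () => 'AI Agent Tool' };
console.log(shouldStream(asTool)); // false: agent-as-tool never streams
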
@@ -6,6 +6,7 @@ import { z } from 'zod';

import type { ZodObjectAny } from '../../../../types/types';
import { checkForStructuredTools } from '../agents/utils';
+import { getInputs } from '../V2/utils';

describe('checkForStructuredTools', () => {
	let mockNode: INode;
@@ -105,3 +106,172 @@ describe('checkForStructuredTools', () => {
		});
	});
});
+
+describe('getInputs', () => {
+	it('should include all inputs when no flags are set to false', () => {
+		const inputs = getInputs(true, true, true);
+		expect(inputs).toEqual([
+			'main',
+			{
+				type: 'ai_languageModel',
+				displayName: 'Chat Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_languageModel',
+				displayName: 'Fallback Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_memory',
+				displayName: 'Memory',
+				maxConnections: 1,
+			},
+			{
+				type: 'ai_tool',
+				displayName: 'Tool',
+			},
+			{
+				type: 'ai_outputParser',
+				displayName: 'Output Parser',
+				maxConnections: 1,
+			},
+		]);
+	});
+
+	it('should exclude Output Parser when hasOutputParser is false', () => {
+		const inputs = getInputs(true, false, true);
+		expect(inputs).toEqual([
+			'main',
+			{
+				type: 'ai_languageModel',
+				displayName: 'Chat Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_languageModel',
+				displayName: 'Fallback Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_memory',
+				displayName: 'Memory',
+				maxConnections: 1,
+			},
+			{
+				type: 'ai_tool',
+				displayName: 'Tool',
+			},
+		]);
+	});
+
+	it('should exclude Fallback Model when needsFallback is false', () => {
+		const inputs = getInputs(true, true, false);
+		expect(inputs).toEqual([
+			'main',
+			{
+				type: 'ai_languageModel',
+				displayName: 'Chat Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_memory',
+				displayName: 'Memory',
+				maxConnections: 1,
+			},
+			{
+				type: 'ai_tool',
+				displayName: 'Tool',
+			},
+			{
+				type: 'ai_outputParser',
+				displayName: 'Output Parser',
+				maxConnections: 1,
+			},
+		]);
+	});
+
+	it('should include main input when hasMainInput is true', () => {
+		const inputs = getInputs(true, true, true);
+		expect(inputs[0]).toBe('main');
+	});
+
+	it('should exclude main input when hasMainInput is false', () => {
+		const inputs = getInputs(false, true, true);
+		expect(inputs).not.toContain('main');
+	});
+
+	it('should handle all flags set to false', () => {
+		const inputs = getInputs(false, false, false);
+		expect(inputs).toEqual([
+			{
+				type: 'ai_languageModel',
+				displayName: 'Chat Model',
+				required: true,
+				maxConnections: 1,
+				filter: {
+					excludedNodes: [
+						'@n8n/n8n-nodes-langchain.lmCohere',
+						'@n8n/n8n-nodes-langchain.lmOllama',
+						'n8n/n8n-nodes-langchain.lmOpenAi',
+						'@n8n/n8n-nodes-langchain.lmOpenHuggingFaceInference',
+					],
+				},
+			},
+			{
+				type: 'ai_memory',
+				displayName: 'Memory',
+				maxConnections: 1,
+			},
+			{
+				type: 'ai_tool',
+				displayName: 'Tool',
+			},
+		]);
+	});
+});

@@ -50,6 +50,7 @@
	"nodes": [
		"dist/nodes/vendors/OpenAi/OpenAi.node.js",
		"dist/nodes/agents/Agent/Agent.node.js",
+		"dist/nodes/agents/Agent/AgentTool.node.js",
		"dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js",
		"dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
		"dist/nodes/chains/ChainLLM/ChainLlm.node.js",

@@ -143,3 +143,14 @@ export const textFromPreviousNode: INodeProperties = {
	},
	disabledOptions: { show: { promptType: ['auto'] } },
};
+
+export const toolDescription: INodeProperties = {
+	displayName: 'Description',
+	name: 'toolDescription',
+	type: 'string',
+	default: 'AI Agent that can call other tools',
+	required: true,
+	typeOptions: { rows: 2 },
+	description:
+		'Explain to the LLM what this tool does; a good, specific description allows LLMs to produce the expected results much more often',
+};

@@ -75,16 +75,16 @@ export function isToolsInstance(model: unknown): model is Tool {
}

export function getPromptInputByType(options: {
-	ctx: IExecuteFunctions;
+	ctx: IExecuteFunctions | ISupplyDataFunctions;
	i: number;
	promptTypeKey: string;
	inputKey: string;
}) {
	const { ctx, i, promptTypeKey, inputKey } = options;
-	const prompt = ctx.getNodeParameter(promptTypeKey, i) as string;
+	const promptType = ctx.getNodeParameter(promptTypeKey, i, 'define') as string;

	let input;
-	if (prompt === 'auto') {
+	if (promptType === 'auto') {
		input = ctx.evaluateExpression('{{ $json["chatInput"] }}', i) as string;
	} else {
		input = ctx.getNodeParameter(inputKey, i) as string;
@@ -186,7 +186,7 @@ export function escapeSingleCurlyBrackets(text?: string): string | undefined {
}

export const getConnectedTools = async (
-	ctx: IExecuteFunctions | IWebhookFunctions,
+	ctx: IExecuteFunctions | IWebhookFunctions | ISupplyDataFunctions,
	enforceUniqueNames: boolean,
	convertStructuredTool: boolean = true,
	escapeCurlyBrackets: boolean = false,

@@ -1,4 +1,4 @@
-import type { IExecuteFunctions } from 'n8n-workflow';
+import type { IExecuteFunctions, ISupplyDataFunctions } from 'n8n-workflow';
import { NodeConnectionTypes } from 'n8n-workflow';

import { N8nItemListOutputParser } from './N8nItemListOutputParser';
@@ -13,7 +13,7 @@ export type N8nOutputParser =
export { N8nOutputFixingParser, N8nItemListOutputParser, N8nStructuredOutputParser };

export async function getOptionalOutputParser(
-	ctx: IExecuteFunctions,
+	ctx: IExecuteFunctions | ISupplyDataFunctions,
	index: number = 0,
): Promise<N8nOutputParser | undefined> {
	let outputParser: N8nOutputParser | undefined;