refactor(Basic LLM Chain Node): Refactor Basic LLM Chain & add tests (#13850)
@@ -65,9 +65,11 @@ function getInputs(
 		type,
 		displayName,
 		required: isModelType,
-		maxConnections: [NodeConnectionType.AiLanguageModel, NodeConnectionType.AiMemory].includes(
-			type as NodeConnectionType,
-		)
+		maxConnections: [
+			NodeConnectionType.AiLanguageModel,
+			NodeConnectionType.AiMemory,
+			NodeConnectionType.AiOutputParser,
+		].includes(type as NodeConnectionType)
 			? 1
 			: undefined,
 	};
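Note on the hunk above: the output-parser connection joins the model and memory types that are capped at a single connection, which is what allows the single-parser API used throughout the rest of this commit. A minimal standalone sketch of the same check (the helper name and constant are illustrative, not from the PR):

```typescript
import { NodeConnectionType } from 'n8n-workflow';

// Connection types the node now limits to one connection each.
const singleConnectionTypes: NodeConnectionType[] = [
	NodeConnectionType.AiLanguageModel,
	NodeConnectionType.AiMemory,
	NodeConnectionType.AiOutputParser,
];

// Returns 1 to cap the connection count in the editor, or undefined to leave it unbounded.
function maxConnectionsFor(type: NodeConnectionType): number | undefined {
	return singleConnectionTypes.includes(type) ? 1 : undefined;
}
```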
@@ -1,13 +1,11 @@
 import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';
-import type { BaseOutputParser } from '@langchain/core/output_parsers';
 import { PromptTemplate } from '@langchain/core/prompts';
 import { initializeAgentExecutorWithOptions } from 'langchain/agents';
-import { CombiningOutputParser } from 'langchain/output_parsers';
 import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
 import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';

 import { isChatInstance, getPromptInputByType, getConnectedTools } from '@utils/helpers';
-import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser';
+import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import { throwIfToolSchema } from '@utils/schemaParsing';
 import { getTracingConfig } from '@utils/tracing';

@@ -29,7 +27,7 @@ export async function conversationalAgentExecute(
 		| undefined;

 	const tools = await getConnectedTools(this, nodeVersion >= 1.5, true, true);
-	const outputParsers = await getOptionalOutputParsers(this);
+	const outputParser = await getOptionalOutputParser(this);

 	await checkForStructuredTools(tools, this.getNode(), 'Conversational Agent');

@@ -58,24 +56,15 @@ export async function conversationalAgentExecute(

 	const returnData: INodeExecutionData[] = [];

-	let outputParser: BaseOutputParser | undefined;
 	let prompt: PromptTemplate | undefined;
-	if (outputParsers.length) {
-		if (outputParsers.length === 1) {
-			outputParser = outputParsers[0];
-		} else {
-			outputParser = new CombiningOutputParser(...outputParsers);
-		}
-
-		if (outputParser) {
-			const formatInstructions = outputParser.getFormatInstructions();
-
-			prompt = new PromptTemplate({
-				template: '{input}\n{formatInstructions}',
-				inputVariables: ['input'],
-				partialVariables: { formatInstructions },
-			});
-		}
+	if (outputParser) {
+		const formatInstructions = outputParser.getFormatInstructions();
+
+		prompt = new PromptTemplate({
+			template: '{input}\n{formatInstructions}',
+			inputVariables: ['input'],
+			partialVariables: { formatInstructions },
+		});
 	}

 	const items = this.getInputData();
@@ -104,7 +93,7 @@ export async function conversationalAgentExecute(

 	const response = await agentExecutor
 		.withConfig(getTracingConfig(this))
-		.invoke({ input, outputParsers });
+		.invoke({ input, outputParser });

 	if (outputParser) {
 		response.output = await extractParsedOutput(this, outputParser, response.output as string);
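The Conversational Agent hunks above show the pattern repeated in every executor in this commit: `getOptionalOutputParsers` (an array) becomes `getOptionalOutputParser` (a single, possibly undefined parser), so the `CombiningOutputParser` branch disappears. A sketch of that pattern as a standalone helper, assuming the types exported by `@utils/output_parsers/N8nOutputParser` (`buildPromptWithInstructions` is a hypothetical name):

```typescript
import { PromptTemplate } from '@langchain/core/prompts';
import type { IExecuteFunctions } from 'n8n-workflow';

import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';

// With at most one parser connected, the prompt either gets the parser's
// format instructions appended or stays undefined.
async function buildPromptWithInstructions(
	ctx: IExecuteFunctions,
): Promise<PromptTemplate | undefined> {
	const outputParser = await getOptionalOutputParser(ctx);
	if (!outputParser) return undefined;

	const formatInstructions = outputParser.getFormatInstructions();
	return new PromptTemplate({
		template: '{input}\n{formatInstructions}',
		inputVariables: ['input'],
		partialVariables: { formatInstructions },
	});
}
```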
@@ -1,10 +1,8 @@
-import type { BaseOutputParser } from '@langchain/core/output_parsers';
 import { PromptTemplate } from '@langchain/core/prompts';
 import { ChatOpenAI } from '@langchain/openai';
 import type { AgentExecutorInput } from 'langchain/agents';
 import { AgentExecutor, OpenAIAgent } from 'langchain/agents';
 import { BufferMemory, type BaseChatMemory } from 'langchain/memory';
-import { CombiningOutputParser } from 'langchain/output_parsers';
 import {
 	type IExecuteFunctions,
 	type INodeExecutionData,
@@ -13,7 +11,7 @@ import {
 } from 'n8n-workflow';

 import { getConnectedTools, getPromptInputByType } from '@utils/helpers';
-import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser';
+import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import { getTracingConfig } from '@utils/tracing';

 import { extractParsedOutput } from '../utils';
@@ -38,7 +36,7 @@ export async function openAiFunctionsAgentExecute(
 		| BaseChatMemory
 		| undefined;
 	const tools = await getConnectedTools(this, nodeVersion >= 1.5, false);
-	const outputParsers = await getOptionalOutputParsers(this);
+	const outputParser = await getOptionalOutputParser(this);
 	const options = this.getNodeParameter('options', 0, {}) as {
 		systemMessage?: string;
 		maxIterations?: number;
@@ -67,12 +65,8 @@ export async function openAiFunctionsAgentExecute(

 	const returnData: INodeExecutionData[] = [];

-	let outputParser: BaseOutputParser | undefined;
 	let prompt: PromptTemplate | undefined;
-	if (outputParsers.length) {
-		outputParser =
-			outputParsers.length === 1 ? outputParsers[0] : new CombiningOutputParser(...outputParsers);
-
+	if (outputParser) {
 		const formatInstructions = outputParser.getFormatInstructions();

 		prompt = new PromptTemplate({
@@ -107,7 +101,7 @@ export async function openAiFunctionsAgentExecute(

 	const response = await agentExecutor
 		.withConfig(getTracingConfig(this))
-		.invoke({ input, outputParsers });
+		.invoke({ input, outputParser });

 	if (outputParser) {
 		response.output = await extractParsedOutput(this, outputParser, response.output as string);
@@ -1,8 +1,6 @@
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import type { BaseOutputParser } from '@langchain/core/output_parsers';
 import { PromptTemplate } from '@langchain/core/prompts';
 import { PlanAndExecuteAgentExecutor } from 'langchain/experimental/plan_and_execute';
-import { CombiningOutputParser } from 'langchain/output_parsers';
 import {
 	type IExecuteFunctions,
 	type INodeExecutionData,
@@ -11,7 +9,7 @@ import {
 } from 'n8n-workflow';

 import { getConnectedTools, getPromptInputByType } from '@utils/helpers';
-import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser';
+import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import { throwIfToolSchema } from '@utils/schemaParsing';
 import { getTracingConfig } from '@utils/tracing';

@@ -30,7 +28,7 @@ export async function planAndExecuteAgentExecute(
 	const tools = await getConnectedTools(this, nodeVersion >= 1.5, true, true);

 	await checkForStructuredTools(tools, this.getNode(), 'Plan & Execute Agent');
-	const outputParsers = await getOptionalOutputParsers(this);
+	const outputParser = await getOptionalOutputParser(this);

 	const options = this.getNodeParameter('options', 0, {}) as {
 		humanMessageTemplate?: string;
@@ -44,12 +42,8 @@ export async function planAndExecuteAgentExecute(

 	const returnData: INodeExecutionData[] = [];

-	let outputParser: BaseOutputParser | undefined;
 	let prompt: PromptTemplate | undefined;
-	if (outputParsers.length) {
-		outputParser =
-			outputParsers.length === 1 ? outputParsers[0] : new CombiningOutputParser(...outputParsers);
-
+	if (outputParser) {
 		const formatInstructions = outputParser.getFormatInstructions();

 		prompt = new PromptTemplate({
@@ -84,7 +78,7 @@ export async function planAndExecuteAgentExecute(

 	const response = await agentExecutor
 		.withConfig(getTracingConfig(this))
-		.invoke({ input, outputParsers });
+		.invoke({ input, outputParser });

 	if (outputParser) {
 		response.output = await extractParsedOutput(this, outputParser, response.output as string);
@@ -1,9 +1,7 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
 import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import type { BaseOutputParser } from '@langchain/core/output_parsers';
 import { PromptTemplate } from '@langchain/core/prompts';
 import { AgentExecutor, ChatAgent, ZeroShotAgent } from 'langchain/agents';
-import { CombiningOutputParser } from 'langchain/output_parsers';
 import {
 	type IExecuteFunctions,
 	type INodeExecutionData,
@@ -12,7 +10,7 @@ import {
 } from 'n8n-workflow';

 import { getConnectedTools, getPromptInputByType, isChatInstance } from '@utils/helpers';
-import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser';
+import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import { throwIfToolSchema } from '@utils/schemaParsing';
 import { getTracingConfig } from '@utils/tracing';

@@ -32,7 +30,7 @@ export async function reActAgentAgentExecute(

 	await checkForStructuredTools(tools, this.getNode(), 'ReAct Agent');

-	const outputParsers = await getOptionalOutputParsers(this);
+	const outputParser = await getOptionalOutputParser(this);

 	const options = this.getNodeParameter('options', 0, {}) as {
 		prefix?: string;
@@ -66,12 +64,8 @@ export async function reActAgentAgentExecute(

 	const returnData: INodeExecutionData[] = [];

-	let outputParser: BaseOutputParser | undefined;
 	let prompt: PromptTemplate | undefined;
-	if (outputParsers.length) {
-		outputParser =
-			outputParsers.length === 1 ? outputParsers[0] : new CombiningOutputParser(...outputParsers);
-
+	if (outputParser) {
 		const formatInstructions = outputParser.getFormatInstructions();

 		prompt = new PromptTemplate({
@@ -107,7 +101,7 @@ export async function reActAgentAgentExecute(

 	const response = await agentExecutor
 		.withConfig(getTracingConfig(this))
-		.invoke({ input, outputParsers });
+		.invoke({ input, outputParser });

 	if (outputParser) {
 		response.output = await extractParsedOutput(this, outputParser, response.output as string);
@@ -18,7 +18,7 @@ import { z } from 'zod';

 import { isChatInstance, getPromptInputByType, getConnectedTools } from '@utils/helpers';
 import {
-	getOptionalOutputParsers,
+	getOptionalOutputParser,
 	type N8nOutputParser,
 } from '@utils/output_parsers/N8nOutputParser';

@@ -392,8 +392,7 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {

 	const returnData: INodeExecutionData[] = [];
 	const items = this.getInputData();
-	const outputParsers = await getOptionalOutputParsers(this);
-	const outputParser = outputParsers?.[0];
+	const outputParser = await getOptionalOutputParser(this);
 	const tools = await getTools(this, outputParser);

 	for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
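The Tools Agent hunk is the smallest of the call-site changes: it previously fetched the array and kept only its first element, so the new single-parser helper just encodes the behavior this executor already had. A trivial sketch of the equivalence (the helper is mine, written to mirror the deleted line):

```typescript
import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';

// The old call site reduced the array to its first element; with the
// output-parser input capped at one connection, the two forms are equivalent.
function firstParser(outputParsers?: N8nOutputParser[]): N8nOutputParser | undefined {
	return outputParsers?.[0];
}
```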
@@ -1,262 +1,32 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
-import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
-import { HumanMessage } from '@langchain/core/messages';
-import {
-	AIMessagePromptTemplate,
-	PromptTemplate,
-	SystemMessagePromptTemplate,
-	HumanMessagePromptTemplate,
-	ChatPromptTemplate,
-} from '@langchain/core/prompts';
-import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
-import { ChatOllama } from '@langchain/ollama';
-import { LLMChain } from 'langchain/chains';
-import { CombiningOutputParser } from 'langchain/output_parsers';
 import type {
-	IBinaryData,
-	IDataObject,
 	IExecuteFunctions,
 	INodeExecutionData,
 	INodeType,
 	INodeTypeDescription,
 } from 'n8n-workflow';
+import { NodeApiError, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
+
+import { getPromptInputByType } from '@utils/helpers';
+import { getOptionalOutputParser } from '@utils/output_parsers/N8nOutputParser';
+
+// Import from centralized module
 import {
-	ApplicationError,
-	NodeApiError,
-	NodeConnectionType,
-	NodeOperationError,
-} from 'n8n-workflow';
-
-import { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';
-import { getPromptInputByType, isChatInstance } from '@utils/helpers';
-import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
-import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser';
-import { getTemplateNoticeField } from '@utils/sharedFields';
-import { getTracingConfig } from '@utils/tracing';
-
-import { dataUriFromImageData, UnsupportedMimeTypeError } from './utils';
+	executeChain,
+	formatResponse,
+	getInputs,
+	nodeProperties,
+	type MessageTemplate,
+} from './methods';
 import {
 	getCustomErrorMessage as getCustomOpenAiErrorMessage,
 	isOpenAiError,
 } from '../../vendors/OpenAi/helpers/error-handling';

-interface MessagesTemplate {
-	type: string;
-	message: string;
-	messageType: 'text' | 'imageBinary' | 'imageUrl';
-	binaryImageDataKey?: string;
-	imageUrl?: string;
-	imageDetail?: 'auto' | 'low' | 'high';
-}
-
-async function getImageMessage(
-	context: IExecuteFunctions,
-	itemIndex: number,
-	message: MessagesTemplate,
-) {
-	if (message.messageType !== 'imageBinary' && message.messageType !== 'imageUrl') {
-		// eslint-disable-next-line n8n-nodes-base/node-execute-block-wrong-error-thrown
-		throw new NodeOperationError(
-			context.getNode(),
-			'Invalid message type. Only imageBinary and imageUrl are supported',
-		);
-	}
-	const detail = message.imageDetail === 'auto' ? undefined : message.imageDetail;
-	if (message.messageType === 'imageUrl' && message.imageUrl) {
-		return new HumanMessage({
-			content: [
-				{
-					type: 'image_url',
-					image_url: {
-						url: message.imageUrl,
-						detail,
-					},
-				},
-			],
-		});
-	}
-
-	const binaryDataKey = message.binaryImageDataKey ?? 'data';
-	const inputData = context.getInputData()[itemIndex];
-	const binaryData = inputData.binary?.[binaryDataKey] as IBinaryData;
-
-	if (!binaryData) {
-		throw new NodeOperationError(context.getNode(), 'No binary data set.');
-	}
-
-	const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
-	const model = (await context.getInputConnectionData(
-		NodeConnectionType.AiLanguageModel,
-		0,
-	)) as BaseLanguageModel;
-
-	try {
-		const dataURI = dataUriFromImageData(binaryData, bufferData);
-
-		const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
-		const imageUrl = directUriModels.some((i) => model instanceof i)
-			? dataURI
-			: { url: dataURI, detail };
-
-		return new HumanMessage({
-			content: [
-				{
-					type: 'image_url',
-					image_url: imageUrl,
-				},
-			],
-		});
-	} catch (error) {
-		if (error instanceof UnsupportedMimeTypeError)
-			throw new NodeOperationError(context.getNode(), error.message);
-		throw error;
-	}
-}
-
-async function getChainPromptTemplate(
-	context: IExecuteFunctions,
-	itemIndex: number,
-	llm: BaseLanguageModel | BaseChatModel,
-	messages?: MessagesTemplate[],
-	formatInstructions?: string,
-	query?: string,
-) {
-	const queryTemplate = new PromptTemplate({
-		template: `{query}${formatInstructions ? '\n{formatInstructions}' : ''}`,
-		inputVariables: ['query'],
-		partialVariables: formatInstructions ? { formatInstructions } : undefined,
-	});
-
-	if (isChatInstance(llm)) {
-		const parsedMessages = await Promise.all(
-			(messages ?? []).map(async (message) => {
-				const messageClass = [
-					SystemMessagePromptTemplate,
-					AIMessagePromptTemplate,
-					HumanMessagePromptTemplate,
-				].find((m) => m.lc_name() === message.type);
-
-				if (!messageClass) {
-					// eslint-disable-next-line n8n-nodes-base/node-execute-block-wrong-error-thrown
-					throw new ApplicationError('Invalid message type', {
-						extra: { messageType: message.type },
-					});
-				}
-
-				if (messageClass === HumanMessagePromptTemplate && message.messageType !== 'text') {
-					const test = await getImageMessage(context, itemIndex, message);
-					return test;
-				}
-
-				const res = messageClass.fromTemplate(
-					// Since we're using the message as template, we need to escape any curly braces
-					// so LangChain doesn't try to parse them as variables
-					(message.message || '').replace(/[{}]/g, (match) => match + match),
-				);
-				return res;
-			}),
-		);
-
-		const lastMessage = parsedMessages[parsedMessages.length - 1];
-		// If the last message is a human message and it has an array of content, we need to add the query to the last message
-		if (lastMessage instanceof HumanMessage && Array.isArray(lastMessage.content)) {
-			const humanMessage = new HumanMessagePromptTemplate(queryTemplate);
-			const test = await humanMessage.format({ query });
-			lastMessage.content.push({ text: test.content.toString(), type: 'text' });
-		} else {
-			parsedMessages.push(new HumanMessagePromptTemplate(queryTemplate));
-		}
-		return ChatPromptTemplate.fromMessages(parsedMessages);
-	}
-
-	return queryTemplate;
-}
-
-async function createSimpleLLMChain(
-	context: IExecuteFunctions,
-	llm: BaseLanguageModel,
-	query: string,
-	prompt: ChatPromptTemplate | PromptTemplate,
-): Promise<string[]> {
-	const chain = new LLMChain({
-		llm,
-		prompt,
-	}).withConfig(getTracingConfig(context));
-
-	const response = (await chain.invoke({
-		query,
-		signal: context.getExecutionCancelSignal(),
-	})) as string[];
-
-	return Array.isArray(response) ? response : [response];
-}
-
-async function getChain(
-	context: IExecuteFunctions,
-	itemIndex: number,
-	query: string,
-	llm: BaseLanguageModel,
-	outputParsers: N8nOutputParser[],
-	messages?: MessagesTemplate[],
-): Promise<unknown[]> {
-	const chatTemplate: ChatPromptTemplate | PromptTemplate = await getChainPromptTemplate(
-		context,
-		itemIndex,
-		llm,
-		messages,
-		undefined,
-		query,
-	);
-
-	// If there are no output parsers, create a simple LLM chain and execute the query
-	if (!outputParsers.length) {
-		return await createSimpleLLMChain(context, llm, query, chatTemplate);
-	}
-
-	// If there's only one output parser, use it; otherwise, create a combined output parser
-	const combinedOutputParser =
-		outputParsers.length === 1 ? outputParsers[0] : new CombiningOutputParser(...outputParsers);
-
-	const formatInstructions = combinedOutputParser.getFormatInstructions();
-
-	// Create a prompt template incorporating the format instructions and query
-	const prompt = await getChainPromptTemplate(
-		context,
-		itemIndex,
-		llm,
-		messages,
-		formatInstructions,
-		query,
-	);
-
-	const chain = prompt.pipe(llm).pipe(combinedOutputParser);
-	const response = (await chain.withConfig(getTracingConfig(context)).invoke({ query })) as
-		| string
-		| string[];
-
-	return Array.isArray(response) ? response : [response];
-}
-
-function getInputs(parameters: IDataObject) {
-	const hasOutputParser = parameters?.hasOutputParser;
-	const inputs = [
-		{ displayName: '', type: NodeConnectionType.Main },
-		{
-			displayName: 'Model',
-			maxConnections: 1,
-			type: NodeConnectionType.AiLanguageModel,
-			required: true,
-		},
-	];
-
-	// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we
-	// always add the output parser input
-	if (hasOutputParser === undefined || hasOutputParser === true) {
-		inputs.push({ displayName: 'Output Parser', type: NodeConnectionType.AiOutputParser });
-	}
-	return inputs;
-}
-
+/**
+ * Basic LLM Chain Node Implementation
+ * Allows connecting to language models with optional structured output parsing
+ */
 export class ChainLlm implements INodeType {
 	description: INodeTypeDescription = {
 		displayName: 'Basic LLM Chain',
@@ -287,253 +57,32 @@ export class ChainLlm implements INodeType {
 		inputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,
 		outputs: [NodeConnectionType.Main],
 		credentials: [],
-		properties: [
-			getTemplateNoticeField(1978),
-			{
-				displayName: 'Prompt',
-				name: 'prompt',
-				type: 'string',
-				required: true,
-				default: '={{ $json.input }}',
-				displayOptions: {
-					show: {
-						'@version': [1],
-					},
-				},
-			},
-			{
-				displayName: 'Prompt',
-				name: 'prompt',
-				type: 'string',
-				required: true,
-				default: '={{ $json.chat_input }}',
-				displayOptions: {
-					show: {
-						'@version': [1.1, 1.2],
-					},
-				},
-			},
-			{
-				displayName: 'Prompt',
-				name: 'prompt',
-				type: 'string',
-				required: true,
-				default: '={{ $json.chatInput }}',
-				displayOptions: {
-					show: {
-						'@version': [1.3],
-					},
-				},
-			},
-			{
-				...promptTypeOptions,
-				displayOptions: {
-					hide: {
-						'@version': [1, 1.1, 1.2, 1.3],
-					},
-				},
-			},
-			{
-				...textFromPreviousNode,
-				displayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },
-			},
-			{
-				displayName: 'Prompt (User Message)',
-				name: 'text',
-				type: 'string',
-				required: true,
-				default: '',
-				placeholder: 'e.g. Hello, how can you help me?',
-				typeOptions: {
-					rows: 2,
-				},
-				displayOptions: {
-					show: {
-						promptType: ['define'],
-					},
-				},
-			},
-			{
-				displayName: 'Require Specific Output Format',
-				name: 'hasOutputParser',
-				type: 'boolean',
-				default: false,
-				noDataExpression: true,
-				displayOptions: {
-					hide: {
-						'@version': [1, 1.1, 1.3],
-					},
-				},
-			},
-			{
-				displayName: 'Chat Messages (if Using a Chat Model)',
-				name: 'messages',
-				type: 'fixedCollection',
-				typeOptions: {
-					multipleValues: true,
-				},
-				default: {},
-				placeholder: 'Add prompt',
-				options: [
-					{
-						name: 'messageValues',
-						displayName: 'Prompt',
-						values: [
-							{
-								displayName: 'Type Name or ID',
-								name: 'type',
-								type: 'options',
-								options: [
-									{
-										name: 'AI',
-										value: AIMessagePromptTemplate.lc_name(),
-									},
-									{
-										name: 'System',
-										value: SystemMessagePromptTemplate.lc_name(),
-									},
-									{
-										name: 'User',
-										value: HumanMessagePromptTemplate.lc_name(),
-									},
-								],
-								default: SystemMessagePromptTemplate.lc_name(),
-							},
-							{
-								displayName: 'Message Type',
-								name: 'messageType',
-								type: 'options',
-								displayOptions: {
-									show: {
-										type: [HumanMessagePromptTemplate.lc_name()],
-									},
-								},
-								options: [
-									{
-										name: 'Text',
-										value: 'text',
-										description: 'Simple text message',
-									},
-									{
-										name: 'Image (Binary)',
-										value: 'imageBinary',
-										description: 'Process the binary input from the previous node',
-									},
-									{
-										name: 'Image (URL)',
-										value: 'imageUrl',
-										description: 'Process the image from the specified URL',
-									},
-								],
-								default: 'text',
-							},
-							{
-								displayName: 'Image Data Field Name',
-								name: 'binaryImageDataKey',
-								type: 'string',
-								default: 'data',
-								required: true,
-								description:
-									'The name of the field in the chain’s input that contains the binary image file to be processed',
-								displayOptions: {
-									show: {
-										messageType: ['imageBinary'],
-									},
-								},
-							},
-							{
-								displayName: 'Image URL',
-								name: 'imageUrl',
-								type: 'string',
-								default: '',
-								required: true,
-								description: 'URL to the image to be processed',
-								displayOptions: {
-									show: {
-										messageType: ['imageUrl'],
-									},
-								},
-							},
-							{
-								displayName: 'Image Details',
-								description:
-									'Control how the model processes the image and generates its textual understanding',
-								name: 'imageDetail',
-								type: 'options',
-								displayOptions: {
-									show: {
-										type: [HumanMessagePromptTemplate.lc_name()],
-										messageType: ['imageBinary', 'imageUrl'],
-									},
-								},
-								options: [
-									{
-										name: 'Auto',
-										value: 'auto',
-										description:
-											'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',
-									},
-									{
-										name: 'Low',
-										value: 'low',
-										description:
-											'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',
-									},
-									{
-										name: 'High',
-										value: 'high',
-										description:
-											'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',
-									},
-								],
-								default: 'auto',
-							},
-
-							{
-								displayName: 'Message',
-								name: 'message',
-								type: 'string',
-								required: true,
-								displayOptions: {
-									hide: {
-										messageType: ['imageBinary', 'imageUrl'],
-									},
-								},
-								default: '',
-							},
-						],
-					},
-				],
-			},
-			{
-				displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
-				name: 'notice',
-				type: 'notice',
-				default: '',
-				displayOptions: {
-					show: {
-						hasOutputParser: [true],
-					},
-				},
-			},
-		],
+		properties: nodeProperties,
 	};

+	/**
+	 * Main execution method for the node
+	 */
 	async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
-		this.logger.debug('Executing LLM Chain');
+		this.logger.debug('Executing Basic LLM Chain');
 		const items = this.getInputData();

 		const returnData: INodeExecutionData[] = [];

+		// Process each input item
 		for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
 			try {
-				let prompt: string;
+				// Get the language model
 				const llm = (await this.getInputConnectionData(
 					NodeConnectionType.AiLanguageModel,
 					0,
 				)) as BaseLanguageModel;

-				const outputParsers = await getOptionalOutputParsers(this);
+				// Get output parser if configured
+				const outputParser = await getOptionalOutputParser(this);
+
+				// Get user prompt based on node version
+				let prompt: string;

 				if (this.getNode().typeVersion <= 1.3) {
 					prompt = this.getNodeParameter('prompt', itemIndex) as string;
 				} else {
@@ -544,47 +93,37 @@ export class ChainLlm implements INodeType {
 						promptTypeKey: 'promptType',
 					});
 				}
-				const messages = this.getNodeParameter(
-					'messages.messageValues',
-					itemIndex,
-					[],
-				) as MessagesTemplate[];

+				// Validate prompt
 				if (prompt === undefined) {
 					throw new NodeOperationError(this.getNode(), "The 'prompt' parameter is empty.");
 				}

-				const responses = await getChain(this, itemIndex, prompt, llm, outputParsers, messages);
+				// Get chat messages if configured
+				const messages = this.getNodeParameter(
+					'messages.messageValues',
+					itemIndex,
+					[],
+				) as MessageTemplate[];
+
+				// Execute the chain
+				const responses = await executeChain({
+					context: this,
+					itemIndex,
+					query: prompt,
+					llm,
+					outputParser,
+					messages,
+				});
+
+				// Process each response and add to return data
 				responses.forEach((response) => {
-					let data: IDataObject;
-					if (typeof response === 'string') {
-						data = {
-							response: {
-								text: response.trim(),
-							},
-						};
-					} else if (Array.isArray(response)) {
-						data = {
-							data: response,
-						};
-					} else if (response instanceof Object) {
-						data = response as IDataObject;
-					} else {
-						data = {
-							response: {
-								text: response,
-							},
-						};
-					}
-
 					returnData.push({
-						json: data,
+						json: formatResponse(response),
 					});
 				});
 			} catch (error) {
-				// If the error is an OpenAI's rate limit error, we want to handle it differently
-				// because OpenAI has multiple different rate limit errors
+				// Handle OpenAI specific rate limit errors
 				if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
 					const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
 					if (openAiErrorCode) {
@@ -595,6 +134,7 @@ export class ChainLlm implements INodeType {
 				}
 			}

+			// Continue on failure if configured
 			if (this.continueOnFail()) {
 				returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
 				continue;
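`responseFormatter.ts` itself is not part of this excerpt, but the inline logic it replaces (deleted in the hunk above) pins down its contract: strings are trimmed and wrapped, arrays become `data`, objects pass through. A sketch reconstructed from that deleted code; the `String(...)` cast in the fallback is mine, added only to keep the sketch type-safe:

```typescript
import type { IDataObject } from 'n8n-workflow';

// Reconstructed from the inline branches deleted in ChainLlm's execute():
export function formatResponse(response: unknown): IDataObject {
	if (typeof response === 'string') {
		return { response: { text: response.trim() } };
	}
	if (Array.isArray(response)) {
		return { data: response };
	}
	if (response instanceof Object) {
		return response as IDataObject;
	}
	return { response: { text: String(response) } };
}
```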
@@ -0,0 +1,90 @@
+import type { BaseLanguageModel } from '@langchain/core/language_models/base';
+import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
+import type { IExecuteFunctions } from 'n8n-workflow';
+
+import { getTracingConfig } from '@utils/tracing';
+
+import { createPromptTemplate } from './promptUtils';
+import type { ChainExecutionParams } from './types';
+
+/**
+ * Creates a simple chain for LLMs without output parsers
+ */
+async function executeSimpleChain({
+	context,
+	llm,
+	query,
+	prompt,
+}: {
+	context: IExecuteFunctions;
+	llm: BaseLanguageModel;
+	query: string;
+	prompt: ChatPromptTemplate | PromptTemplate;
+}): Promise<string[]> {
+	const chain = prompt
+		.pipe(llm)
+		.pipe(new StringOutputParser())
+		.withConfig(getTracingConfig(context));
+
+	// Execute the chain
+	const response = await chain.invoke({
+		query,
+		signal: context.getExecutionCancelSignal(),
+	});
+
+	// Ensure response is always returned as an array
+	return [response];
+}
+
+/**
+ * Creates and executes an LLM chain with the given prompt and optional output parsers
+ */
+export async function executeChain({
+	context,
+	itemIndex,
+	query,
+	llm,
+	outputParser,
+	messages,
+}: ChainExecutionParams): Promise<unknown[]> {
+	// If no output parsers provided, use a simple chain with basic prompt template
+	if (!outputParser) {
+		const promptTemplate = await createPromptTemplate({
+			context,
+			itemIndex,
+			llm,
+			messages,
+			query,
+		});
+
+		return await executeSimpleChain({
+			context,
+			llm,
+			query,
+			prompt: promptTemplate,
+		});
+	}
+
+	const formatInstructions = outputParser.getFormatInstructions();
+
+	// Create a prompt template with format instructions
+	const promptWithInstructions = await createPromptTemplate({
+		context,
+		itemIndex,
+		llm,
+		messages,
+		formatInstructions,
+		query,
+	});
+
+	const chain = promptWithInstructions
+		.pipe(llm)
+		.pipe(outputParser)
+		.withConfig(getTracingConfig(context));
+	const response = await chain.invoke({ query }, { signal: context.getExecutionCancelSignal() });
+
+	// Ensure response is always returned as an array
+	// eslint-disable-next-line @typescript-eslint/no-unsafe-return
+	return Array.isArray(response) ? response : [response];
+}
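One behavioral detail worth flagging in the new executor: the simple path no longer goes through the legacy `LLMChain` wrapper from the old file; it pipes prompt, model, and `StringOutputParser` directly. A minimal runnable sketch of that LCEL shape under the same imports (the helper name is mine):

```typescript
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { PromptTemplate } from '@langchain/core/prompts';

// prompt -> model -> string: the same pipeline executeSimpleChain builds.
async function runSimpleChain(llm: BaseLanguageModel, query: string): Promise<string> {
	const prompt = PromptTemplate.fromTemplate('{query}');
	const chain = prompt.pipe(llm).pipe(new StringOutputParser());
	return await chain.invoke({ query });
}
```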
@@ -0,0 +1,273 @@
+import {
+	AIMessagePromptTemplate,
+	HumanMessagePromptTemplate,
+	SystemMessagePromptTemplate,
+} from '@langchain/core/prompts';
+import type { IDataObject, INodeProperties } from 'n8n-workflow';
+import { NodeConnectionType } from 'n8n-workflow';
+
+import { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';
+import { getTemplateNoticeField } from '@utils/sharedFields';
+
+/**
+ * Dynamic input configuration generation based on node parameters
+ */
+export function getInputs(parameters: IDataObject) {
+	const inputs = [
+		{ displayName: '', type: NodeConnectionType.Main },
+		{
+			displayName: 'Model',
+			maxConnections: 1,
+			type: NodeConnectionType.AiLanguageModel,
+			required: true,
+		},
+	];
+
+	// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we
+	// always add the output parser input
+	const hasOutputParser = parameters?.hasOutputParser;
+	if (hasOutputParser === undefined || hasOutputParser === true) {
+		inputs.push({
+			displayName: 'Output Parser',
+			type: NodeConnectionType.AiOutputParser,
+			maxConnections: 1,
+			required: false,
+		});
+	}
+
+	return inputs;
+}
+
+/**
+ * Node properties configuration
+ */
+export const nodeProperties: INodeProperties[] = [
+	getTemplateNoticeField(1978),
+	{
+		displayName: 'Prompt',
+		name: 'prompt',
+		type: 'string',
+		required: true,
+		default: '={{ $json.input }}',
+		displayOptions: {
+			show: {
+				'@version': [1],
+			},
+		},
+	},
+	{
+		displayName: 'Prompt',
+		name: 'prompt',
+		type: 'string',
+		required: true,
+		default: '={{ $json.chat_input }}',
+		displayOptions: {
+			show: {
+				'@version': [1.1, 1.2],
+			},
+		},
+	},
+	{
+		displayName: 'Prompt',
+		name: 'prompt',
+		type: 'string',
+		required: true,
+		default: '={{ $json.chatInput }}',
+		displayOptions: {
+			show: {
+				'@version': [1.3],
+			},
+		},
+	},
+	{
+		...promptTypeOptions,
+		displayOptions: {
+			hide: {
+				'@version': [1, 1.1, 1.2, 1.3],
+			},
+		},
+	},
+	{
+		...textFromPreviousNode,
+		displayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },
+	},
+	{
+		displayName: 'Prompt (User Message)',
+		name: 'text',
+		type: 'string',
+		required: true,
+		default: '',
+		placeholder: 'e.g. Hello, how can you help me?',
+		typeOptions: {
+			rows: 2,
+		},
+		displayOptions: {
+			show: {
+				promptType: ['define'],
+			},
+		},
+	},
+	{
+		displayName: 'Require Specific Output Format',
+		name: 'hasOutputParser',
+		type: 'boolean',
+		default: false,
+		noDataExpression: true,
+		displayOptions: {
+			hide: {
+				'@version': [1, 1.1, 1.3],
+			},
+		},
+	},
+	{
+		displayName: 'Chat Messages (if Using a Chat Model)',
+		name: 'messages',
+		type: 'fixedCollection',
+		typeOptions: {
+			multipleValues: true,
+		},
+		default: {},
+		placeholder: 'Add prompt',
+		options: [
+			{
+				name: 'messageValues',
+				displayName: 'Prompt',
+				values: [
+					{
+						displayName: 'Type Name or ID',
+						name: 'type',
+						type: 'options',
+						options: [
+							{
+								name: 'AI',
+								value: AIMessagePromptTemplate.lc_name(),
+							},
+							{
+								name: 'System',
+								value: SystemMessagePromptTemplate.lc_name(),
+							},
+							{
+								name: 'User',
+								value: HumanMessagePromptTemplate.lc_name(),
+							},
+						],
+						default: SystemMessagePromptTemplate.lc_name(),
+					},
+					{
+						displayName: 'Message Type',
+						name: 'messageType',
+						type: 'options',
+						displayOptions: {
+							show: {
+								type: [HumanMessagePromptTemplate.lc_name()],
+							},
+						},
+						options: [
+							{
+								name: 'Text',
+								value: 'text',
+								description: 'Simple text message',
+							},
+							{
+								name: 'Image (Binary)',
+								value: 'imageBinary',
+								description: 'Process the binary input from the previous node',
+							},
+							{
+								name: 'Image (URL)',
+								value: 'imageUrl',
+								description: 'Process the image from the specified URL',
+							},
+						],
+						default: 'text',
+					},
+					{
+						displayName: 'Image Data Field Name',
+						name: 'binaryImageDataKey',
+						type: 'string',
+						default: 'data',
+						required: true,
+						description:
+							"The name of the field in the chain's input that contains the binary image file to be processed",
+						displayOptions: {
+							show: {
+								messageType: ['imageBinary'],
+							},
+						},
+					},
+					{
+						displayName: 'Image URL',
+						name: 'imageUrl',
+						type: 'string',
+						default: '',
+						required: true,
+						description: 'URL to the image to be processed',
+						displayOptions: {
+							show: {
+								messageType: ['imageUrl'],
+							},
+						},
+					},
+					{
+						displayName: 'Image Details',
+						description:
+							'Control how the model processes the image and generates its textual understanding',
+						name: 'imageDetail',
+						type: 'options',
+						displayOptions: {
+							show: {
+								type: [HumanMessagePromptTemplate.lc_name()],
+								messageType: ['imageBinary', 'imageUrl'],
+							},
+						},
+						options: [
+							{
+								name: 'Auto',
+								value: 'auto',
+								description:
+									'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',
+							},
+							{
+								name: 'Low',
+								value: 'low',
+								description:
+									'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',
+							},
+							{
+								name: 'High',
+								value: 'high',
+								description:
+									'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',
+							},
+						],
+						default: 'auto',
+					},
+
+					{
+						displayName: 'Message',
+						name: 'message',
+						type: 'string',
+						required: true,
+						displayOptions: {
+							hide: {
+								messageType: ['imageBinary', 'imageUrl'],
+							},
+						},
+						default: '',
+					},
+				],
+			},
+		],
+	},
+	{
+		displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
+		name: 'notice',
+		type: 'notice',
+		default: '',
+		displayOptions: {
+			show: {
+				hasOutputParser: [true],
+			},
+		},
+	},
+];
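A design note on `getInputs`: it presumably stays a plain function of `parameters`, with `NodeConnectionType` as its only external reference, because the node serializes the function's source text into its `inputs` expression and re-evaluates it in the workflow's expression sandbox. The context line from the ChainLlm hunk shows the mechanism:

```typescript
import { getInputs } from './config';

// How the node embeds getInputs (context line from the ChainLlm hunk above):
// the stringified function body must be self-contained to survive this.
const inputs = `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`;
```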
@@ -0,0 +1,99 @@
+import type { BaseLanguageModel } from '@langchain/core/language_models/base';
+import { HumanMessage } from '@langchain/core/messages';
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
+import { ChatOllama } from '@langchain/ollama';
+import type { IExecuteFunctions, IBinaryData } from 'n8n-workflow';
+import { NodeOperationError, NodeConnectionType, OperationalError } from 'n8n-workflow';
+
+import type { MessageTemplate } from './types';
+
+export class UnsupportedMimeTypeError extends OperationalError {}
+
+/**
+ * Converts binary image data to a data URI
+ */
+export function dataUriFromImageData(binaryData: IBinaryData, bufferData: Buffer): string {
+	if (!binaryData.mimeType?.startsWith('image/')) {
+		throw new UnsupportedMimeTypeError(
+			`${binaryData.mimeType} is not a supported type of binary data. Only images are supported.`,
+		);
+	}
+	return `data:${binaryData.mimeType};base64,${bufferData.toString('base64')}`;
+}
+
+/**
+ * Creates a human message with image content from either binary data or URL
+ */
+export async function createImageMessage({
+	context,
+	itemIndex,
+	message,
+}: {
+	context: IExecuteFunctions;
+	itemIndex: number;
+	message: MessageTemplate;
+}): Promise<HumanMessage> {
+	// Validate message type
+	if (message.messageType !== 'imageBinary' && message.messageType !== 'imageUrl') {
+		throw new NodeOperationError(
+			context.getNode(),
+			'Invalid message type. Only imageBinary and imageUrl are supported',
+		);
+	}
+
+	const detail = message.imageDetail === 'auto' ? undefined : message.imageDetail;
+
+	// Handle image URL case
+	if (message.messageType === 'imageUrl' && message.imageUrl) {
+		return new HumanMessage({
+			content: [
+				{
+					type: 'image_url',
+					image_url: {
+						url: message.imageUrl,
+						detail,
+					},
+				},
+			],
+		});
+	}
+
+	// Handle binary image case
+	const binaryDataKey = message.binaryImageDataKey ?? 'data';
+	const inputData = context.getInputData()[itemIndex];
+	const binaryData = inputData.binary?.[binaryDataKey] as IBinaryData;
+
+	if (!binaryData) {
+		throw new NodeOperationError(context.getNode(), 'No binary data set.');
+	}
+
+	const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
+	const model = (await context.getInputConnectionData(
+		NodeConnectionType.AiLanguageModel,
+		0,
+	)) as BaseLanguageModel;
+
+	try {
+		// Create data URI from binary data
+		const dataURI = dataUriFromImageData(binaryData, bufferData);
+
+		// Some models need different image URL formats
+		const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
+		const imageUrl = directUriModels.some((i) => model instanceof i)
+			? dataURI
+			: { url: dataURI, detail };
+
+		return new HumanMessage({
+			content: [
+				{
+					type: 'image_url',
+					image_url: imageUrl,
+				},
+			],
+		});
+	} catch (error) {
+		if (error instanceof UnsupportedMimeTypeError)
+			throw new NodeOperationError(context.getNode(), error.message);
+		throw error;
+	}
+}
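A quick usage example for `dataUriFromImageData` (the input values are made up): image MIME types are converted to a base64 data URI, anything else throws `UnsupportedMimeTypeError`.

```typescript
import type { IBinaryData } from 'n8n-workflow';

import { dataUriFromImageData } from './imageUtils';

// Hypothetical 4-byte PNG header as the buffer; mimeType drives the check.
const binaryData = { mimeType: 'image/png' } as IBinaryData;
const uri = dataUriFromImageData(binaryData, Buffer.from([0x89, 0x50, 0x4e, 0x47]));
// uri === 'data:image/png;base64,iVBORw=='
```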
@@ -0,0 +1,4 @@
+export { executeChain } from './chainExecutor';
+export { getInputs, nodeProperties } from './config';
+export { formatResponse } from './responseFormatter';
+export type { MessageTemplate } from './types';
@@ -0,0 +1,145 @@
import { HumanMessage } from '@langchain/core/messages';
import type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';
import {
	AIMessagePromptTemplate,
	PromptTemplate,
	SystemMessagePromptTemplate,
	HumanMessagePromptTemplate,
	ChatPromptTemplate,
} from '@langchain/core/prompts';
import type { IExecuteFunctions } from 'n8n-workflow';
import { OperationalError } from 'n8n-workflow';

import { isChatInstance } from '@utils/helpers';

import { createImageMessage } from './imageUtils';
import type { MessageTemplate, PromptParams } from './types';

/**
 * Creates a basic query template that may include format instructions
 */
function buildQueryTemplate(formatInstructions?: string): PromptTemplate {
	return new PromptTemplate({
		template: `{query}${formatInstructions ? '\n{formatInstructions}' : ''}`,
		inputVariables: ['query'],
		partialVariables: formatInstructions ? { formatInstructions } : undefined,
	});
}

/**
 * Process an array of message templates into LangChain message objects
 */
async function processMessageTemplates({
	context,
	itemIndex,
	messages,
}: {
	context: IExecuteFunctions;
	itemIndex: number;
	messages: MessageTemplate[];
}): Promise<BaseMessagePromptTemplateLike[]> {
	return await Promise.all(
		messages.map(async (message) => {
			// Find the appropriate message class based on type
			const messageClass = [
				SystemMessagePromptTemplate,
				AIMessagePromptTemplate,
				HumanMessagePromptTemplate,
			].find((m) => m.lc_name() === message.type);

			if (!messageClass) {
				throw new OperationalError('Invalid message type', {
					extra: { messageType: message.type },
				});
			}

			// Handle image messages specially for human messages
			if (messageClass === HumanMessagePromptTemplate && message.messageType !== 'text') {
				return await createImageMessage({ context, itemIndex, message });
			}

			// Process text messages
			// Escape curly braces in the message to prevent LangChain from treating them as variables
			return messageClass.fromTemplate(
				(message.message || '').replace(/[{}]/g, (match) => match + match),
			);
		}),
	);
}

/**
 * Finalizes the prompt template by adding or updating the query in the message chain
 */
async function finalizePromptTemplate({
	parsedMessages,
	queryTemplate,
	query,
}: {
	parsedMessages: BaseMessagePromptTemplateLike[];
	queryTemplate: PromptTemplate;
	query?: string;
}): Promise<ChatPromptTemplate> {
	// Check if the last message is a human message with multi-content array
	const lastMessage = parsedMessages[parsedMessages.length - 1];

	if (lastMessage instanceof HumanMessage && Array.isArray(lastMessage.content)) {
		// Add the query to the existing human message content
		const humanMessage = new HumanMessagePromptTemplate(queryTemplate);

		// Format the message with the query and add the content synchronously
		const formattedMessage = await humanMessage.format({ query });

		// Create a new array with the existing content plus the new item
		if (Array.isArray(lastMessage.content)) {
			// Clone the current content array and add the new item
			const updatedContent = [
				...lastMessage.content,
				{
					text: formattedMessage.content.toString(),
					type: 'text',
				},
			];

			// Replace the content with the updated array
			lastMessage.content = updatedContent;
		}
	} else {
		// Otherwise, add a new human message with the query
		parsedMessages.push(new HumanMessagePromptTemplate(queryTemplate));
	}

	return ChatPromptTemplate.fromMessages(parsedMessages);
}

/**
 * Builds the appropriate prompt template based on model type (chat vs completion)
 * and provided messages
 */
export async function createPromptTemplate({
	context,
	itemIndex,
	llm,
	messages,
	formatInstructions,
	query,
}: PromptParams) {
	// Create base query template
	const queryTemplate = buildQueryTemplate(formatInstructions);

	// For non-chat models, just return the query template
	if (!isChatInstance(llm)) {
		return queryTemplate;
	}

	// For chat models, process the messages if provided
	const parsedMessages = messages?.length
		? await processMessageTemplates({ context, itemIndex, messages })
		: [];

	// Add or update the query in the message chain
	return await finalizePromptTemplate({
		parsedMessages,
		queryTemplate,
		query,
	});
}
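A sketch of how these pieces compose; only createPromptTemplate is exported, the two helpers stay private. All literal values below are hypothetical, `this` stands in for the node's IExecuteFunctions, and chatModel for a connected chat model:

const prompt = await createPromptTemplate({
	context: this,
	itemIndex: 0,
	llm: chatModel,
	messages: [
		{ type: 'SystemMessagePromptTemplate', message: 'You are terse.', messageType: 'text' },
	],
	formatInstructions: 'Reply as JSON.', // present only when an output parser is connected
	query: 'Summarize the input',
});
// Chat models yield a ChatPromptTemplate; completion models get the plain
// {query} PromptTemplate from buildQueryTemplate.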
@@ -0,0 +1,30 @@
import type { IDataObject } from 'n8n-workflow';

/**
 * Formats the response from the LLM chain into a consistent structure
 */
export function formatResponse(response: unknown): IDataObject {
	if (typeof response === 'string') {
		return {
			response: {
				text: response.trim(),
			},
		};
	}

	if (Array.isArray(response)) {
		return {
			data: response,
		};
	}

	if (response instanceof Object) {
		return response as IDataObject;
	}

	return {
		response: {
			text: response,
		},
	};
}
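The resulting shapes, shown on hypothetical inputs:

formatResponse('  hi  ');   // { response: { text: 'hi' } }  - strings are trimmed
formatResponse([{ a: 1 }]); // { data: [{ a: 1 }] }          - arrays nest under data
formatResponse({ a: 1 });   // { a: 1 }                      - objects pass through
formatResponse(42);         // { response: { text: 42 } }    - other primitives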
@@ -0,0 +1,41 @@
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { IExecuteFunctions } from 'n8n-workflow';

import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';

/**
 * Interface for describing a message template in the UI
 */
export interface MessageTemplate {
	type: string;
	message: string;
	messageType: 'text' | 'imageBinary' | 'imageUrl';
	binaryImageDataKey?: string;
	imageUrl?: string;
	imageDetail?: 'auto' | 'low' | 'high';
}

/**
 * Parameters for prompt creation
 */
export interface PromptParams {
	context: IExecuteFunctions;
	itemIndex: number;
	llm: BaseLanguageModel | BaseChatModel;
	messages?: MessageTemplate[];
	formatInstructions?: string;
	query?: string;
}

/**
 * Parameters for chain execution
 */
export interface ChainExecutionParams {
	context: IExecuteFunctions;
	itemIndex: number;
	query: string;
	llm: BaseLanguageModel;
	outputParser?: N8nOutputParser;
	messages?: MessageTemplate[];
}
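For illustration, two values these types admit (the URL is a placeholder):

const systemText: MessageTemplate = {
	type: 'SystemMessagePromptTemplate',
	message: 'You are a helpful assistant',
	messageType: 'text',
};

const remoteImage: MessageTemplate = {
	type: 'HumanMessagePromptTemplate',
	message: '',
	messageType: 'imageUrl',
	imageUrl: 'https://example.com/image.jpg',
	imageDetail: 'high',
};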
@@ -0,0 +1,187 @@
/* eslint-disable @typescript-eslint/no-unsafe-return */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import { FakeChatModel } from '@langchain/core/utils/testing';
import { mock } from 'jest-mock-extended';
import type { IExecuteFunctions, INode } from 'n8n-workflow';
import { NodeConnectionType } from 'n8n-workflow';

import * as helperModule from '@utils/helpers';
import * as outputParserModule from '@utils/output_parsers/N8nOutputParser';

import { ChainLlm } from '../ChainLlm.node';
import * as executeChainModule from '../methods/chainExecutor';

jest.mock('@utils/helpers', () => ({
	getPromptInputByType: jest.fn(),
}));

jest.mock('@utils/output_parsers/N8nOutputParser', () => ({
	getOptionalOutputParser: jest.fn(),
}));

jest.mock('../methods/chainExecutor', () => ({
	executeChain: jest.fn(),
}));

describe('ChainLlm Node', () => {
	let node: ChainLlm;
	let mockExecuteFunction: jest.Mocked<IExecuteFunctions>;

	beforeEach(() => {
		node = new ChainLlm();
		mockExecuteFunction = mock<IExecuteFunctions>();

		mockExecuteFunction.logger = {
			debug: jest.fn(),
			info: jest.fn(),
			warn: jest.fn(),
			error: jest.fn(),
		};

		mockExecuteFunction.getInputData.mockReturnValue([{ json: {} }]);
		mockExecuteFunction.getNode.mockReturnValue({
			name: 'Chain LLM',
			typeVersion: 1.5,
			parameters: {},
		} as INode);

		mockExecuteFunction.getNodeParameter.mockImplementation((param, _itemIndex, defaultValue) => {
			if (param === 'messages.messageValues') return [];
			return defaultValue;
		});

		const fakeLLM = new FakeChatModel({});
		mockExecuteFunction.getInputConnectionData.mockResolvedValue(fakeLLM);

		jest.clearAllMocks();
	});

	describe('description', () => {
		it('should have the expected properties', () => {
			expect(node.description).toBeDefined();
			expect(node.description.name).toBe('chainLlm');
			expect(node.description.displayName).toBe('Basic LLM Chain');
			expect(node.description.version).toContain(1.5);
			expect(node.description.properties).toBeDefined();
			expect(node.description.inputs).toBeDefined();
			expect(node.description.outputs).toEqual([NodeConnectionType.Main]);
		});
	});

	describe('execute', () => {
		it('should execute the chain with the correct parameters', async () => {
			(helperModule.getPromptInputByType as jest.Mock).mockReturnValue('Test prompt');
			(outputParserModule.getOptionalOutputParser as jest.Mock).mockResolvedValue(undefined);
			(executeChainModule.executeChain as jest.Mock).mockResolvedValue(['Test response']);

			const result = await node.execute.call(mockExecuteFunction);

			expect(executeChainModule.executeChain).toHaveBeenCalledWith({
				context: mockExecuteFunction,
				itemIndex: 0,
				query: 'Test prompt',
				llm: expect.any(FakeChatModel),
				outputParser: undefined,
				messages: [],
			});

			expect(mockExecuteFunction.logger.debug).toHaveBeenCalledWith('Executing Basic LLM Chain');
			expect(result).toEqual([[{ json: expect.any(Object) }]]);
		});

		it('should handle multiple input items', async () => {
			// Set up multiple input items
			mockExecuteFunction.getInputData.mockReturnValue([
				{ json: { item: 1 } },
				{ json: { item: 2 } },
			]);

			(helperModule.getPromptInputByType as jest.Mock)
				.mockReturnValueOnce('Test prompt 1')
				.mockReturnValueOnce('Test prompt 2');
			(outputParserModule.getOptionalOutputParser as jest.Mock).mockResolvedValue(undefined);
			(executeChainModule.executeChain as jest.Mock)
				.mockResolvedValueOnce(['Response 1'])
				.mockResolvedValueOnce(['Response 2']);

			const result = await node.execute.call(mockExecuteFunction);

			expect(executeChainModule.executeChain).toHaveBeenCalledTimes(2);
			expect(result[0]).toHaveLength(2);
		});

		it('should use the prompt parameter directly for older versions', async () => {
			// Set an older version
			mockExecuteFunction.getNode.mockReturnValue({
				name: 'Chain LLM',
				typeVersion: 1.3,
				parameters: {},
			} as INode);

			mockExecuteFunction.getNodeParameter.mockImplementation((param, _itemIndex, defaultValue) => {
				if (param === 'prompt') return 'Old version prompt';
				if (param === 'messages.messageValues') return [];
				return defaultValue;
			});

			(executeChainModule.executeChain as jest.Mock).mockResolvedValue(['Test response']);
			(outputParserModule.getOptionalOutputParser as jest.Mock).mockResolvedValue(undefined);

			await node.execute.call(mockExecuteFunction);

			expect(executeChainModule.executeChain).toHaveBeenCalledWith({
				context: mockExecuteFunction,
				itemIndex: 0,
				query: 'Old version prompt',
				llm: expect.any(Object),
				outputParser: undefined,
				messages: expect.any(Array),
			});
		});

		it('should throw an error if prompt is empty', async () => {
			(helperModule.getPromptInputByType as jest.Mock).mockReturnValue(undefined);
			(outputParserModule.getOptionalOutputParser as jest.Mock).mockResolvedValue(undefined);
			mockExecuteFunction.getNode.mockReturnValue({ name: 'Test Node' } as INode);

			await expect(node.execute.call(mockExecuteFunction)).rejects.toThrow(/prompt.*empty/);
		});

		it('should continue on failure when configured', async () => {
			(helperModule.getPromptInputByType as jest.Mock).mockReturnValue('Test prompt');

			const error = new Error('Test error');
			(executeChainModule.executeChain as jest.Mock).mockRejectedValue(error);

			mockExecuteFunction.continueOnFail.mockReturnValue(true);

			const result = await node.execute.call(mockExecuteFunction);

			expect(result).toEqual([[{ json: { error: 'Test error' }, pairedItem: { item: 0 } }]]);
		});

		it('should handle multiple response items from executeChain', async () => {
			(helperModule.getPromptInputByType as jest.Mock).mockReturnValue('Test prompt');
			(outputParserModule.getOptionalOutputParser as jest.Mock).mockResolvedValue(undefined);
			(executeChainModule.executeChain as jest.Mock).mockResolvedValue([
				'Response 1',
				'Response 2',
			]);

			const result = await node.execute.call(mockExecuteFunction);

			expect(result[0]).toHaveLength(2);
		});
	});
});
@@ -0,0 +1,223 @@
import { StringOutputParser } from '@langchain/core/output_parsers';
import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
import { FakeLLM, FakeChatModel } from '@langchain/core/utils/testing';
import { mock } from 'jest-mock-extended';
import type { IExecuteFunctions } from 'n8n-workflow';

import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
import * as tracing from '@utils/tracing';

import { executeChain } from '../methods/chainExecutor';
import * as promptUtils from '../methods/promptUtils';

jest.mock('@utils/tracing', () => ({
	getTracingConfig: jest.fn(() => ({})),
}));

jest.mock('../methods/promptUtils', () => ({
	createPromptTemplate: jest.fn(),
}));

describe('chainExecutor', () => {
	let mockContext: jest.Mocked<IExecuteFunctions>;

	beforeEach(() => {
		mockContext = mock<IExecuteFunctions>();
		mockContext.getExecutionCancelSignal = jest.fn().mockReturnValue(undefined);
		jest.clearAllMocks();
	});

	describe('executeChain', () => {
		it('should execute a simple chain without output parsers', async () => {
			const fakeLLM = new FakeLLM({ response: 'Test response' });
			const mockPromptTemplate = new PromptTemplate({
				template: '{query}',
				inputVariables: ['query'],
			});

			const mockChain = {
				invoke: jest.fn().mockResolvedValue('Test response'),
			};
			const withConfigMock = jest.fn().mockReturnValue(mockChain);
			const pipeStringOutputParserMock = jest.fn().mockReturnValue({
				withConfig: withConfigMock,
			});
			const pipeMock = jest.fn().mockReturnValue({
				pipe: pipeStringOutputParserMock,
			});

			mockPromptTemplate.pipe = pipeMock;
			fakeLLM.pipe = jest.fn();

			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);

			const result = await executeChain({
				context: mockContext,
				itemIndex: 0,
				query: 'Hello',
				llm: fakeLLM,
			});

			expect(promptUtils.createPromptTemplate).toHaveBeenCalledWith({
				context: mockContext,
				itemIndex: 0,
				llm: fakeLLM,
				messages: undefined,
				query: 'Hello',
			});

			expect(pipeMock).toHaveBeenCalledWith(fakeLLM);
			expect(pipeStringOutputParserMock).toHaveBeenCalledWith(expect.any(StringOutputParser));
			expect(withConfigMock).toHaveBeenCalledWith(expect.any(Object));

			expect(result).toEqual(['Test response']);

			expect(tracing.getTracingConfig).toHaveBeenCalledWith(mockContext);
		});

		it('should execute a chain with a single output parser', async () => {
			const fakeLLM = new FakeLLM({ response: 'Test response' });
			const mockPromptTemplate = new PromptTemplate({
				template: '{query}\n{formatInstructions}',
				inputVariables: ['query'],
				partialVariables: { formatInstructions: 'Format as JSON' },
			});

			const mockChain = {
				invoke: jest.fn().mockResolvedValue({ result: 'Test response' }),
			};
			const withConfigMock = jest.fn().mockReturnValue(mockChain);
			const pipeOutputParserMock = jest.fn().mockReturnValue({
				withConfig: withConfigMock,
			});
			const pipeMock = jest.fn().mockReturnValue({
				pipe: pipeOutputParserMock,
			});

			mockPromptTemplate.pipe = pipeMock;
			fakeLLM.pipe = jest.fn();

			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);

			const result = await executeChain({
				context: mockContext,
				itemIndex: 0,
				query: 'Hello',
				llm: fakeLLM,
				outputParser: mock<N8nOutputParser>(),
			});

			expect(promptUtils.createPromptTemplate).toHaveBeenCalledWith({
				context: mockContext,
				itemIndex: 0,
				llm: fakeLLM,
				messages: undefined,
				query: 'Hello',
			});

			expect(result).toEqual([{ result: 'Test response' }]);
		});

		it('should wrap non-array responses in an array', async () => {
			const fakeLLM = new FakeLLM({ response: 'Test response' });
			const mockPromptTemplate = new PromptTemplate({
				template: '{query}',
				inputVariables: ['query'],
			});

			const mockOutputParser = mock<N8nOutputParser>();

			const mockChain = {
				invoke: jest.fn().mockResolvedValue({ result: 'Test response' }),
			};
			const withConfigMock = jest.fn().mockReturnValue(mockChain);
			const pipeOutputParserMock = jest.fn().mockReturnValue({
				withConfig: withConfigMock,
			});
			const pipeMock = jest.fn().mockReturnValue({
				pipe: pipeOutputParserMock,
			});

			mockPromptTemplate.pipe = pipeMock;
			fakeLLM.pipe = jest.fn();

			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);

			const result = await executeChain({
				context: mockContext,
				itemIndex: 0,
				query: 'Hello',
				llm: fakeLLM,
				outputParser: mockOutputParser,
			});

			expect(Array.isArray(result)).toBe(true);
			expect(result).toEqual([{ result: 'Test response' }]);
		});

		it('should pass the execution cancel signal to the chain', async () => {
			// For this test, we'll just verify that getExecutionCancelSignal is called
			const fakeLLM = new FakeLLM({ response: 'Test response' });
			const mockPromptTemplate = new PromptTemplate({
				template: '{query}',
				inputVariables: ['query'],
			});

			const mockChain = {
				invoke: jest.fn().mockResolvedValue('Test response'),
			};
			const withConfigMock = jest.fn().mockReturnValue(mockChain);
			const pipeStringOutputParserMock = jest.fn().mockReturnValue({
				withConfig: withConfigMock,
			});
			const pipeMock = jest.fn().mockReturnValue({
				pipe: pipeStringOutputParserMock,
			});

			mockPromptTemplate.pipe = pipeMock;
			fakeLLM.pipe = jest.fn();

			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);

			await executeChain({
				context: mockContext,
				itemIndex: 0,
				query: 'Hello',
				llm: fakeLLM,
			});

			expect(mockContext.getExecutionCancelSignal).toHaveBeenCalled();
			expect(mockChain.invoke).toHaveBeenCalled();
		});

		it('should support chat models', async () => {
			const fakeChatModel = new FakeChatModel({});
			const mockChatPromptTemplate = ChatPromptTemplate.fromMessages([]);

			const mockChain = {
				invoke: jest.fn().mockResolvedValue('Test chat response'),
			};
			const withConfigMock = jest.fn().mockReturnValue(mockChain);
			const pipeStringOutputParserMock = jest.fn().mockReturnValue({
				withConfig: withConfigMock,
			});
			const pipeMock = jest.fn().mockReturnValue({
				pipe: pipeStringOutputParserMock,
			});

			mockChatPromptTemplate.pipe = pipeMock;
			fakeChatModel.pipe = jest.fn();

			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockChatPromptTemplate);

			const result = await executeChain({
				context: mockContext,
				itemIndex: 0,
				query: 'Hello',
				llm: fakeChatModel,
			});

			expect(result).toEqual(['Test chat response']);
		});
	});
});
@@ -0,0 +1,49 @@
import { NodeConnectionType } from 'n8n-workflow';

import { getInputs, nodeProperties } from '../methods/config';

describe('config', () => {
	describe('getInputs', () => {
		it('should return basic inputs for all parameters', () => {
			const inputs = getInputs({});

			expect(inputs).toHaveLength(3);
			expect(inputs[0].type).toBe(NodeConnectionType.Main);
			expect(inputs[1].type).toBe(NodeConnectionType.AiLanguageModel);
			expect(inputs[2].type).toBe(NodeConnectionType.AiOutputParser);
		});

		it('should exclude the OutputParser when hasOutputParser is false', () => {
			const inputs = getInputs({ hasOutputParser: false });

			expect(inputs).toHaveLength(2);
			expect(inputs[0].type).toBe(NodeConnectionType.Main);
			expect(inputs[1].type).toBe(NodeConnectionType.AiLanguageModel);
		});

		it('should include the OutputParser when hasOutputParser is true', () => {
			const inputs = getInputs({ hasOutputParser: true });

			expect(inputs).toHaveLength(3);
			expect(inputs[2].type).toBe(NodeConnectionType.AiOutputParser);
		});
	});

	describe('nodeProperties', () => {
		it('should have the expected properties', () => {
			expect(Array.isArray(nodeProperties)).toBe(true);
			expect(nodeProperties.length).toBeGreaterThan(0);

			const promptParams = nodeProperties.filter((prop) => prop.name === 'prompt');
			expect(promptParams.length).toBeGreaterThan(0);

			const messagesParam = nodeProperties.find((prop) => prop.name === 'messages');
			expect(messagesParam).toBeDefined();
			expect(messagesParam?.type).toBe('fixedCollection');

			const hasOutputParserParam = nodeProperties.find((prop) => prop.name === 'hasOutputParser');
			expect(hasOutputParserParam).toBeDefined();
			expect(hasOutputParserParam?.type).toBe('boolean');
		});
	});
});
@@ -0,0 +1,262 @@
/* eslint-disable @typescript-eslint/no-unsafe-call */
import { HumanMessage } from '@langchain/core/messages';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatOllama } from '@langchain/ollama';
import { mock } from 'jest-mock-extended';
import type { IExecuteFunctions, IBinaryData, INode } from 'n8n-workflow';
import { NodeOperationError } from 'n8n-workflow';

import {
	createImageMessage,
	dataUriFromImageData,
	UnsupportedMimeTypeError,
} from '../methods/imageUtils';
import type { MessageTemplate } from '../methods/types';

// Mock ChatGoogleGenerativeAI and ChatOllama
jest.mock('@langchain/google-genai', () => ({
	ChatGoogleGenerativeAI: class MockChatGoogleGenerativeAI {},
}));

jest.mock('@langchain/ollama', () => ({
	ChatOllama: class MockChatOllama {},
}));

// Create a better mock for IExecuteFunctions that includes helpers
const createMockExecuteFunctions = () => {
	const mockExec = mock<IExecuteFunctions>();
	// Add missing helpers property with mocked getBinaryDataBuffer
	// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
	mockExec.helpers = {
		getBinaryDataBuffer: jest.fn().mockResolvedValue(Buffer.from('Test image data')),
	} as any;
	return mockExec;
};

describe('imageUtils', () => {
	describe('dataUriFromImageData', () => {
		it('should convert image data to data URI', () => {
			const mockBuffer = Buffer.from('Test data');
			const mockBinaryData = mock<IBinaryData>({ mimeType: 'image/jpeg' });

			const dataUri = dataUriFromImageData(mockBinaryData, mockBuffer);
			expect(dataUri).toBe('data:image/jpeg;base64,VGVzdCBkYXRh');
		});

		it('should throw UnsupportedMimeTypeError for non-images', () => {
			const mockBuffer = Buffer.from('Test data');
			const mockBinaryData = mock<IBinaryData>({ mimeType: 'text/plain' });

			expect(() => {
				dataUriFromImageData(mockBinaryData, mockBuffer);
			}).toThrow(UnsupportedMimeTypeError);
		});
	});

	describe('createImageMessage', () => {
		let mockContext: jest.Mocked<IExecuteFunctions>;
		let mockBuffer: Buffer;
		let mockBinaryData: jest.Mocked<IBinaryData>;

		beforeEach(() => {
			mockContext = createMockExecuteFunctions();
			mockBuffer = Buffer.from('Test image data');
			mockBinaryData = mock<IBinaryData>({ mimeType: 'image/png' });

			// Mock required methods
			mockContext.getInputData.mockReturnValue([{ binary: { data: mockBinaryData }, json: {} }]);
			(mockContext.helpers.getBinaryDataBuffer as jest.Mock).mockResolvedValue(mockBuffer);
			mockContext.getInputConnectionData.mockResolvedValue({});
			mockContext.getNode.mockReturnValue({ name: 'TestNode' } as INode);
		});

		it('should throw an error for invalid message type', async () => {
			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'text', // Invalid for this test case
			};

			await expect(
				createImageMessage({
					context: mockContext,
					itemIndex: 0,
					message,
				}),
			).rejects.toThrow(NodeOperationError);
		});

		it('should handle image URL messages', async () => {
			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageUrl',
				imageUrl: 'https://example.com/image.jpg',
				imageDetail: 'high',
			};

			const result = await createImageMessage({
				context: mockContext,
				itemIndex: 0,
				message,
			});

			expect(result).toBeInstanceOf(HumanMessage);
			expect(result.content).toEqual([
				{
					type: 'image_url',
					image_url: {
						url: 'https://example.com/image.jpg',
						detail: 'high',
					},
				},
			]);
		});

		it('should handle image URL messages with auto detail', async () => {
			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageUrl',
				imageUrl: 'https://example.com/image.jpg',
				imageDetail: 'auto',
			};

			const result = await createImageMessage({
				context: mockContext,
				itemIndex: 0,
				message,
			});

			expect(result).toBeInstanceOf(HumanMessage);
			expect(result.content).toEqual([
				{
					type: 'image_url',
					image_url: {
						url: 'https://example.com/image.jpg',
						detail: undefined, // Auto becomes undefined
					},
				},
			]);
		});

		it('should throw an error when binary data is missing', async () => {
			// Set up missing binary data
			mockContext.getInputData.mockReturnValue([{ json: {} }]); // No binary data

			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageBinary',
				binaryImageDataKey: 'data',
			};

			await expect(
				createImageMessage({
					context: mockContext,
					itemIndex: 0,
					message,
				}),
			).rejects.toThrow('No binary data set.');
		});

		it('should handle binary image data for regular models', async () => {
			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageBinary',
				binaryImageDataKey: 'data',
				imageDetail: 'low',
			};

			const result = await createImageMessage({
				context: mockContext,
				itemIndex: 0,
				message,
			});

			expect(result).toBeInstanceOf(HumanMessage);
			expect(result.content).toEqual([
				{
					type: 'image_url',
					image_url: {
						url: 'data:image/png;base64,VGVzdCBpbWFnZSBkYXRh',
						detail: 'low',
					},
				},
			]);
		});

		it('should handle image data differently for GoogleGenerativeAI models', async () => {
			// Mock a Google model - using our mocked class
			mockContext.getInputConnectionData.mockResolvedValue(new ChatGoogleGenerativeAI());

			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageBinary',
				binaryImageDataKey: 'data',
			};

			const result = await createImageMessage({
				context: mockContext,
				itemIndex: 0,
				message,
			});

			expect(result).toBeInstanceOf(HumanMessage);
			expect(result.content).toEqual([
				{
					type: 'image_url',
					image_url: 'data:image/png;base64,VGVzdCBpbWFnZSBkYXRh',
				},
			]);
		});

		it('should handle image data differently for Ollama models', async () => {
			// Mock an Ollama model - using our mocked class
			mockContext.getInputConnectionData.mockResolvedValue(new ChatOllama());

			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageBinary',
				binaryImageDataKey: 'data',
			};

			const result = await createImageMessage({
				context: mockContext,
				itemIndex: 0,
				message,
			});

			expect(result).toBeInstanceOf(HumanMessage);
			expect(result.content).toEqual([
				{
					type: 'image_url',
					image_url: 'data:image/png;base64,VGVzdCBpbWFnZSBkYXRh',
				},
			]);
		});

		it('should pass through UnsupportedMimeTypeError', async () => {
			// Mock a non-image mime type
			mockBinaryData.mimeType = 'application/pdf';

			const message: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageBinary',
				binaryImageDataKey: 'data',
			};

			await expect(
				createImageMessage({
					context: mockContext,
					itemIndex: 0,
					message,
				}),
			).rejects.toThrow(NodeOperationError);
		});
	});
});
@@ -0,0 +1,218 @@
import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
import { FakeLLM, FakeChatModel } from '@langchain/core/utils/testing';
import { mock } from 'jest-mock-extended';
import type { IExecuteFunctions } from 'n8n-workflow';
import { OperationalError } from 'n8n-workflow';

import * as imageUtils from '../methods/imageUtils';
import { createPromptTemplate } from '../methods/promptUtils';
import type { MessageTemplate } from '../methods/types';

jest.mock('../methods/imageUtils', () => ({
	createImageMessage: jest.fn(),
}));

describe('promptUtils', () => {
	describe('createPromptTemplate', () => {
		let mockContext: jest.Mocked<IExecuteFunctions>;

		beforeEach(() => {
			mockContext = mock<IExecuteFunctions>();
			jest.clearAllMocks();
		});

		it('should create a simple prompt template for non-chat models', async () => {
			const fakeLLM = new FakeLLM({});
			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeLLM,
				query: 'Test query',
			});

			expect(result).toBeInstanceOf(PromptTemplate);
			expect(result.inputVariables).toContain('query');
		});

		it('should create a prompt template with format instructions', async () => {
			const fakeLLM = new FakeLLM({});
			const formatInstructions = 'Format your response as JSON';

			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeLLM,
				formatInstructions,
				query: 'Test query',
			});

			expect(result).toBeInstanceOf(PromptTemplate);
			expect(result.inputVariables).toContain('query');

			// Check that format instructions are included in the template
			const formattedResult = await result.format({ query: 'Test' });
			expect(formattedResult).toContain(formatInstructions);
		});

		it('should create a chat prompt template for chat models', async () => {
			const fakeChatModel = new FakeChatModel({});

			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeChatModel,
				query: 'Test query',
			});

			expect(result).toBeInstanceOf(ChatPromptTemplate);
		});

		it('should process text messages correctly', async () => {
			const fakeChatModel = new FakeChatModel({});
			const messages: MessageTemplate[] = [
				{
					type: 'SystemMessagePromptTemplate',
					message: 'You are a helpful assistant',
					messageType: 'text',
				},
				{
					type: 'AIMessagePromptTemplate',
					message: 'How can I help you?',
					messageType: 'text',
				},
			];

			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeChatModel,
				messages,
				query: 'Tell me a joke',
			});

			expect(result).toBeInstanceOf(ChatPromptTemplate);

			const formattedMessages = await (result as ChatPromptTemplate).formatMessages({
				query: 'Tell me a joke',
			});
			expect(formattedMessages).toHaveLength(3); // 2 messages + 1 query
			expect(formattedMessages[0].content).toBe('You are a helpful assistant');
			expect(formattedMessages[1].content).toBe('How can I help you?');
		});

		it('should escape curly braces in messages', async () => {
			const fakeChatModel = new FakeChatModel({});
			const messages: MessageTemplate[] = [
				{
					type: 'SystemMessagePromptTemplate',
					message: 'You are a {helpful} assistant',
					messageType: 'text',
				},
			];

			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeChatModel,
				messages,
				query: 'Tell me a joke',
			});

			// Validate the messages have escaped curly braces
			const formattedMessages = await (result as ChatPromptTemplate).formatMessages({
				query: 'Tell me a joke',
			});
			expect(formattedMessages[0].content).toBe('You are a {helpful} assistant');
		});

		it('should handle image messages by calling createImageMessage', async () => {
			const fakeChatModel = new FakeChatModel({});
			const imageMessage: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageUrl',
				imageUrl: 'https://example.com/image.jpg',
			};

			// Mock the image message creation
			const mockHumanMessage = new HumanMessage({
				content: [{ type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } }],
			});
			(imageUtils.createImageMessage as jest.Mock).mockResolvedValue(mockHumanMessage);

			await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeChatModel,
				messages: [imageMessage],
				query: 'Describe this image',
			});

			expect(imageUtils.createImageMessage).toHaveBeenCalledWith({
				context: mockContext,
				itemIndex: 0,
				message: imageMessage,
			});
		});

		it('should throw an error for invalid message types', async () => {
			const fakeChatModel = new FakeChatModel({});
			const messages: MessageTemplate[] = [
				{
					type: 'InvalidMessageType',
					message: 'This is an invalid message',
					messageType: 'text',
				},
			];

			await expect(
				createPromptTemplate({
					context: mockContext,
					itemIndex: 0,
					llm: fakeChatModel,
					messages,
					query: 'Test query',
				}),
			).rejects.toThrow(OperationalError);
		});

		it('should add the query to an existing human message with content if it exists', async () => {
			const fakeChatModel = new FakeChatModel({});

			// Create a mock image message with content array
			const mockHumanMessage = new HumanMessage({
				content: [{ type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } }],
			});
			(imageUtils.createImageMessage as jest.Mock).mockResolvedValue(mockHumanMessage);

			const imageMessage: MessageTemplate = {
				type: 'HumanMessagePromptTemplate',
				message: '',
				messageType: 'imageUrl',
				imageUrl: 'https://example.com/image.jpg',
			};

			const result = await createPromptTemplate({
				context: mockContext,
				itemIndex: 0,
				llm: fakeChatModel,
				messages: [imageMessage],
				query: 'Describe this image',
			});

			// Format the message and check that the query was added to the existing content
			const formattedMessages = await (result as ChatPromptTemplate).formatMessages({
				query: 'Describe this image',
			});
			expect(formattedMessages).toHaveLength(1);

			// The content should now have the original image and the text query
			const content = formattedMessages[0].content as any[];
			expect(content).toHaveLength(2);
			expect(content[0].type).toBe('image_url');
			expect(content[1].type).toBe('text');
			expect(content[1].text).toContain('Describe this image');
		});
	});
});
@@ -0,0 +1,45 @@
import { formatResponse } from '../methods/responseFormatter';

describe('responseFormatter', () => {
	describe('formatResponse', () => {
		it('should format string responses', () => {
			const result = formatResponse('Test response');
			expect(result).toEqual({
				response: {
					text: 'Test response',
				},
			});
		});

		it('should trim string responses', () => {
			const result = formatResponse('  Test response with whitespace  ');
			expect(result).toEqual({
				response: {
					text: 'Test response with whitespace',
				},
			});
		});

		it('should handle array responses', () => {
			const testArray = [{ item: 1 }, { item: 2 }];
			const result = formatResponse(testArray);
			expect(result).toEqual({ data: testArray });
		});

		it('should handle object responses', () => {
			const testObject = { key: 'value', nested: { key: 'value' } };
			const result = formatResponse(testObject);
			expect(result).toEqual(testObject);
		});

		it('should handle primitive non-string responses', () => {
			const testNumber = 42;
			const result = formatResponse(testNumber);
			expect(result).toEqual({
				response: {
					text: 42,
				},
			});
		});
	});
});
@@ -1,23 +0,0 @@
import { mock } from 'jest-mock-extended';
import type { IBinaryData } from 'n8n-workflow';

import { dataUriFromImageData, UnsupportedMimeTypeError } from '../utils';

describe('dataUriFromImageData', () => {
	it('should not throw an error on images', async () => {
		const mockBuffer = Buffer.from('Test data');
		const mockBinaryData = mock<IBinaryData>({ mimeType: 'image/jpeg' });

		const dataUri = dataUriFromImageData(mockBinaryData, mockBuffer);
		expect(dataUri).toBe('data:image/jpeg;base64,VGVzdCBkYXRh');
	});

	it('should throw an UnsupportedMimeTypeError on non-images', async () => {
		const mockBuffer = Buffer.from('Test data');
		const mockBinaryData = mock<IBinaryData>({ mimeType: 'text/plain' });

		expect(() => {
			dataUriFromImageData(mockBinaryData, mockBuffer);
		}).toThrow(UnsupportedMimeTypeError);
	});
});
@@ -1,12 +0,0 @@
import type { IBinaryData } from 'n8n-workflow';
import { ApplicationError } from 'n8n-workflow';

export class UnsupportedMimeTypeError extends ApplicationError {}

export function dataUriFromImageData(binaryData: IBinaryData, bufferData: Buffer) {
	if (!binaryData.mimeType?.startsWith('image/'))
		throw new UnsupportedMimeTypeError(
			`${binaryData.mimeType} is not a supported type of binary data. Only images are supported.`,
		);
	return `data:${binaryData.mimeType};base64,${bufferData.toString('base64')}`;
}
@@ -1,5 +1,5 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import { ChatOpenAI } from '@langchain/openai';
import { AzureChatOpenAI } from '@langchain/openai';
import {
	NodeConnectionType,
	type INodeType,
@@ -184,7 +184,7 @@ export class LmChatAzureOpenAi implements INodeType {
	responseFormat?: 'text' | 'json_object';
};

const model = new ChatOpenAI({
const model = new AzureChatOpenAI({
	azureOpenAIApiDeploymentName: modelName,
	// instance name only needed to set base url
	azureOpenAIApiInstanceName: !credentials.endpoint ? credentials.resourceName : undefined,
@@ -12,15 +12,17 @@ export type N8nOutputParser =
export { N8nOutputFixingParser, N8nItemListOutputParser, N8nStructuredOutputParser };

export async function getOptionalOutputParsers(ctx: IExecuteFunctions): Promise<N8nOutputParser[]> {
	let outputParsers: N8nOutputParser[] = [];
export async function getOptionalOutputParser(
	ctx: IExecuteFunctions,
): Promise<N8nOutputParser | undefined> {
	let outputParser: N8nOutputParser | undefined;

	if (ctx.getNodeParameter('hasOutputParser', 0, true) === true) {
		outputParsers = (await ctx.getInputConnectionData(
		outputParser = (await ctx.getInputConnectionData(
			NodeConnectionType.AiOutputParser,
			0,
		)) as N8nOutputParser[];
		)) as N8nOutputParser;
	}

	return outputParsers;
	return outputParser;
}
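A sketch of the single-parser contract this introduces, from a caller's perspective; rawModelOutput and the plain-text fallback are hypothetical, and `this` stands in for the calling node's IExecuteFunctions:

const parser = await getOptionalOutputParser(this);
const output = parser
	? await parser.parse(rawModelOutput) // structured path when a parser is connected
	: rawModelOutput; // otherwise the raw model text passes through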