Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-20)
refactor(Basic LLM Chain Node): Refactor Basic LLM Chain & add tests (#13850)
@@ -0,0 +1,90 @@
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { StringOutputParser } from '@langchain/core/output_parsers';
import type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
import type { IExecuteFunctions } from 'n8n-workflow';

import { getTracingConfig } from '@utils/tracing';

import { createPromptTemplate } from './promptUtils';
import type { ChainExecutionParams } from './types';

/**
 * Creates a simple chain for LLMs without output parsers
 */
async function executeSimpleChain({
	context,
	llm,
	query,
	prompt,
}: {
	context: IExecuteFunctions;
	llm: BaseLanguageModel;
	query: string;
	prompt: ChatPromptTemplate | PromptTemplate;
}): Promise<string[]> {
	const chain = prompt
		.pipe(llm)
		.pipe(new StringOutputParser())
		.withConfig(getTracingConfig(context));

	// Execute the chain. The cancel signal belongs in the invoke config (second
	// argument), not in the prompt input, so that aborting the workflow
	// execution actually cancels the in-flight LLM call.
	const response = await chain.invoke(
		{ query },
		{ signal: context.getExecutionCancelSignal() },
	);

	// Ensure response is always returned as an array
	return [response];
}

/**
 * Creates and executes an LLM chain with the given prompt and optional output parsers
 */
export async function executeChain({
	context,
	itemIndex,
	query,
	llm,
	outputParser,
	messages,
}: ChainExecutionParams): Promise<unknown[]> {
	// If no output parser is provided, use a simple chain with a basic prompt template
	if (!outputParser) {
		const promptTemplate = await createPromptTemplate({
			context,
			itemIndex,
			llm,
			messages,
			query,
		});

		return await executeSimpleChain({
			context,
			llm,
			query,
			prompt: promptTemplate,
		});
	}

	const formatInstructions = outputParser.getFormatInstructions();

	// Create a prompt template with format instructions
	const promptWithInstructions = await createPromptTemplate({
		context,
		itemIndex,
		llm,
		messages,
		formatInstructions,
		query,
	});

	const chain = promptWithInstructions
		.pipe(llm)
		.pipe(outputParser)
		.withConfig(getTracingConfig(context));
	const response = await chain.invoke({ query }, { signal: context.getExecutionCancelSignal() });

	// Ensure response is always returned as an array
	// eslint-disable-next-line @typescript-eslint/no-unsafe-return
	return Array.isArray(response) ? response : [response];
}
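A minimal sketch of how a node's execute() might call executeChain for one item. The connection lookup is standard n8n, but `getOptionalOutputParser` and the parameter names are assumptions, not confirmed by this diff:

// Sketch only (not part of this commit): illustrates the calling convention of executeChain
const llm = (await this.getInputConnectionData(
	NodeConnectionType.AiLanguageModel,
	0,
)) as BaseLanguageModel;
const outputParser = await getOptionalOutputParser(this); // assumed helper name
const responses = await executeChain({
	context: this,
	itemIndex,
	query: this.getNodeParameter('text', itemIndex) as string, // 'text' per nodeProperties below
	llm,
	outputParser,
	messages, // MessageTemplate[] read from the 'messages' parameter
});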
@@ -0,0 +1,273 @@
import {
	AIMessagePromptTemplate,
	HumanMessagePromptTemplate,
	SystemMessagePromptTemplate,
} from '@langchain/core/prompts';
import type { IDataObject, INodeProperties } from 'n8n-workflow';
import { NodeConnectionType } from 'n8n-workflow';

import { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';
import { getTemplateNoticeField } from '@utils/sharedFields';

/**
 * Generates the node's input configuration dynamically, based on the node parameters
 */
export function getInputs(parameters: IDataObject) {
	const inputs = [
		{ displayName: '', type: NodeConnectionType.Main },
		{
			displayName: 'Model',
			maxConnections: 1,
			type: NodeConnectionType.AiLanguageModel,
			required: true,
		},
	];

	// If `hasOutputParser` is undefined, the node must be version 1.3 or earlier,
	// so we always add the output parser input
	const hasOutputParser = parameters?.hasOutputParser;
	if (hasOutputParser === undefined || hasOutputParser === true) {
		inputs.push({
			displayName: 'Output Parser',
			type: NodeConnectionType.AiOutputParser,
			maxConnections: 1,
			required: false,
		});
	}

	return inputs;
}
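For illustration, the two input shapes getInputs can produce:

// Sketch (not part of this commit)
getInputs({}); // hasOutputParser undefined, i.e. node version <= 1.3
// -> [Main, Model (required), Output Parser (optional)]
getInputs({ hasOutputParser: false });
// -> [Main, Model (required)]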
/**
 * Node properties configuration
 */
export const nodeProperties: INodeProperties[] = [
	getTemplateNoticeField(1978),
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'string',
		required: true,
		default: '={{ $json.input }}',
		displayOptions: {
			show: {
				'@version': [1],
			},
		},
	},
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'string',
		required: true,
		default: '={{ $json.chat_input }}',
		displayOptions: {
			show: {
				'@version': [1.1, 1.2],
			},
		},
	},
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'string',
		required: true,
		default: '={{ $json.chatInput }}',
		displayOptions: {
			show: {
				'@version': [1.3],
			},
		},
	},
	{
		...promptTypeOptions,
		displayOptions: {
			hide: {
				'@version': [1, 1.1, 1.2, 1.3],
			},
		},
	},
	{
		...textFromPreviousNode,
		displayOptions: { show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.5 } }] } },
	},
	{
		displayName: 'Prompt (User Message)',
		name: 'text',
		type: 'string',
		required: true,
		default: '',
		placeholder: 'e.g. Hello, how can you help me?',
		typeOptions: {
			rows: 2,
		},
		displayOptions: {
			show: {
				promptType: ['define'],
			},
		},
	},
	{
		displayName: 'Require Specific Output Format',
		name: 'hasOutputParser',
		type: 'boolean',
		default: false,
		noDataExpression: true,
		displayOptions: {
			hide: {
				'@version': [1, 1.1, 1.3],
			},
		},
	},
	{
		displayName: 'Chat Messages (if Using a Chat Model)',
		name: 'messages',
		type: 'fixedCollection',
		typeOptions: {
			multipleValues: true,
		},
		default: {},
		placeholder: 'Add prompt',
		options: [
			{
				name: 'messageValues',
				displayName: 'Prompt',
				values: [
					{
						displayName: 'Type Name or ID',
						name: 'type',
						type: 'options',
						options: [
							{
								name: 'AI',
								value: AIMessagePromptTemplate.lc_name(),
							},
							{
								name: 'System',
								value: SystemMessagePromptTemplate.lc_name(),
							},
							{
								name: 'User',
								value: HumanMessagePromptTemplate.lc_name(),
							},
						],
						default: SystemMessagePromptTemplate.lc_name(),
					},
					{
						displayName: 'Message Type',
						name: 'messageType',
						type: 'options',
						displayOptions: {
							show: {
								type: [HumanMessagePromptTemplate.lc_name()],
							},
						},
						options: [
							{
								name: 'Text',
								value: 'text',
								description: 'Simple text message',
							},
							{
								name: 'Image (Binary)',
								value: 'imageBinary',
								description: 'Process the binary input from the previous node',
							},
							{
								name: 'Image (URL)',
								value: 'imageUrl',
								description: 'Process the image from the specified URL',
							},
						],
						default: 'text',
					},
					{
						displayName: 'Image Data Field Name',
						name: 'binaryImageDataKey',
						type: 'string',
						default: 'data',
						required: true,
						description:
							"The name of the field in the chain's input that contains the binary image file to be processed",
						displayOptions: {
							show: {
								messageType: ['imageBinary'],
							},
						},
					},
					{
						displayName: 'Image URL',
						name: 'imageUrl',
						type: 'string',
						default: '',
						required: true,
						description: 'URL to the image to be processed',
						displayOptions: {
							show: {
								messageType: ['imageUrl'],
							},
						},
					},
					{
						displayName: 'Image Details',
						description:
							'Control how the model processes the image and generates its textual understanding',
						name: 'imageDetail',
						type: 'options',
						displayOptions: {
							show: {
								type: [HumanMessagePromptTemplate.lc_name()],
								messageType: ['imageBinary', 'imageUrl'],
							},
						},
						options: [
							{
								name: 'Auto',
								value: 'auto',
								description:
									'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',
							},
							{
								name: 'Low',
								value: 'low',
								description:
									'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',
							},
							{
								name: 'High',
								value: 'high',
								description:
									'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',
							},
						],
						default: 'auto',
					},
					{
						displayName: 'Message',
						name: 'message',
						type: 'string',
						required: true,
						displayOptions: {
							hide: {
								messageType: ['imageBinary', 'imageUrl'],
							},
						},
						default: '',
					},
				],
			},
		],
	},
	{
		displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
		name: 'notice',
		type: 'notice',
		default: '',
		displayOptions: {
			show: {
				hasOutputParser: [true],
			},
		},
	},
];
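At runtime the `messages` fixedCollection above surfaces as an array of MessageTemplate objects; a sketch of reading it (the parameter path follows n8n's usual fixedCollection convention, assumed here):

// Sketch (not part of this commit)
const messages = context.getNodeParameter(
	'messages.messageValues',
	itemIndex,
	[],
) as MessageTemplate[];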
@@ -0,0 +1,99 @@
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatOllama } from '@langchain/ollama';
import type { IExecuteFunctions, IBinaryData } from 'n8n-workflow';
import { NodeOperationError, NodeConnectionType, OperationalError } from 'n8n-workflow';

import type { MessageTemplate } from './types';

export class UnsupportedMimeTypeError extends OperationalError {}

/**
 * Converts binary image data to a data URI
 */
export function dataUriFromImageData(binaryData: IBinaryData, bufferData: Buffer): string {
	if (!binaryData.mimeType?.startsWith('image/')) {
		throw new UnsupportedMimeTypeError(
			`${binaryData.mimeType} is not a supported type of binary data. Only images are supported.`,
		);
	}
	return `data:${binaryData.mimeType};base64,${bufferData.toString('base64')}`;
}
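For example (illustrative values):

// Sketch (not part of this commit)
dataUriFromImageData({ mimeType: 'image/png' } as IBinaryData, Buffer.from([0x89, 0x50, 0x4e, 0x47]));
// -> 'data:image/png;base64,iVBORw=='
// Any non-image mimeType (e.g. 'application/pdf') throws UnsupportedMimeTypeError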
/**
 * Creates a human message with image content from either binary data or URL
 */
export async function createImageMessage({
	context,
	itemIndex,
	message,
}: {
	context: IExecuteFunctions;
	itemIndex: number;
	message: MessageTemplate;
}): Promise<HumanMessage> {
	// Validate message type
	if (message.messageType !== 'imageBinary' && message.messageType !== 'imageUrl') {
		throw new NodeOperationError(
			context.getNode(),
			'Invalid message type. Only imageBinary and imageUrl are supported',
		);
	}

	const detail = message.imageDetail === 'auto' ? undefined : message.imageDetail;

	// Handle image URL case
	if (message.messageType === 'imageUrl' && message.imageUrl) {
		return new HumanMessage({
			content: [
				{
					type: 'image_url',
					image_url: {
						url: message.imageUrl,
						detail,
					},
				},
			],
		});
	}

	// Handle binary image case
	const binaryDataKey = message.binaryImageDataKey ?? 'data';
	const inputData = context.getInputData()[itemIndex];
	const binaryData = inputData.binary?.[binaryDataKey] as IBinaryData;

	if (!binaryData) {
		throw new NodeOperationError(context.getNode(), 'No binary data set.');
	}

	const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
	const model = (await context.getInputConnectionData(
		NodeConnectionType.AiLanguageModel,
		0,
	)) as BaseLanguageModel;

	try {
		// Create data URI from binary data
		const dataURI = dataUriFromImageData(binaryData, bufferData);

		// Some models need different image URL formats
		const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
		const imageUrl = directUriModels.some((i) => model instanceof i)
			? dataURI
			: { url: dataURI, detail };

		return new HumanMessage({
			content: [
				{
					type: 'image_url',
					image_url: imageUrl,
				},
			],
		});
	} catch (error) {
		if (error instanceof UnsupportedMimeTypeError)
			throw new NodeOperationError(context.getNode(), error.message);
		throw error;
	}
}
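The two resulting content shapes, for reference (values illustrative):

// OpenAI-style models receive an object:
//   { type: 'image_url', image_url: { url: 'data:image/...;base64,...', detail: 'low' } }
// ChatGoogleGenerativeAI and ChatOllama receive the bare data URI string:
//   { type: 'image_url', image_url: 'data:image/...;base64,...' }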
@@ -0,0 +1,4 @@
export { executeChain } from './chainExecutor';
export { getInputs, nodeProperties } from './config';
export { formatResponse } from './responseFormatter';
export type { MessageTemplate } from './types';
@@ -0,0 +1,145 @@
import { HumanMessage } from '@langchain/core/messages';
import type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';
import {
	AIMessagePromptTemplate,
	PromptTemplate,
	SystemMessagePromptTemplate,
	HumanMessagePromptTemplate,
	ChatPromptTemplate,
} from '@langchain/core/prompts';
import type { IExecuteFunctions } from 'n8n-workflow';
import { OperationalError } from 'n8n-workflow';

import { isChatInstance } from '@utils/helpers';

import { createImageMessage } from './imageUtils';
import type { MessageTemplate, PromptParams } from './types';

/**
 * Creates a basic query template that may include format instructions
 */
function buildQueryTemplate(formatInstructions?: string): PromptTemplate {
	return new PromptTemplate({
		template: `{query}${formatInstructions ? '\n{formatInstructions}' : ''}`,
		inputVariables: ['query'],
		partialVariables: formatInstructions ? { formatInstructions } : undefined,
	});
}
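For example (illustrative):

// Sketch (not part of this commit)
const t = buildQueryTemplate('Reply in JSON.');
await t.format({ query: 'What is n8n?' });
// -> 'What is n8n?\nReply in JSON.'
// Without formatInstructions the template is just '{query}'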
/**
 * Process an array of message templates into LangChain message objects
 */
async function processMessageTemplates({
	context,
	itemIndex,
	messages,
}: {
	context: IExecuteFunctions;
	itemIndex: number;
	messages: MessageTemplate[];
}): Promise<BaseMessagePromptTemplateLike[]> {
	return await Promise.all(
		messages.map(async (message) => {
			// Find the appropriate message class based on type
			const messageClass = [
				SystemMessagePromptTemplate,
				AIMessagePromptTemplate,
				HumanMessagePromptTemplate,
			].find((m) => m.lc_name() === message.type);

			if (!messageClass) {
				throw new OperationalError('Invalid message type', {
					extra: { messageType: message.type },
				});
			}

			// Handle image messages specially for human messages
			if (messageClass === HumanMessagePromptTemplate && message.messageType !== 'text') {
				return await createImageMessage({ context, itemIndex, message });
			}

			// Process text messages
			// Escape curly braces in the message to prevent LangChain from treating them as variables
			return messageClass.fromTemplate(
				(message.message || '').replace(/[{}]/g, (match) => match + match),
			);
		}),
	);
}
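The brace doubling is what lets user text containing literal JSON pass through LangChain templates unchanged; for example:

// Sketch (not part of this commit)
'Respond with {"ok": true}'.replace(/[{}]/g, (m) => m + m);
// -> 'Respond with {{"ok": true}}'
// LangChain reads {{ and }} as escaped literal braces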
/**
 * Finalizes the prompt template by adding or updating the query in the message chain
 */
async function finalizePromptTemplate({
	parsedMessages,
	queryTemplate,
	query,
}: {
	parsedMessages: BaseMessagePromptTemplateLike[];
	queryTemplate: PromptTemplate;
	query?: string;
}): Promise<ChatPromptTemplate> {
	// Check if the last message is a human message with a multi-content array,
	// i.e. an image message produced by createImageMessage
	const lastMessage = parsedMessages[parsedMessages.length - 1];

	if (lastMessage instanceof HumanMessage && Array.isArray(lastMessage.content)) {
		// Add the query to the existing human message content
		const humanMessage = new HumanMessagePromptTemplate(queryTemplate);

		// Format the message with the query
		const formattedMessage = await humanMessage.format({ query });

		// Clone the current content array and append the query as a text item
		lastMessage.content = [
			...lastMessage.content,
			{
				text: formattedMessage.content.toString(),
				type: 'text',
			},
		];
	} else {
		// Otherwise, add a new human message with the query
		parsedMessages.push(new HumanMessagePromptTemplate(queryTemplate));
	}

	return ChatPromptTemplate.fromMessages(parsedMessages);
}
/**
 * Builds the appropriate prompt template based on model type (chat vs completion)
 * and provided messages
 */
export async function createPromptTemplate({
	context,
	itemIndex,
	llm,
	messages,
	formatInstructions,
	query,
}: PromptParams) {
	// Create base query template
	const queryTemplate = buildQueryTemplate(formatInstructions);

	// For non-chat models, just return the query template
	if (!isChatInstance(llm)) {
		return queryTemplate;
	}

	// For chat models, process the messages if provided
	const parsedMessages = messages?.length
		? await processMessageTemplates({ context, itemIndex, messages })
		: [];

	// Add or update the query in the message chain
	return await finalizePromptTemplate({
		parsedMessages,
		queryTemplate,
		query,
	});
}
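A sketch of both return paths (the model instances are placeholders):

// Sketch (not part of this commit)
const p1 = await createPromptTemplate({ context, itemIndex: 0, llm: completionModel });
// p1: PromptTemplate with template '{query}'
const p2 = await createPromptTemplate({
	context,
	itemIndex: 0,
	llm: chatModel,
	messages: [{ type: SystemMessagePromptTemplate.lc_name(), message: 'Be terse.', messageType: 'text' }],
});
// p2: ChatPromptTemplate of [system('Be terse.'), human('{query}')]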
@@ -0,0 +1,30 @@
import type { IDataObject } from 'n8n-workflow';

/**
 * Formats the response from the LLM chain into a consistent structure
 */
export function formatResponse(response: unknown): IDataObject {
	if (typeof response === 'string') {
		return {
			response: {
				text: response.trim(),
			},
		};
	}

	if (Array.isArray(response)) {
		return {
			data: response,
		};
	}

	if (response instanceof Object) {
		return response as IDataObject;
	}

	return {
		response: {
			text: response,
		},
	};
}
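Its mapping at a glance:

// Sketch (not part of this commit)
formatResponse('  hi  '); // -> { response: { text: 'hi' } }
formatResponse([1, 2]);   // -> { data: [1, 2] }
formatResponse({ a: 1 }); // -> { a: 1 }
formatResponse(42);       // -> { response: { text: 42 } }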
@@ -0,0 +1,41 @@
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { IExecuteFunctions } from 'n8n-workflow';

import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';

/**
 * Interface for describing a message template in the UI
 */
export interface MessageTemplate {
	type: string;
	message: string;
	messageType: 'text' | 'imageBinary' | 'imageUrl';
	binaryImageDataKey?: string;
	imageUrl?: string;
	imageDetail?: 'auto' | 'low' | 'high';
}

/**
 * Parameters for prompt creation
 */
export interface PromptParams {
	context: IExecuteFunctions;
	itemIndex: number;
	llm: BaseLanguageModel | BaseChatModel;
	messages?: MessageTemplate[];
	formatInstructions?: string;
	query?: string;
}

/**
 * Parameters for chain execution
 */
export interface ChainExecutionParams {
	context: IExecuteFunctions;
	itemIndex: number;
	query: string;
	llm: BaseLanguageModel;
	outputParser?: N8nOutputParser;
	messages?: MessageTemplate[];
}
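A sample value satisfying MessageTemplate (illustrative; lc_name() comes from @langchain/core/prompts):

// Sketch (not part of this commit)
const example: MessageTemplate = {
	type: HumanMessagePromptTemplate.lc_name(),
	message: '',
	messageType: 'imageUrl',
	imageUrl: 'https://example.com/cat.png',
	imageDetail: 'low',
};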