fix(Basic LLM Chain Node): Prevent incorrect wrapping of output (#14183)
ChainLlm.node.ts

@@ -34,7 +34,7 @@ export class ChainLlm implements INodeType {
 		icon: 'fa:link',
 		iconColor: 'black',
 		group: ['transform'],
-		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
+		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
 		description: 'A simple chain to prompt a large language model',
 		defaults: {
 			name: 'Basic LLM Chain',
@@ -119,7 +119,7 @@ export class ChainLlm implements INodeType {
 			// Process each response and add to return data
 			responses.forEach((response) => {
 				returnData.push({
-					json: formatResponse(response),
+					json: formatResponse(response, this.getNode().typeVersion),
 				});
 			});
 		} catch (error) {
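Adding 1.6 to the version array and threading this.getNode().typeVersion into formatResponse is what keeps the fix backward compatible: an n8n workflow pins the typeVersion its node was created with, so existing workflows stay on 1.5 and keep the old output shape, while newly added nodes run at 1.6. A minimal sketch of the gate as the hunks below define it (the decision rule only, not the n8n source):

// Sketch of the version gate applied to object responses; the real
// implementation lives in methods/responseFormatter.ts below.
function gateObjectResponse(response: object, typeVersion: number): object {
	// 1.6+ passes the model's JSON output through untouched; older node
	// versions receive it stringified under a `text` key.
	return typeVersion >= 1.6 ? response : { text: JSON.stringify(response) };
}

gateObjectResponse({ answer: 42 }, 1.6); // { answer: 42 }
gateObjectResponse({ answer: 42 }, 1.5); // { text: '{"answer":42}' }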
methods/chainExecutor.ts

@@ -1,5 +1,6 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseLLMOutputParser } from '@langchain/core/output_parsers';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import type { IExecuteFunctions } from 'n8n-workflow';
 
@@ -8,6 +9,46 @@ import { getTracingConfig } from '@utils/tracing';
 import { createPromptTemplate } from './promptUtils';
 import type { ChainExecutionParams } from './types';
 
+/**
+ * Type guard to check if the LLM has a modelKwargs property (OpenAI)
+ */
+export function isModelWithResponseFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { modelKwargs: { response_format: { type: string } } } {
+	return (
+		'modelKwargs' in llm &&
+		!!llm.modelKwargs &&
+		typeof llm.modelKwargs === 'object' &&
+		'response_format' in llm.modelKwargs
+	);
+}
+
+/**
+ * Type guard to check if the LLM has a format property (Ollama)
+ */
+export function isModelWithFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { format: string } {
+	return 'format' in llm && typeof llm.format !== 'undefined';
+}
+
+/**
+ * Determines if an LLM is configured to output JSON and returns the appropriate output parser
+ */
+export function getOutputParserForLLM(
+	llm: BaseLanguageModel,
+): BaseLLMOutputParser<string | Record<string, unknown>> {
+	if (isModelWithResponseFormat(llm) && llm.modelKwargs?.response_format?.type === 'json_object') {
+		return new JsonOutputParser();
+	}
+
+	if (isModelWithFormat(llm) && llm.format === 'json') {
+		return new JsonOutputParser();
+	}
+
+	return new StringOutputParser();
+}
+
 /**
  * Creates a simple chain for LLMs without output parsers
  */
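The selection logic can be exercised in isolation. A short sketch, using stand-in objects that carry only the properties the type guards probe for, mirroring the shapes asserted in the tests further down (the relative import path is illustrative):

// Stand-in models; only the probed properties matter to the type guards.
import type { BaseLanguageModel } from '@langchain/core/language_models/base';

import { getOutputParserForLLM } from './chainExecutor';

const openAiLike = { modelKwargs: { response_format: { type: 'json_object' } } };
const ollamaLike = { format: 'json' };
const plainModel = {};

getOutputParserForLLM(openAiLike as unknown as BaseLanguageModel); // JsonOutputParser
getOutputParserForLLM(ollamaLike as unknown as BaseLanguageModel); // JsonOutputParser
getOutputParserForLLM(plainModel as unknown as BaseLanguageModel); // StringOutputParser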
@@ -21,11 +62,10 @@ async function executeSimpleChain({
 	llm: BaseLanguageModel;
 	query: string;
 	prompt: ChatPromptTemplate | PromptTemplate;
-}): Promise<string[]> {
-	const chain = prompt
-		.pipe(llm)
-		.pipe(new StringOutputParser())
-		.withConfig(getTracingConfig(context));
+}) {
+	const outputParser = getOutputParserForLLM(llm);
+
+	const chain = prompt.pipe(llm).pipe(outputParser).withConfig(getTracingConfig(context));
 
 	// Execute the chain
 	const response = await chain.invoke({
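This is also why executeSimpleChain's return type is loosened from Promise<string[]>: with a JsonOutputParser in the pipe, chain.invoke() resolves to a parsed object rather than a string, and that object is exactly what reaches formatResponse. A standalone sketch of the parser's effect, assuming @langchain/core is available:

import { JsonOutputParser } from '@langchain/core/output_parsers';

async function demo(): Promise<void> {
	const parser = new JsonOutputParser();
	// The raw model output is a JSON string...
	const parsed = await parser.parse('{"result": "json data"}');
	// ...but what leaves the chain is an object: { result: 'json data' }.
	// Under the old unconditional StringOutputParser it stayed a string,
	// which is why formatResponse must now version-gate object handling.
	console.log(parsed);
}

void demo();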
methods/responseFormatter.ts

@@ -3,12 +3,10 @@ import type { IDataObject } from 'n8n-workflow';
 /**
  * Formats the response from the LLM chain into a consistent structure
  */
-export function formatResponse(response: unknown): IDataObject {
+export function formatResponse(response: unknown, version: number): IDataObject {
 	if (typeof response === 'string') {
 		return {
-			response: {
-				text: response.trim(),
-			},
+			text: response.trim(),
 		};
 	}

@@ -19,9 +17,15 @@ export function formatResponse(response: unknown): IDataObject {
 	}
 
 	if (response instanceof Object) {
+		if (version >= 1.6) {
 			return response as IDataObject;
 		}
+
+		return {
+			text: JSON.stringify(response),
+		};
+	}
+
 	return {
 		response: {
 			text: response,
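Taken together, the new behaviour follows directly from these hunks and the updated tests: strings lose the response wrapper unconditionally, objects are version-gated, and primitive non-strings keep the old wrapping. Worked examples (the import path is illustrative):

import { formatResponse } from './methods/responseFormatter'; // illustrative path

formatResponse(' hi ', 1.6);         // { text: 'hi' }, trimmed, no `response` wrapper
formatResponse({ answer: 42 }, 1.6); // { answer: 42 }, object passed through on 1.6+
formatResponse({ answer: 42 }, 1.5); // { text: '{"answer":42}' }, pre-1.6 gets stringified JSON
formatResponse(42, 1.6);             // { response: { text: 42 } }, primitives keep the old shape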
test/chainExecutor.test.ts

@@ -1,4 +1,5 @@
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import { FakeLLM, FakeChatModel } from '@langchain/core/utils/testing';
 import { mock } from 'jest-mock-extended';

@@ -8,6 +9,7 @@ import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import * as tracing from '@utils/tracing';
 
 import { executeChain } from '../methods/chainExecutor';
+import * as chainExecutor from '../methods/chainExecutor';
 import * as promptUtils from '../methods/promptUtils';
 
 jest.mock('@utils/tracing', () => ({

@@ -27,6 +29,41 @@ describe('chainExecutor', () => {
 		jest.clearAllMocks();
 	});
 
+	describe('getOutputParserForLLM', () => {
+		it('should return JsonOutputParser for OpenAI-like models with json_object response format', () => {
+			const openAILikeModel = {
+				modelKwargs: {
+					response_format: {
+						type: 'json_object',
+					},
+				},
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				openAILikeModel as unknown as BaseChatModel,
+			);
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return JsonOutputParser for Ollama models with json format', () => {
+			const ollamaLikeModel = {
+				format: 'json',
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				ollamaLikeModel as unknown as BaseChatModel,
+			);
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return StringOutputParser for models without JSON format settings', () => {
+			const regularModel = new FakeLLM({});
+
+			const parser = chainExecutor.getOutputParserForLLM(regularModel);
+			expect(parser).toBeInstanceOf(StringOutputParser);
+		});
+	});
+
 	describe('executeChain', () => {
 		it('should execute a simple chain without output parsers', async () => {
 			const fakeLLM = new FakeLLM({ response: 'Test response' });

@@ -219,5 +256,77 @@ describe('chainExecutor', () => {
 
 			expect(result).toEqual(['Test chat response']);
 		});
+
+		it('should use JsonOutputParser for OpenAI models with json_object response format', async () => {
+			const fakeOpenAIModel = new FakeChatModel({});
+			(
+				fakeOpenAIModel as unknown as { modelKwargs: { response_format: { type: string } } }
+			).modelKwargs = {
+				response_format: { type: 'json_object' },
+			};
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOpenAIModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
+
+		it('should use JsonOutputParser for Ollama models with json format', async () => {
+			const fakeOllamaModel = new FakeChatModel({});
+			(fakeOllamaModel as unknown as { format: string }).format = 'json';
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOllamaModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
 	});
 });
test/responseFormatter.test.ts

@@ -3,38 +3,34 @@ import { formatResponse } from '../methods/responseFormatter';
 describe('responseFormatter', () => {
 	describe('formatResponse', () => {
 		it('should format string responses', () => {
-			const result = formatResponse('Test response');
+			const result = formatResponse('Test response', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response',
-				},
+				text: 'Test response',
 			});
 		});
 
 		it('should trim string responses', () => {
-			const result = formatResponse(' Test response with whitespace ');
+			const result = formatResponse(' Test response with whitespace ', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response with whitespace',
-				},
+				text: 'Test response with whitespace',
 			});
 		});
 
 		it('should handle array responses', () => {
 			const testArray = [{ item: 1 }, { item: 2 }];
-			const result = formatResponse(testArray);
+			const result = formatResponse(testArray, 1.6);
 			expect(result).toEqual({ data: testArray });
 		});
 
 		it('should handle object responses', () => {
 			const testObject = { key: 'value', nested: { key: 'value' } };
-			const result = formatResponse(testObject);
+			const result = formatResponse(testObject, 1.6);
 			expect(result).toEqual(testObject);
 		});
 
 		it('should handle primitive non-string responses', () => {
 			const testNumber = 42;
-			const result = formatResponse(testNumber);
+			const result = formatResponse(testNumber, 1.6);
 			expect(result).toEqual({
 				response: {
 					text: 42,