fix(Basic LLM Chain Node): Prevent incorrect wrapping of output (#14183)

Author: oleg (committed by GitHub)
Date: 2025-03-26 14:26:09 +01:00
parent ee64fdc5cb
commit b9030d45de
5 changed files with 174 additions and 25 deletions
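In short: when the attached model is configured for JSON output, the chain previously piped everything through a StringOutputParser, so the JSON came back as an unparsed string wrapped in a text field. The chain now selects an output parser that matches the model's configuration and, from typeVersion 1.6, returns parsed objects unwrapped. A minimal sketch of the resulting shapes (illustrative values; the relative import assumes the node's folder layout):

import { formatResponse } from './methods/responseFormatter';

// A JSON-mode model replies with the raw string '{"city":"Berlin"}',
// which the JsonOutputParser turns into an object before formatting.
formatResponse({ city: 'Berlin' }, 1.6); // => { city: 'Berlin' } (passed through as-is)
formatResponse({ city: 'Berlin' }, 1.5); // => { text: '{"city":"Berlin"}' } (legacy stringified shape)
formatResponse('  plain answer  ', 1.6); // => { text: 'plain answer' } (strings are trimmed)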

View File

@@ -34,7 +34,7 @@ export class ChainLlm implements INodeType {
 		icon: 'fa:link',
 		iconColor: 'black',
 		group: ['transform'],
-		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
+		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
 		description: 'A simple chain to prompt a large language model',
 		defaults: {
 			name: 'Basic LLM Chain',
@@ -119,7 +119,7 @@ export class ChainLlm implements INodeType {
 			// Process each response and add to return data
 			responses.forEach((response) => {
 				returnData.push({
-					json: formatResponse(response),
+					json: formatResponse(response, this.getNode().typeVersion),
 				});
 			});
 		} catch (error) {

View File

@@ -1,5 +1,6 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseLLMOutputParser } from '@langchain/core/output_parsers';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import type { IExecuteFunctions } from 'n8n-workflow';
@@ -8,6 +9,46 @@ import { getTracingConfig } from '@utils/tracing';
 import { createPromptTemplate } from './promptUtils';
 import type { ChainExecutionParams } from './types';
 
+/**
+ * Type guard to check if the LLM has a modelKwargs property (OpenAI)
+ */
+export function isModelWithResponseFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { modelKwargs: { response_format: { type: string } } } {
+	return (
+		'modelKwargs' in llm &&
+		!!llm.modelKwargs &&
+		typeof llm.modelKwargs === 'object' &&
+		'response_format' in llm.modelKwargs
+	);
+}
+
+/**
+ * Type guard to check if the LLM has a format property (Ollama)
+ */
+export function isModelWithFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { format: string } {
+	return 'format' in llm && typeof llm.format !== 'undefined';
+}
+
+/**
+ * Determines if an LLM is configured to output JSON and returns the appropriate output parser
+ */
+export function getOutputParserForLLM(
+	llm: BaseLanguageModel,
+): BaseLLMOutputParser<string | Record<string, unknown>> {
+	if (isModelWithResponseFormat(llm) && llm.modelKwargs?.response_format?.type === 'json_object') {
+		return new JsonOutputParser();
+	}
+
+	if (isModelWithFormat(llm) && llm.format === 'json') {
+		return new JsonOutputParser();
+	}
+
+	return new StringOutputParser();
+}
+
 /**
  * Creates a simple chain for LLMs without output parsers
  */
@@ -21,11 +62,10 @@ async function executeSimpleChain({
 	llm: BaseLanguageModel;
 	query: string;
 	prompt: ChatPromptTemplate | PromptTemplate;
-}): Promise<string[]> {
-	const chain = prompt
-		.pipe(llm)
-		.pipe(new StringOutputParser())
-		.withConfig(getTracingConfig(context));
+}) {
+	const outputParser = getOutputParserForLLM(llm);
+
+	const chain = prompt.pipe(llm).pipe(outputParser).withConfig(getTracingConfig(context));
 
 	// Execute the chain
 	const response = await chain.invoke({
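For reference, a small usage sketch of the parser selection above (the bare objects are simplified stand-ins for real model instances, mirroring the unit tests further down):

import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { getOutputParserForLLM } from './chainExecutor';

// OpenAI-style clients signal JSON mode via modelKwargs.response_format
const openAiLike = {
	modelKwargs: { response_format: { type: 'json_object' } },
} as unknown as BaseLanguageModel;

// Ollama-style clients signal JSON mode via a top-level format property
const ollamaLike = { format: 'json' } as unknown as BaseLanguageModel;

getOutputParserForLLM(openAiLike); // JsonOutputParser
getOutputParserForLLM(ollamaLike); // JsonOutputParser
getOutputParserForLLM({} as unknown as BaseLanguageModel); // StringOutputParser (default)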

View File

@@ -3,12 +3,10 @@ import type { IDataObject } from 'n8n-workflow';
 /**
  * Formats the response from the LLM chain into a consistent structure
  */
-export function formatResponse(response: unknown): IDataObject {
+export function formatResponse(response: unknown, version: number): IDataObject {
 	if (typeof response === 'string') {
 		return {
-			response: {
-				text: response.trim(),
-			},
+			text: response.trim(),
 		};
 	}
@@ -19,9 +17,15 @@ export function formatResponse(response: unknown): IDataObject {
 	}
 
 	if (response instanceof Object) {
-		return response as IDataObject;
+		if (version >= 1.6) {
+			return response as IDataObject;
+		}
+
+		return {
+			text: JSON.stringify(response),
+		};
 	}
 
 	return {
 		response: {
 			text: response,
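Taken together, the branches give formatResponse the following behavior (values assumed for illustration):

formatResponse('  hi  ', 1.6);   // => { text: 'hi' }             (strings: trimmed)
formatResponse([{ a: 1 }], 1.6); // => { data: [{ a: 1 }] }       (arrays: wrapped in data)
formatResponse({ a: 1 }, 1.6);   // => { a: 1 }                   (objects, version >= 1.6: as-is)
formatResponse({ a: 1 }, 1.5);   // => { text: '{"a":1}' }        (objects, older versions: stringified)
formatResponse(42, 1.6);         // => { response: { text: 42 } } (other primitives: legacy wrapper)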

View File

@@ -1,4 +1,5 @@
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import { FakeLLM, FakeChatModel } from '@langchain/core/utils/testing';
 import { mock } from 'jest-mock-extended';
@@ -8,6 +9,7 @@ import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import * as tracing from '@utils/tracing';
 
 import { executeChain } from '../methods/chainExecutor';
+import * as chainExecutor from '../methods/chainExecutor';
 import * as promptUtils from '../methods/promptUtils';
 
 jest.mock('@utils/tracing', () => ({
@@ -27,6 +29,41 @@ describe('chainExecutor', () => {
 		jest.clearAllMocks();
 	});
 
+	describe('getOutputParserForLLM', () => {
+		it('should return JsonOutputParser for OpenAI-like models with json_object response format', () => {
+			const openAILikeModel = {
+				modelKwargs: {
+					response_format: {
+						type: 'json_object',
+					},
+				},
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				openAILikeModel as unknown as BaseChatModel,
+			);
+
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return JsonOutputParser for Ollama models with json format', () => {
+			const ollamaLikeModel = {
+				format: 'json',
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				ollamaLikeModel as unknown as BaseChatModel,
+			);
+
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return StringOutputParser for models without JSON format settings', () => {
+			const regularModel = new FakeLLM({});
+
+			const parser = chainExecutor.getOutputParserForLLM(regularModel);
+
+			expect(parser).toBeInstanceOf(StringOutputParser);
+		});
+	});
+
 	describe('executeChain', () => {
 		it('should execute a simple chain without output parsers', async () => {
 			const fakeLLM = new FakeLLM({ response: 'Test response' });
@@ -219,5 +256,77 @@ describe('chainExecutor', () => {
 			expect(result).toEqual(['Test chat response']);
 		});
 
+		it('should use JsonOutputParser for OpenAI models with json_object response format', async () => {
+			const fakeOpenAIModel = new FakeChatModel({});
+			(
+				fakeOpenAIModel as unknown as { modelKwargs: { response_format: { type: string } } }
+			).modelKwargs = {
+				response_format: { type: 'json_object' },
+			};
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOpenAIModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
+
+		it('should use JsonOutputParser for Ollama models with json format', async () => {
+			const fakeOllamaModel = new FakeChatModel({});
+			(fakeOllamaModel as unknown as { format: string }).format = 'json';
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOllamaModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
 	});
 });

View File

@@ -3,38 +3,34 @@ import { formatResponse } from '../methods/responseFormatter';
 describe('responseFormatter', () => {
 	describe('formatResponse', () => {
 		it('should format string responses', () => {
-			const result = formatResponse('Test response');
+			const result = formatResponse('Test response', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response',
-				},
+				text: 'Test response',
 			});
 		});
 
 		it('should trim string responses', () => {
-			const result = formatResponse(' Test response with whitespace ');
+			const result = formatResponse(' Test response with whitespace ', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response with whitespace',
-				},
+				text: 'Test response with whitespace',
 			});
 		});
 
 		it('should handle array responses', () => {
 			const testArray = [{ item: 1 }, { item: 2 }];
-			const result = formatResponse(testArray);
+			const result = formatResponse(testArray, 1.6);
 			expect(result).toEqual({ data: testArray });
 		});
 
 		it('should handle object responses', () => {
 			const testObject = { key: 'value', nested: { key: 'value' } };
-			const result = formatResponse(testObject);
+			const result = formatResponse(testObject, 1.6);
 			expect(result).toEqual(testObject);
 		});
 
 		it('should handle primitive non-string responses', () => {
 			const testNumber = 42;
-			const result = formatResponse(testNumber);
+			const result = formatResponse(testNumber, 1.6);
 			expect(result).toEqual({
 				response: {
 					text: 42,