chore: Add telemetry on structured output errors (no-changelog) (#16389)

This commit is contained in:
Eugene
2025-06-17 08:58:15 +02:00
committed by GitHub
parent 549a541219
commit a953218b9d
4 changed files with 477 additions and 1 deletion

View File

@@ -31,6 +31,7 @@ export class N8nStructuredOutputParser extends StructuredOutputParser<
const { index } = this.context.addInputData(NodeConnectionTypes.AiOutputParser, [
[{ json: { action: 'parse', text } }],
]);
try {
const jsonString = text.includes('```') ? text.split(/```(?:json)?/)[1] : text;
const json = JSON.parse(jsonString.trim());
@@ -61,6 +62,24 @@ export class N8nStructuredOutputParser extends StructuredOutputParser<
},
);
// Add additional context to the error
if (e instanceof SyntaxError) {
nodeError.context.outputParserFailReason = 'Invalid JSON in model output';
} else if (
text.trim() === '{}' ||
(e instanceof z.ZodError &&
e.issues?.[0] &&
e.issues?.[0].code === 'invalid_type' &&
e.issues?.[0].path?.[0] === 'output' &&
e.issues?.[0].expected === 'object' &&
e.issues?.[0].received === 'undefined')
) {
nodeError.context.outputParserFailReason = 'Model output wrapper is an empty object';
} else if (e instanceof z.ZodError) {
nodeError.context.outputParserFailReason =
'Model output does not match the expected schema';
}
logAiEvent(this.context, 'ai-output-parsed', {
text,
response: e.message ?? e,

View File

@@ -732,6 +732,11 @@ export class TelemetryEventRelay extends EventRelay {
eval_rows_left: null,
...TelemetryHelpers.resolveAIMetrics(workflow.nodes, this.nodeTypes),
...TelemetryHelpers.resolveVectorStoreMetrics(workflow.nodes, this.nodeTypes, runData),
...TelemetryHelpers.extractLastExecutedNodeStructuredOutputErrorInfo(
workflow,
this.nodeTypes,
runData,
),
};
if (!manualExecEventProperties.node_graph_string) {

View File

@@ -34,7 +34,9 @@ import type {
IRunData,
ITaskData,
IRun,
INodeParameterResourceLocator,
} from './interfaces';
import { NodeConnectionTypes } from './interfaces';
import { getNodeParameters } from './node-helpers';
import { jsonParse } from './utils';
@@ -251,7 +253,7 @@ export function generateNodesGraph(
if (node.type === AI_TRANSFORM_NODE_TYPE && options?.isCloudDeployment) {
nodeItem.prompts = { instructions: node.parameters.instructions as string };
} else if (node.type === AGENT_LANGCHAIN_NODE_TYPE) {
nodeItem.agent = (node.parameters.agent as string) ?? 'conversationalAgent';
nodeItem.agent = (node.parameters.agent as string) ?? 'toolsAgent';
} else if (node.type === MERGE_NODE_TYPE) {
nodeItem.operation = node.parameters.mode as string;
@@ -644,3 +646,91 @@ export function resolveVectorStoreMetrics(
queriedDataFromVectorStore: retrievingVectorStores.length > 0,
};
}
type AgentNodeStructuredOutputErrorInfo = {
	output_parser_fail_reason?: string;
	model_name?: string;
	num_tools?: number;
};

// Error message thrown by the agent node when its structured output parser rejects
// the model output (matched by exact string; see N8nStructuredOutputParser).
const STRUCTURED_OUTPUT_PARSE_ERROR_MESSAGE = "Model output doesn't fit required format";

// Parameter keys that may hold the model identifier on a language model node.
const MODEL_NAME_PARAMETER_KEYS = ['model', 'modelName'] as const;

/** Counts the nodes connected into `targetNodeName` through an `AiTool` connection. */
function countConnectedTools(workflow: IWorkflowBase, targetNodeName: string): number {
	// `filter()` always returns an array, so no `?? 0` fallback is needed on `.length`
	return Object.keys(workflow.connections).filter((sourceNodeName) =>
		workflow.connections[sourceNodeName]?.[NodeConnectionTypes.AiTool]?.[0]?.some(
			(connection) => connection.node === targetNodeName,
		),
	).length;
}

/**
 * Finds the name of the node connected into `targetNodeName` through an
 * `AiLanguageModel` connection, if any.
 */
function findConnectedLanguageModelName(
	workflow: IWorkflowBase,
	targetNodeName: string,
): string | undefined {
	return Object.keys(workflow.connections).find((sourceNodeName) =>
		workflow.connections[sourceNodeName]?.[NodeConnectionTypes.AiLanguageModel]?.[0]?.some(
			(connection) => connection.node === targetNodeName,
		),
	);
}

/**
 * Resolves the model identifier from a language model node's resolved parameters.
 * Handles both plain string parameters and resource locator objects (`{ value }`).
 */
function resolveModelName(
	workflow: IWorkflowBase,
	nodeTypes: INodeTypes,
	languageModelNodeName: string,
): string | undefined {
	const languageModelNode = getNodeTypeForName(workflow, languageModelNodeName);
	if (!languageModelNode) return undefined;

	const nodeType = nodeTypes.getByNameAndVersion(
		languageModelNode.type,
		languageModelNode.typeVersion,
	);
	if (!nodeType) return undefined;

	const nodeParameters = getNodeParameters(
		nodeType.description.properties,
		languageModelNode.parameters,
		true,
		false,
		languageModelNode,
		nodeType.description,
	);

	for (const key of MODEL_NAME_PARAMETER_KEYS) {
		if (nodeParameters?.[key]) {
			const modelName =
				typeof nodeParameters[key] === 'string'
					? nodeParameters[key]
					: ((nodeParameters[key] as INodeParameterResourceLocator).value as string);
			if (modelName) return modelName;
		}
	}
	return undefined;
}

/**
 * Extract additional debug information if the last executed node was an agent node
 * that failed with a structured output error.
 *
 * @param workflow - Workflow whose nodes/connections are inspected
 * @param nodeTypes - Registry used to resolve the language model node's parameters
 * @param runData - Execution result; only errored runs with a known last node are inspected
 * @returns Telemetry fields (all optional): `output_parser_fail_reason` from the agent's
 *   error context, `num_tools` connected to the agent, and `model_name` of the connected
 *   language model node. Empty object when the run did not end in an agent
 *   structured-output scenario.
 */
export function extractLastExecutedNodeStructuredOutputErrorInfo(
	workflow: IWorkflowBase,
	nodeTypes: INodeTypes,
	runData: IRun,
): AgentNodeStructuredOutputErrorInfo {
	const info: AgentNodeStructuredOutputErrorInfo = {};

	// Only relevant when the execution errored and we know which node ran last
	if (!runData?.data.resultData.error || !runData.data.resultData.lastNodeExecuted) {
		return info;
	}

	const lastNode = getNodeTypeForName(workflow, runData.data.resultData.lastNodeExecuted);
	// Only agent nodes with an attached output parser can produce structured output errors
	if (
		lastNode === undefined ||
		lastNode.type !== AGENT_LANGCHAIN_NODE_TYPE ||
		!lastNode.parameters.hasOutputParser
	) {
		return info;
	}

	// Surface the fail reason the output parser attached to the agent node's error context
	const agentOutputError = runData.data.resultData.runData[lastNode.name]?.[0]?.error;
	if (agentOutputError?.message === STRUCTURED_OUTPUT_PARSE_ERROR_MESSAGE) {
		info.output_parser_fail_reason = agentOutputError.context?.outputParserFailReason as string;
	}

	if (workflow.connections) {
		info.num_tools = countConnectedTools(workflow, lastNode.name);

		const languageModelNodeName = findConnectedLanguageModelName(workflow, lastNode.name);
		if (languageModelNodeName) {
			const modelName = resolveModelName(workflow, nodeTypes, languageModelNodeName);
			if (modelName) {
				info.model_name = modelName;
			}
		}
	}

	return info;
}

View File

@@ -10,12 +10,14 @@ import type {
IRunData,
NodeConnectionType,
IWorkflowBase,
INodeParameters,
} from '@/interfaces';
import { NodeConnectionTypes } from '@/interfaces';
import * as nodeHelpers from '@/node-helpers';
import {
ANONYMIZATION_CHARACTER as CHAR,
extractLastExecutedNodeCredentialData,
extractLastExecutedNodeStructuredOutputErrorInfo,
generateNodesGraph,
getDomainBase,
getDomainPath,
@@ -2027,3 +2029,363 @@ describe('resolveVectorStoreMetrics', () => {
});
});
});
// Tests for extractLastExecutedNodeStructuredOutputErrorInfo: verifies that agent
// structured-output error telemetry (fail reason, connected tool count, model name)
// is extracted only for errored runs whose last node is an agent with an output parser.
describe('extractLastExecutedNodeStructuredOutputErrorInfo', () => {
	// Minimal IWorkflowBase wrapper around the given nodes/connections.
	const mockWorkflow = (nodes: INode[], connections?: any): IWorkflowBase => ({
		createdAt: new Date(),
		updatedAt: new Date(),
		id: 'test-workflow',
		name: 'Test Workflow',
		active: false,
		isArchived: false,
		nodes,
		connections: connections || {},
		settings: {},
		pinData: {},
		versionId: 'test-version',
	});

	// Agent node fixture; `hasOutputParser` controls whether structured-output info applies.
	const mockAgentNode = (name = 'Agent', hasOutputParser = true): INode => ({
		id: 'agent-node-id',
		name,
		type: '@n8n/n8n-nodes-langchain.agent',
		typeVersion: 1,
		position: [100, 100],
		parameters: {
			hasOutputParser,
		},
	});

	// Language model node fixture exposing the model id via the `model` parameter.
	const mockLanguageModelNode = (name = 'Model', model = 'gpt-4'): INode => ({
		id: 'model-node-id',
		name,
		type: 'n8n-nodes-langchain.lmChatOpenAi',
		typeVersion: 1,
		position: [200, 200],
		parameters: {
			model,
		},
	});

	// Tool node fixture; name is used for connection lookups.
	const mockToolNode = (name: string): INode => ({
		id: `tool-${name}`,
		name,
		type: 'n8n-nodes-base.httpRequestTool',
		typeVersion: 1,
		position: [300, 300],
		parameters: {},
	});

	// IRun fixture: optional workflow-level `error` and per-node `runData`.
	const mockRunData = (lastNodeExecuted: string, error?: any, nodeRunData?: any): IRun => ({
		mode: 'manual',
		status: error ? 'error' : 'success',
		startedAt: new Date(),
		stoppedAt: new Date(),
		data: {
			startData: {},
			resultData: {
				lastNodeExecuted,
				error,
				runData: nodeRunData || {},
			},
		} as any,
	});

	it('should return empty object when there is no error', () => {
		const workflow = mockWorkflow([mockAgentNode()]);
		const runData = mockRunData('Agent');
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({});
	});

	it('should return empty object when lastNodeExecuted is not defined', () => {
		const workflow = mockWorkflow([mockAgentNode()]);
		const runData = mockRunData('', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({});
	});

	it('should return empty object when last executed node is not found in workflow', () => {
		const workflow = mockWorkflow([mockAgentNode('Agent')]);
		const runData = mockRunData('NonExistentNode', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({});
	});

	it('should return empty object when last executed node is not an agent node', () => {
		const workflow = mockWorkflow([mockLanguageModelNode('Model')]);
		const runData = mockRunData('Model', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({});
	});

	it('should return empty object when agent node does not have output parser', () => {
		const workflow = mockWorkflow([mockAgentNode('Agent', false)]);
		const runData = mockRunData('Agent', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({});
	});

	it('should return error info without output parser fail reason when error is not output parser error', () => {
		const workflow = mockWorkflow([mockAgentNode()]);
		// Node-level error message does not match the structured-output error message,
		// so only num_tools is populated.
		const runData = mockRunData('Agent', new Error('Different error'), {
			Agent: [
				{
					error: {
						message: 'Some other error',
					},
				},
			],
		});
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 0,
		});
	});

	it('should return error info with output parser fail reason', () => {
		const workflow = mockWorkflow([mockAgentNode()]);
		const runData = mockRunData('Agent', new Error('Some error'), {
			Agent: [
				{
					error: {
						message: "Model output doesn't fit required format",
						context: {
							outputParserFailReason: 'Failed to parse JSON output',
						},
					},
				},
			],
		});
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			output_parser_fail_reason: 'Failed to parse JSON output',
			num_tools: 0,
		});
	});

	it('should count connected tools correctly', () => {
		const agentNode = mockAgentNode();
		const tool1 = mockToolNode('Tool1');
		const tool2 = mockToolNode('Tool2');
		const workflow = mockWorkflow([agentNode, tool1, tool2], {
			Tool1: {
				[NodeConnectionTypes.AiTool]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
				],
			},
			Tool2: {
				[NodeConnectionTypes.AiTool]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
				],
			},
		});
		const runData = mockRunData('Agent', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 2,
		});
	});

	it('should extract model name from connected language model node', () => {
		const agentNode = mockAgentNode();
		const modelNode = mockLanguageModelNode('OpenAI Model', 'gpt-4-turbo');
		const workflow = mockWorkflow([agentNode, modelNode], {
			'OpenAI Model': {
				[NodeConnectionTypes.AiLanguageModel]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiLanguageModel, index: 0 }],
				],
			},
		});
		const runData = mockRunData('Agent', new Error('Some error'));
		// Resource-locator-style parameter: model name lives under `.value`
		jest
			.spyOn(nodeHelpers, 'getNodeParameters')
			.mockReturnValueOnce(mock<INodeParameters>({ model: { value: 'gpt-4-turbo' } }));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 0,
			model_name: 'gpt-4-turbo',
		});
	});

	it('should handle complete scenario with tools, model, and output parser error', () => {
		const agentNode = mockAgentNode();
		const modelNode = mockLanguageModelNode('OpenAI Model', 'gpt-4');
		const tool1 = mockToolNode('HTTPTool');
		const tool2 = mockToolNode('SlackTool');
		const tool3 = mockToolNode('GoogleSheetsTool');
		const workflow = mockWorkflow([agentNode, modelNode, tool1, tool2, tool3], {
			'OpenAI Model': {
				[NodeConnectionTypes.AiLanguageModel]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiLanguageModel, index: 0 }],
				],
			},
			HTTPTool: {
				[NodeConnectionTypes.AiTool]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
				],
			},
			SlackTool: {
				[NodeConnectionTypes.AiTool]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
				],
			},
			GoogleSheetsTool: {
				[NodeConnectionTypes.AiTool]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
				],
			},
		});
		const runData = mockRunData('Agent', new Error('Workflow error'), {
			Agent: [
				{
					error: {
						message: "Model output doesn't fit required format",
						context: {
							outputParserFailReason: 'Invalid JSON structure: Expected object, got string',
						},
					},
				},
			],
		});
		jest
			.spyOn(nodeHelpers, 'getNodeParameters')
			.mockReturnValueOnce(mock<INodeParameters>({ model: { value: 'gpt-4.1-mini' } }));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			output_parser_fail_reason: 'Invalid JSON structure: Expected object, got string',
			num_tools: 3,
			model_name: 'gpt-4.1-mini',
		});
	});

	it('should pick correct model when workflow has multiple model nodes but only one connected to agent', () => {
		const agentNode = mockAgentNode();
		const connectedModel = mockLanguageModelNode('Connected Model', 'gpt-4');
		const unconnectedModel = mockLanguageModelNode('Unconnected Model', 'claude-3');
		const workflow = mockWorkflow([agentNode, connectedModel, unconnectedModel], {
			'Connected Model': {
				[NodeConnectionTypes.AiLanguageModel]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiLanguageModel, index: 0 }],
				],
			},
			// Unconnected Model is not connected to anything
		});
		const runData = mockRunData('Agent', new Error('Some error'));
		// Plain-string model parameter (no resource locator)
		jest
			.spyOn(nodeHelpers, 'getNodeParameters')
			.mockReturnValueOnce(mock<INodeParameters>({ model: 'gpt-4' }));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 0,
			model_name: 'gpt-4',
		});
	});

	it('should only count tools connected to the agent when workflow has multiple tool nodes', () => {
		const agentNode = mockAgentNode();
		const connectedTool1 = mockToolNode('ConnectedTool1');
		const connectedTool2 = mockToolNode('ConnectedTool2');
		const unconnectedTool1 = mockToolNode('UnconnectedTool1');
		const unconnectedTool2 = mockToolNode('UnconnectedTool2');
		const someOtherNode: INode = {
			id: 'other-node',
			name: 'SomeOtherNode',
			type: 'n8n-nodes-base.set',
			typeVersion: 1,
			position: [400, 400],
			parameters: {},
		};
		const workflow = mockWorkflow(
			[
				agentNode,
				connectedTool1,
				connectedTool2,
				unconnectedTool1,
				unconnectedTool2,
				someOtherNode,
			],
			{
				ConnectedTool1: {
					[NodeConnectionTypes.AiTool]: [
						[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
					],
				},
				ConnectedTool2: {
					[NodeConnectionTypes.AiTool]: [
						[{ node: 'Agent', type: NodeConnectionTypes.AiTool, index: 0 }],
					],
				},
				// UnconnectedTool1 and UnconnectedTool2 are connected to SomeOtherNode, not to Agent
				UnconnectedTool1: {
					[NodeConnectionTypes.AiTool]: [
						[{ node: 'SomeOtherNode', type: NodeConnectionTypes.AiTool, index: 0 }],
					],
				},
				UnconnectedTool2: {
					[NodeConnectionTypes.AiTool]: [
						[{ node: 'SomeOtherNode', type: NodeConnectionTypes.AiTool, index: 0 }],
					],
				},
			},
		);
		const runData = mockRunData('Agent', new Error('Some error'));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 2, // Only ConnectedTool1 and ConnectedTool2
		});
	});

	it('should extract model name from modelName parameter when model parameter is not present', () => {
		const agentNode = mockAgentNode();
		const modelNode: INode = {
			id: 'model-node-id',
			name: 'Google Gemini Model',
			type: 'n8n-nodes-langchain.lmChatGoogleGemini',
			typeVersion: 1,
			position: [200, 200],
			parameters: {
				// Using modelName instead of model
				modelName: 'gemini-1.5-pro',
			},
		};
		const workflow = mockWorkflow([agentNode, modelNode], {
			'Google Gemini Model': {
				[NodeConnectionTypes.AiLanguageModel]: [
					[{ node: 'Agent', type: NodeConnectionTypes.AiLanguageModel, index: 0 }],
				],
			},
		});
		const runData = mockRunData('Agent', new Error('Some error'));
		jest
			.spyOn(nodeHelpers, 'getNodeParameters')
			.mockReturnValueOnce(mock<INodeParameters>({ modelName: 'gemini-1.5-pro' }));
		const result = extractLastExecutedNodeStructuredOutputErrorInfo(workflow, nodeTypes, runData);
		expect(result).toEqual({
			num_tools: 0,
			model_name: 'gemini-1.5-pro',
		});
	});
});