feat(core): Add telemetry for Evaluation feature (no-changelog) (#15562)

Co-authored-by: Yiorgis Gozadinos <yiorgis@n8n.io>
Author: Eugene
Date: 2025-05-26 13:09:03 +02:00 (committed by GitHub)
Commit: eb3dd199ab (parent 647cb851e5)
3 changed files with 363 additions and 22 deletions
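
In rough terms, the service-side change below replaces the old status-only 'Test run finished' event with a single payload accumulated in a telemetryMeta object over the lifetime of the run. A minimal sketch of the resulting payload shape, assuming the field names from the diff (the explicit type alias is an illustration, not part of the commit):

// Sketch only: property names mirror the telemetryMeta object introduced in
// TestRunnerService below; the types are inferred from how each field is assigned.
type TestRunFinishedPayload = {
	workflow_id: string;
	test_type: 'evaluation';
	run_id: string;
	start: number; // Date.now() captured when the run starts
	status: 'success' | 'fail' | 'cancelled';
	test_case_count: number;
	errored_test_case_count: number;
	metric_count: number;
	error_message: string; // only populated when status is 'fail'
	duration: number; // ms, computed in the finally block as Date.now() - start
};

// Emitted once per run, from the finally block:
// this.telemetry.track('Test run finished', telemetryPayload);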

@@ -15,6 +15,7 @@ import type {
IExecuteData,
INodeExecutionData,
AssignmentCollectionValue,
GenericValue,
} from 'n8n-workflow';
import assert from 'node:assert';
@@ -386,6 +387,20 @@ export class TestRunnerService {
const testRun = await this.testRunRepository.createTestRun(workflowId);
assert(testRun, 'Unable to create a test run');
// Initialize telemetry metadata
const telemetryMeta = {
workflow_id: workflowId,
test_type: 'evaluation',
run_id: testRun.id,
start: Date.now(),
status: 'success' as 'success' | 'fail' | 'cancelled',
test_case_count: 0,
errored_test_case_count: 0,
metric_count: 0,
error_message: '',
duration: 0,
};
// 0.1 Initialize AbortController
const abortController = new AbortController();
this.abortControllers.set(testRun.id, abortController);
@@ -397,8 +412,6 @@ export class TestRunnerService {
userId: user.id,
};
let testRunEndStatusForTelemetry;
const abortSignal = abortController.signal;
const { manager: dbManager } = this.testRunRepository;
@@ -428,6 +441,7 @@ export class TestRunnerService {
);
const testCases = datasetTriggerOutput.map((items) => ({ json: items.json }));
telemetryMeta.test_case_count = testCases.length;
this.logger.debug('Found test cases', { count: testCases.length });
@@ -440,6 +454,7 @@ export class TestRunnerService {
for (const testCase of testCases) {
if (abortSignal.aborted) {
telemetryMeta.status = 'cancelled';
this.logger.debug('Test run was cancelled', {
workflowId,
});
@@ -484,6 +499,7 @@ export class TestRunnerService {
errorCode: 'FAILED_TO_EXECUTE_WORKFLOW',
metrics: {},
});
telemetryMeta.errored_test_case_count++;
continue;
}
const completedAt = new Date();
@@ -503,6 +519,7 @@ export class TestRunnerService {
status: 'error',
errorCode: 'NO_METRICS_COLLECTED',
});
telemetryMeta.errored_test_case_count++;
} else {
this.logger.debug('Test case metrics extracted', addedMetrics);
// Create a new test case execution in DB
@@ -526,6 +543,8 @@ export class TestRunnerService {
error: e,
});
telemetryMeta.errored_test_case_count++;
// In case of an unexpected error save it as failed test case execution and continue with the next test case
if (e instanceof TestCaseExecutionError) {
await this.testCaseExecutionRepository.createTestCaseExecution({
@@ -560,21 +579,21 @@ export class TestRunnerService {
await dbManager.transaction(async (trx) => {
await this.testRunRepository.markAsCancelled(testRun.id, trx);
await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx);
testRunEndStatusForTelemetry = 'cancelled';
});
telemetryMeta.status = 'cancelled';
} else {
const aggregatedMetrics = metrics.getAggregatedMetrics();
telemetryMeta.metric_count = Object.keys(aggregatedMetrics).length;
this.logger.debug('Aggregated metrics', aggregatedMetrics);
await this.testRunRepository.markAsCompleted(testRun.id, aggregatedMetrics);
this.logger.debug('Test run finished', { workflowId, testRunId: testRun.id });
testRunEndStatusForTelemetry = 'completed';
}
} catch (e) {
telemetryMeta.status = 'fail';
if (e instanceof ExecutionCancelledError) {
this.logger.debug('Evaluation execution was cancelled. Cancelling test run', {
testRunId: testRun.id,
@@ -586,25 +605,43 @@ export class TestRunnerService {
await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx);
});
testRunEndStatusForTelemetry = 'cancelled';
telemetryMeta.status = 'cancelled';
} else if (e instanceof TestRunError) {
await this.testRunRepository.markAsError(testRun.id, e.code, e.extra as IDataObject);
testRunEndStatusForTelemetry = 'error';
telemetryMeta.error_message = e.code;
if (e.extra && typeof e.extra === 'object' && 'message' in e.extra) {
telemetryMeta.error_message += `: ${String(e.extra.message)}`;
}
} else {
await this.testRunRepository.markAsError(testRun.id, 'UNKNOWN_ERROR');
testRunEndStatusForTelemetry = 'error';
telemetryMeta.error_message = e instanceof Error ? e.message : 'UNKNOWN_ERROR';
throw e;
}
} finally {
// Calculate duration
telemetryMeta.duration = Date.now() - telemetryMeta.start;
// Clean up abort controller
this.abortControllers.delete(testRun.id);
// Send telemetry event
this.telemetry.track('Test run finished', {
workflow_id: workflowId,
run_id: testRun.id,
status: testRunEndStatusForTelemetry,
});
// Send telemetry event with complete metadata
const telemetryPayload: Record<string, GenericValue> = {
...telemetryMeta,
};
// Add success-specific fields
if (telemetryMeta.status === 'success') {
telemetryPayload.test_case_count = telemetryMeta.test_case_count;
telemetryPayload.errored_test_case_count = telemetryMeta.errored_test_case_count;
telemetryPayload.metric_count = telemetryMeta.metric_count;
}
// Add fail-specific fields
if (telemetryMeta.status === 'fail') {
telemetryPayload.error_message = telemetryMeta.error_message;
}
this.telemetry.track('Test run finished', telemetryPayload);
}
}

@@ -4,12 +4,14 @@ import { useUsageStore } from '@/stores/usage.store';
import { useAsyncState } from '@vueuse/core';
import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
import { useCanvasOperations } from '@/composables/useCanvasOperations';
import { useTelemetry } from '@/composables/useTelemetry';
import { useToast } from '@/composables/useToast';
import { useI18n } from '@/composables/useI18n';
import { useRouter } from 'vue-router';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { computed } from 'vue';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import { computed, watch } from 'vue';
import { N8nLink, N8nText } from '@n8n/design-system';
import EvaluationsPaywall from '@/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue';
import SetupWizard from '@/components/Evaluations.ee/SetupWizard/SetupWizard.vue';
@@ -21,10 +23,11 @@ const props = defineProps<{
const workflowsStore = useWorkflowsStore();
const usageStore = useUsageStore();
const evaluationStore = useEvaluationStore();
const nodeTypesStore = useNodeTypesStore();
const telemetry = useTelemetry();
const router = useRouter();
const toast = useToast();
const locale = useI18n();
const nodeTypesStore = useNodeTypesStore();
const { initializeWorkspace } = useCanvasOperations({ router });
@@ -32,13 +35,18 @@ const evaluationsLicensed = computed(() => {
return usageStore.workflowsWithEvaluationsLimit !== 0;
});
const showWizard = computed(() => {
const runs = Object.values(evaluationStore.testRunsById ?? {}).filter(
const runs = computed(() => {
return Object.values(evaluationStore.testRunsById ?? {}).filter(
({ workflowId }) => workflowId === props.name,
);
return runs.length === 0;
});
const hasRuns = computed(() => {
return runs.value.length > 0;
});
const showWizard = computed(() => !hasRuns.value);
// Method to run a test - will be used by the SetupWizard component
async function runTest() {
try {
@@ -54,6 +62,14 @@ async function runTest() {
}
}
const evaluationsQuotaExceeded = computed(() => {
return (
usageStore.workflowsWithEvaluationsLimit !== -1 &&
usageStore.workflowsWithEvaluationsCount >= usageStore.workflowsWithEvaluationsLimit &&
!hasRuns.value
);
});
const { isReady } = useAsyncState(async () => {
try {
await usageStore.getLicenseInfo();
@@ -83,6 +99,33 @@ const { isReady } = useAsyncState(async () => {
}
}
}, undefined);
watch(
isReady,
(ready) => {
if (ready) {
if (showWizard.value) {
telemetry.track('User viewed tests tab', {
workflow_id: props.name,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: evaluationStore.evaluationTriggerExists,
output_set_up: evaluationStore.evaluationSetOutputsNodeExist,
metrics_set_up: evaluationStore.evaluationSetMetricsNodeExist,
quota_reached: evaluationsQuotaExceeded.value,
});
} else {
telemetry.track('User viewed tests tab', {
workflow_id: props.name,
test_type: 'evaluation',
view: 'overview',
run_count: runs.value.length,
});
}
}
},
{ immediate: true },
);
</script>
<template>

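On the client side, EvaluationsRootView now fires a 'User viewed tests tab' event once isReady becomes true, with a payload that depends on whether the workflow already has test runs. A rough sketch of the two payload shapes, using the property names from the watch() handler above (these type aliases are illustrative; the component does not declare them):

// Illustrative only: names taken from the telemetry.track() calls above.
type TestsTabSetupViewPayload = {
	workflow_id: string;
	test_type: 'evaluation';
	view: 'setup';
	trigger_set_up: boolean;
	output_set_up: boolean;
	metrics_set_up: boolean;
	quota_reached: boolean;
};

type TestsTabOverviewPayload = {
	workflow_id: string;
	test_type: 'evaluation';
	view: 'overview';
	run_count: number;
};
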
@@ -1,4 +1,4 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { mock } from 'vitest-mock-extended';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
@@ -6,11 +6,30 @@ import EvaluationRootView from '../EvaluationsRootView.vue';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { useUsageStore } from '@/stores/usage.store';
import { mockedStore } from '@/__tests__/utils';
import type { IWorkflowDb } from '@/Interface';
import { waitFor } from '@testing-library/vue';
import type { TestRunRecord } from '@/api/evaluation.ee';
import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
import { useTelemetry } from '@/composables/useTelemetry';
import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE, NodeHelpers } from 'n8n-workflow';
vi.mock('@/composables/useTelemetry', () => {
const track = vi.fn();
return {
useTelemetry: () => ({
track,
}),
};
});
const getNodeType = vi.fn();
vi.mock('@/stores/nodeTypes.store', () => ({
useNodeTypesStore: vi.fn(() => ({
getNodeType,
})),
}));
describe('EvaluationsRootView', () => {
const renderComponent = createComponentRenderer(EvaluationRootView);
@@ -37,6 +56,21 @@ describe('EvaluationsRootView', () => {
beforeEach(() => {
createTestingPinia();
vi.clearAllMocks();
vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
assignments: {
assignments: [
{
id: 'xxxxx',
name: '=',
value: '',
type: 'string',
},
],
},
options: {},
});
});
it('should initialize workflow on mount if not already initialized', async () => {
@@ -91,4 +125,231 @@ describe('EvaluationsRootView', () => {
await waitFor(() => expect(container.querySelector('.setupContent')).toBeTruthy());
});
describe('telemetry', () => {
it('should send telemetry event on mount with setup view when no test runs exist', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
workflowsStore.workflow = mockWorkflow;
evaluationStore.testRunsById = {};
usageStore.workflowsWithEvaluationsLimit = 10;
usageStore.workflowsWithEvaluationsCount = 0;
// Mock no evaluation nodes in workflow
getNodeType.mockReturnValue(null);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: false,
output_set_up: false,
metrics_set_up: false,
quota_reached: false,
});
});
});
it('should send telemetry event on mount with overview view when test runs exist', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
workflowsStore.workflow = mockWorkflow;
evaluationStore.testRunsById = {
run1: mock<TestRunRecord>({ workflowId: mockWorkflow.id }),
run2: mock<TestRunRecord>({ workflowId: mockWorkflow.id }),
};
usageStore.workflowsWithEvaluationsLimit = 10;
usageStore.workflowsWithEvaluationsCount = 1;
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'overview',
run_count: 2,
});
});
});
it('should send telemetry event with trigger_set_up true when dataset trigger node exists', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
const workflowWithTrigger = mock<IWorkflowDb>({
...mockWorkflow,
nodes: [
{
id: 'trigger1',
name: 'Dataset Trigger',
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
},
],
});
workflowsStore.workflow = workflowWithTrigger;
evaluationStore.testRunsById = {};
usageStore.workflowsWithEvaluationsLimit = 10;
usageStore.workflowsWithEvaluationsCount = 0;
// Mock dataset trigger node type exists
getNodeType.mockImplementation((nodeType) =>
nodeType === EVALUATION_TRIGGER_NODE_TYPE ? { name: EVALUATION_TRIGGER_NODE_TYPE } : null,
);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: true,
output_set_up: false,
metrics_set_up: false,
quota_reached: false,
});
});
});
it('should send telemetry event with output_set_up true when evaluation set output node exists', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
const workflowWithOutputNode = mock<IWorkflowDb>({
...mockWorkflow,
nodes: [
{
id: 'output1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
},
},
],
});
vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
operation: 'setOutputs',
});
workflowsStore.workflow = workflowWithOutputNode;
evaluationStore.testRunsById = {};
usageStore.workflowsWithEvaluationsLimit = 10;
usageStore.workflowsWithEvaluationsCount = 0;
// Mock evaluation node type exists
getNodeType.mockImplementation((nodeType) =>
nodeType === EVALUATION_NODE_TYPE ? { name: EVALUATION_NODE_TYPE } : null,
);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: false,
output_set_up: true,
metrics_set_up: false,
quota_reached: false,
});
});
});
it('should send telemetry event with metrics_set_up true when evaluation metrics node exists', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
const workflowWithMetricsNode = mock<IWorkflowDb>({
...mockWorkflow,
nodes: [
{
id: 'metrics1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
},
},
],
});
vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
operation: 'setMetrics',
});
workflowsStore.workflow = workflowWithMetricsNode;
evaluationStore.testRunsById = {};
usageStore.workflowsWithEvaluationsLimit = 10;
usageStore.workflowsWithEvaluationsCount = 0;
// Mock evaluation node type exists
getNodeType.mockImplementation((nodeType) =>
nodeType === EVALUATION_NODE_TYPE ? { name: EVALUATION_NODE_TYPE } : null,
);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: false,
output_set_up: false,
metrics_set_up: true,
quota_reached: false,
});
});
});
it('should send telemetry event with quota_reached true when evaluations quota is exceeded', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const evaluationStore = mockedStore(useEvaluationStore);
const usageStore = mockedStore(useUsageStore);
workflowsStore.workflow = mockWorkflow;
evaluationStore.testRunsById = {};
usageStore.workflowsWithEvaluationsLimit = 5;
usageStore.workflowsWithEvaluationsCount = 5; // At limit
// Mock no evaluation nodes in workflow
getNodeType.mockReturnValue(null);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => {
expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
workflow_id: mockWorkflow.id,
test_type: 'evaluation',
view: 'setup',
trigger_set_up: false,
output_set_up: false,
metrics_set_up: false,
quota_reached: true,
});
});
});
});
});